diff --git a/.clang-format b/.clang-format index cebd24feb..1e5444d6f 100644 --- a/.clang-format +++ b/.clang-format @@ -4,6 +4,7 @@ Language: Cpp AccessModifierOffset: -1 AlignAfterOpenBracket: Align AlignConsecutiveMacros: None +#InsertNewlineAtEOF: true # Requires clang-format-16 support AlignConsecutiveAssignments: None AlignConsecutiveBitFields: None AlignConsecutiveDeclarations: None diff --git a/.github/workflows/pikiwidb.yml b/.github/workflows/pikiwidb.yml index 2fe5e0e6a..4f37da405 100644 --- a/.github/workflows/pikiwidb.yml +++ b/.github/workflows/pikiwidb.yml @@ -28,6 +28,8 @@ jobs: - name: Build run: | + brew install autoconf + brew install go sh build.sh - name: GTest diff --git a/.gitignore b/.gitignore index 4d0438520..316924f0f 100644 --- a/.gitignore +++ b/.gitignore @@ -80,3 +80,6 @@ compile_commands.json # build support build_support/__pycache__ build_support/clang_format_exclusions.txt + +# pkg +pkg \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 511e0b4d6..02e794285 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,11 +19,21 @@ IF (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") ENDIF () ELSEIF (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # using GCC - IF (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "10.0") - MESSAGE(FATAL_ERROR "GCC G++ version must be greater than 10.0") + IF (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "11.0") + MESSAGE(FATAL_ERROR "GCC G++ version must be greater than 11.0") ENDIF () ENDIF () +# get current date and time +EXECUTE_PROCESS(COMMAND date "+%Y-%m-%d_%H:%M:%S" OUTPUT_VARIABLE BUILD_TIMESTAMP OUTPUT_STRIP_TRAILING_WHITESPACE) +ADD_DEFINITIONS(-DKPIKIWIDB_BUILD_DATE="${BUILD_TIMESTAMP}") + +message(STATUS "Build timestamp: ${BUILD_TIMESTAMP}") + +# get git commit id +EXECUTE_PROCESS(COMMAND git rev-parse HEAD OUTPUT_VARIABLE GIT_COMMIT_ID OUTPUT_STRIP_TRAILING_WHITESPACE) +ADD_DEFINITIONS(-DKPIKIWIDB_GIT_COMMIT_ID="${GIT_COMMIT_ID}") +MESSAGE(STATUS "Git commit id: ${GIT_COMMIT_ID}") ############# 
You should enable sanitizer if you are developing pika ############# # Uncomment the following two lines to enable AddressSanitizer to detect memory leaks and other memory-related bugs. @@ -119,12 +129,14 @@ ADD_SUBDIRECTORY(src) ############################################################################# ### custom target ############################################################################# -SET(CMAKE_EXPORT_COMPILE_COMMANDS ON) +# Add files which should be ignored while formatting +LIST(APPEND CLANG_FORMAT_IGNORE_FILES "${PROJECT_SOURCE_DIR}/src/storage/src/storage_murmur3.h") +FILE(WRITE ${BUILD_SUPPORT_DIR}/clang_format_exclusions.txt "") +FOREACH(IGNORE_FILE ${CLANG_FORMAT_IGNORE_FILES}) + FILE(APPEND ${BUILD_SUPPORT_DIR}/clang_format_exclusions.txt "${IGNORE_FILE}\n") +ENDFOREACH() STRING(CONCAT FORMAT_DIRS "${PROJECT_SOURCE_DIR}/src,") -FILE(WRITE - ${BUILD_SUPPORT_DIR}/clang_format_exclusions.txt - "${PROJECT_SOURCE_DIR}/src/redis_intset.c\n${PROJECT_SOURCE_DIR}/src/redis_zip_list.c") ADD_CUSTOM_TARGET(format COMMAND ${BUILD_SUPPORT_DIR}/run_clang_format.py ${CLANG_FORMAT_BIN} diff --git a/README.md b/README.md index 6bce1e508..69fdc0cce 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,42 @@ ![](docs/images/pikiwidb-logo.png) [中文](README_CN.md) -A C++11 implementation of Redis Server, use RocksDB for persist storage.(not including cluster yet) +A C++20 implementation of Redis Server, use RocksDB for persist storage.(not including cluster yet) ## Requirements -* C++11 + +* C++20 * Linux or OS X +## compile + +**It is recommended to use the latest version of Ubuntu or Debian for Linux systems** + +Execute compilation + +If the machine's GCC version is less than 11, especially on CentOS6 or CentOS7, you need to upgrade the gcc version firstly. 
+ +Execute the following commands on CentOS: + +```bash +sudo yum -y install centos-release-scl +sudo yum -y install devtoolset-11-gcc devtoolset-11-gcc-c++ +scl enable devtoolset-11 bash +``` + +Execute this command to start compiling Pikiwidb: + +```bash +./build.sh +``` + +Pikiwidb is compiled by default in release mode, which does not support debugging. If debugging is needed, compile in debug mode. + +```bash +./clean.sh +./build.sh --debug +``` + ## Support module for write your own extensions PikiwiDB supports module now, still in progress, much work to do. I added three commands(ldel, skeys, hgets) for demonstration. diff --git a/README_CN.md b/README_CN.md index 9f41a66a2..45f29f4f9 100644 --- a/README_CN.md +++ b/README_CN.md @@ -2,79 +2,122 @@ ![](docs/images/pikiwidb-logo.png) [Click me switch to English](README.en.md) -C++11实现的增强版Redis服务器,使用RocksDB作为持久化存储引擎。(集群支持尚正在计划中) +C++20 实现的增强版 Redis 服务器,使用 RocksDB 作为持久化存储引擎。(集群支持尚正在计划中) ## 环境需求 -* C++11、CMake + +* C++20、CMake * Linux 或 MAC OS -## 与Redis完全兼容 - 你可以用redis的各种工具来测试PikiwiDB,比如官方的redis-cli, redis-benchmark。 +## 编译 + +**建议使用最新版本的 Ubuntu 或 Debian Linux 系统** + +执行编译: + +如果机器的 GCC 版本低于 11,特别是在 CentOS 6.x 或 CentOS 7.x 上,你需要先升级 GCC 版本。 + +在 CentOS 上执行以下命令: + +```bash +sudo yum -y install centos-release-scl +sudo yum -y install devtoolset-11-gcc devtoolset-11-gcc-c++ +scl enable devtoolset-11 bash +``` + +执行以下命令开始编译 PikiwiDB: + +```bash +./build.sh +``` + +PikiwiDB 默认以 release 模式编译,不支持调试。如果需要调试,请以 debug 模式编译。 + +```bash +./clean.sh +./build.sh --debug +``` + +## 与 Redis 完全兼容 - PikiwiDB可以和redis之间进行复制,可以读取redis的rdb文件或aof文件。当然,PikiwiDB生成的aof或rdb文件也可以被redis读取。 +你可以用 Redis 的各种工具来测试 PikiwiDB,比如官方的 redis-cli, redis-benchmark。 - 你还可以用redis-sentinel来实现PikiwiDB的高可用! +PikiwiDB 可以和 Redis 之间进行复制,可以读取 Redis 的 rdb 文件或 aof 文件。当然,PikiwiDB 生成的 aof 或 rdb 文件也可以被 Redis 读取。 - 总之,PikiwiDB与Redis完全兼容。 +你还可以用 redis-sentinel 来实现 PikiwiDB 的高可用! 
+ +总之,PikiwiDB 与 Redis 完全兼容。 ## 高性能 -- PikiwiDB性能大约比Redis3.2高出20%(使用redis-benchmark测试pipeline请求,比如设置-P=50或更高) -- PikiwiDB的高性能有一部分得益于独立的网络线程处理IO,因此和redis比占了便宜。但PikiwiDB逻辑仍然是单线程的。 -- 另一部分得益于C++ STL的高效率(CLANG的表现比GCC更好)。 -- 在测试前,你要确保std::list的size()是O(1)复杂度,这才遵循C++11的标准。否则list相关命令不可测。 -运行下面这个命令,试试和redis比一比~ +- PikiwiDB 性能大约比 Redis 3.2 高出 20% (使用 redis-benchmark 测试 pipeline 请求,比如设置 -P=50 或更高) +- PikiwiDB 的高性能有一部分得益于独立的网络线程处理 IO,因此和 redis 比占了便宜。但 PikiwiDB 逻辑仍然是单线程的。 +- 另一部分得益于 C++ STL 的高效率(CLANG 的表现比 GCC 更好)。 +- 在测试前,你要确保 std::list 的 size() 是 O(1) 复杂度,这才遵循 C++11 的标准。否则 list 相关命令不可测。 + +运行下面这个命令,试试和 redis 比一比~ ```bash ./redis-benchmark -q -n 1000000 -P 50 -c 50 ``` -## 编写扩展模块 - PikiwiDB支持动态库模块,可以在运行时添加新命令。 - 我添加了三个命令(ldel, skeys, hgets)作为演示。 - ## 支持冷数据淘汰 - 是的,在内存受限的情况下,你可以让PikiwiDB根据简单的LRU算法淘汰一些key以释放内存。 + +是的,在内存受限的情况下,你可以让 PikiwiDB 根据简单的 LRU 算法淘汰一些 key 以释放内存。 ## 主从复制,事务,RDB/AOF持久化,慢日志,发布订阅 - 这些特性PikiwiDB都有:-) + +这些特性 PikiwiDB 都有:-) ## 持久化:内存不再是上限 - RocksDB可以配置为PikiwiDB的持久化存储引擎,可以存储更多的数据。 +RocksDB 可以配置为 PikiwiDB 的持久化存储引擎,可以存储更多的数据。 ## 命令列表 -#### 展示PikiwiDB支持的所有命令 + +#### 展示 PikiwiDB 支持的所有命令 + - cmdlist #### key commands + - type exists del expire pexpire expireat pexpireat ttl pttl persist move keys randomkey rename renamenx scan sort #### server commands + - select dbsize bgsave save lastsave flushdb flushall client debug shutdown bgrewriteaof ping echo info monitor auth #### string commands + - set get getrange setrange getset append bitcount bitop getbit setbit incr incrby incrbyfloat decr decrby mget mset msetnx setnx setex psetex strlen #### list commands + - lpush rpush lpushx rpushx lpop rpop lindex llen lset ltrim lrange linsert lrem rpoplpush blpop brpop brpoplpush #### hash commands + - hget hmget hgetall hset hsetnx hmset hlen hexists hkeys hvals hdel hincrby hincrbyfloat hscan hstrlen #### set commands + - sadd scard srem sismember smembers sdiff sdiffstore sinter sinterstore sunion sunionstore smove spop srandmember sscan #### sorted set commands + - zadd 
zcard zrank zrevrank zrem zincrby zscore zrange zrevrange zrangebyscore zrevrangebyscore zremrangebyrank zremrangebyscore #### pubsub commands + - subscribe unsubscribe publish psubscribe punsubscribe pubsub #### multi commands + - watch unwatch multi exec discard #### replication commands + - sync slaveof + ## Contact Us diff --git a/build.sh b/build.sh index a7d6c28d9..0cf18b5b0 100755 --- a/build.sh +++ b/build.sh @@ -6,12 +6,6 @@ C_GREEN="\033[32m" C_END="\033[0m" -BUILD_TIME=$(git log -1 --format=%ai) -BUILD_TIME=${BUILD_TIME: 0: 10} - -COMMIT_ID=$(git rev-parse HEAD) -SHORT_COMMIT_ID=${COMMIT_ID: 0: 8} - BUILD_TYPE=release VERBOSE=0 CMAKE_FLAGS="" @@ -66,19 +60,11 @@ fi echo "cpu core ${CPU_CORE}" -if [ -z "$SHORT_COMMIT_ID" ]; then - echo "no git commit id" - SHORT_COMMIT_ID="pikiwidb" -fi - -echo "BUILD_TIME:" $BUILD_TIME -echo "COMMIT_ID:" $SHORT_COMMIT_ID - echo "BUILD_TYPE:" $BUILD_TYPE echo "CMAKE_FLAGS:" $CMAKE_FLAGS echo "MAKE_FLAGS:" $MAKE_FLAGS -cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILD_TIME=$BUILD_TIME -DGIT_COMMIT_ID=$SHORT_COMMIT_ID ${CMAKE_FLAGS} -S . -B ${PREFIX} +cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${CMAKE_FLAGS} -S . -B ${PREFIX} cmake --build ${PREFIX} -- ${MAKE_FLAGS} -j ${CPU_CORE} if [ $? -eq 0 ]; then diff --git a/cmake/boost.cmake b/cmake/boost.cmake deleted file mode 100644 index cf7f239e8..000000000 --- a/cmake/boost.cmake +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. 
- -include_guard() - -include(cmake/utils.cmake) - -FetchContent_DeclareGitHubWithMirror(pikiwidb-boost - pikiwidb/boost boost-1.83.0 - SHA256=A3B453E3D5FD39E6A4C733C31548512A1E74B7328D4C358FAC562930A0E6E5B4 -) - -FetchContent_MakeAvailableWithArgs(pikiwidb-boost) diff --git a/cmake/double-conversion.cmake b/cmake/double-conversion.cmake deleted file mode 100644 index 88b69768e..000000000 --- a/cmake/double-conversion.cmake +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. - -include_guard() - -include(cmake/utils.cmake) - -FetchContent_DeclareGitHubWithMirror(double-conversion - google/double-conversion v3.3.0 - SHA256=4080014235f90854ffade6d1c423940b314bbca273a338235f049da296e47183 -) -FetchContent_MakeAvailableWithArgs(double-conversion - BUILD_TESTING=OFF -) diff --git a/cmake/findTools.cmake b/cmake/findTools.cmake index 9b8bb651c..1373e978b 100644 --- a/cmake/findTools.cmake +++ b/cmake/findTools.cmake @@ -4,11 +4,8 @@ IF(${AUTOCONF} MATCHES AUTOCONF-NOTFOUND) MESSAGE(FATAL_ERROR "not find autoconf on localhost") ENDIF() -#set(CLANG_SEARCH_PATH "/usr/local/bin" "/usr/bin" "/usr/local/opt/llvm/bin" -# "/usr/local/opt/llvm@12/bin") FIND_PROGRAM(CLANG_FORMAT_BIN - NAMES clang-format - HINTS ${CLANG_SEARCH_PATH}) + NAMES clang-format) IF("${CLANG_FORMAT_BIN}" STREQUAL "CLANG_FORMAT_BIN-NOTFOUND") MESSAGE(WARNING "couldn't find clang-format.") ELSE() @@ -16,8 +13,7 @@ ELSE() ENDIF() FIND_PROGRAM(CLANG_TIDY_BIN - NAMES clang-tidy clang-tidy-12 - HINTS ${CLANG_SEARCH_PATH}) + NAMES clang-tidy clang-tidy-12 clang-tidy-14) IF("${CLANG_TIDY_BIN}" STREQUAL "CLANG_TIDY_BIN-NOTFOUND") MESSAGE(WARNING "couldn't find clang-tidy.") ELSE() @@ -34,9 +30,7 @@ ELSE() ENDIF() 
FIND_PROGRAM(CLANG_APPLY_REPLACEMENTS_BIN - NAMES clang-apply-replacements clang-apply-replacements-12 - HINTS ${CLANG_SEARCH_PATH}) - + NAMES clang-apply-replacements clang-apply-replacements-12 clang-apply-replacements-14) IF("${CLANG_APPLY_REPLACEMENTS_BIN}" STREQUAL "CLANG_APPLY_REPLACEMENTS_BIN-NOTFOUND") MESSAGE(WARNING "couldn't find clang-apply-replacements.") ELSE() diff --git a/cmake/fmt.cmake b/cmake/fmt.cmake index 2e2ca4f73..fd584943e 100644 --- a/cmake/fmt.cmake +++ b/cmake/fmt.cmake @@ -3,13 +3,11 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +FetchContent_Declare(fmt + URL https://github.com/fmtlib/fmt/archive/10.1.1.zip + URL_HASH SHA256=3c2e73019178ad72b0614a3124f25de454b9ca3a1afe81d5447b8d3cbdb6d322 + ) -FetchContent_DeclareGitHubWithMirror(fmt - fmtlib/fmt 10.1.1 - SHA256=3c2e73019178ad72b0614a3124f25de454b9ca3a1afe81d5447b8d3cbdb6d322 -) - -FetchContent_MakeAvailableWithArgs(fmt) +FetchContent_MakeAvailable(fmt) diff --git a/cmake/folly.cmake b/cmake/folly.cmake deleted file mode 100644 index 8185434eb..000000000 --- a/cmake/folly.cmake +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. - -set(DEPS_FETCH_PROXY "" CACHE STRING - "a template URL to proxy the traffic for fetching dependencies, e.g. 
with DEPS_FETCH_PROXY = https://some-proxy/, - https://example/some-dep.zip -> https://some-proxy/https://example/some-dep.zip") - -cmake_host_system_information(RESULT CPU_CORE QUERY NUMBER_OF_LOGICAL_CORES) - -if(CMAKE_GENERATOR STREQUAL "Ninja") - set(MAKE_COMMAND make -j${CPU_CORE}) -else() - set(MAKE_COMMAND $(MAKE) -j${CPU_CORE}) -endif() - -if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") - cmake_policy(SET CMP0135 NEW) -endif() - -include(cmake/utils.cmake) -include(cmake/boost.cmake) -include(cmake/unwind.cmake) -include(cmake/gflags.cmake) -include(cmake/glog.cmake) -include(cmake/double-conversion.cmake) -include(cmake/fmt.cmake) - -add_compile_definitions(FOLLY_NO_CONFIG) - -FetchContent_Declare(pikiwidb-folly - URL https://github.com/pikiwidb/folly/archive/v2023.10.16.00.zip - URL_HASH SHA256=EB29DC13474E3979A0680F624FF5820FA7A4E9CE0110607669AE87D69CFC104D - PATCH_COMMAND patch -p1 -s -E -i ${PROJECT_SOURCE_DIR}/cmake/patches/folly_coroutine.patch -) - -FetchContent_MakeAvailableWithArgs(pikiwidb-folly) - -target_link_libraries(pikiwidb-folly pikiwidb-boost glog double-conversion fmt) -target_include_directories(pikiwidb-folly PUBLIC $) diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake index 410b1b067..939701148 100644 --- a/cmake/gflags.cmake +++ b/cmake/gflags.cmake @@ -3,13 +3,11 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. 
-include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) - -FetchContent_DeclareGitHubWithMirror(gflags - gflags/gflags v2.2.2 - SHA256=19713a36c9f32b33df59d1c79b4958434cb005b5b47dc5400a7a4b078111d9b5 +FetchContent_Declare(gflags + URL https://github.com/gflags/gflags/archive/v2.2.2.zip + URL_HASH SHA256=19713a36c9f32b33df59d1c79b4958434cb005b5b47dc5400a7a4b078111d9b5 ) FetchContent_MakeAvailableWithArgs(gflags diff --git a/cmake/glog.cmake b/cmake/glog.cmake deleted file mode 100644 index fbbde6fe9..000000000 --- a/cmake/glog.cmake +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. - -include_guard() - -include(cmake/utils.cmake) - -FetchContent_Declare(glog - URL https://github.com/google/glog/archive/v0.6.0.zip - URL_HASH SHA256=122fb6b712808ef43fbf80f75c52a21c9760683dae470154f02bddfc61135022 - PATCH_COMMAND patch -p1 -s -E -i ${PROJECT_SOURCE_DIR}/cmake/patches/glog_demangle.patch -) - -FetchContent_MakeAvailableWithArgs(glog - CMAKE_MODULE_PATH=${PROJECT_SOURCE_DIR}/cmake/modules/glog - WITH_GFLAGS=ON - BUILD_TESTING=OFF - BUILD_SHARED_LIBS=OFF - WITH_UNWIND=ON -) diff --git a/cmake/modules/glog/FindUnwind.cmake b/cmake/modules/glog/FindUnwind.cmake deleted file mode 100644 index 34a6aa33b..000000000 --- a/cmake/modules/glog/FindUnwind.cmake +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. 
- -if(unwind_SOURCE_DIR) - message(STATUS "Found unwind in ${unwind_SOURCE_DIR}") - - add_library(unwind::unwind ALIAS unwind) - install(TARGETS unwind EXPORT glog-targets) -endif() diff --git a/cmake/modules/glog/Findgflags.cmake b/cmake/modules/glog/Findgflags.cmake deleted file mode 100644 index ffcec18c6..000000000 --- a/cmake/modules/glog/Findgflags.cmake +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. - -if(gflags_SOURCE_DIR) - message(STATUS "Found gflags in ${gflags_SOURCE_DIR}") - - install(TARGETS gflags_static EXPORT glog-targets) -endif() diff --git a/cmake/modules/spdlog/fmtConfig.cmake b/cmake/modules/spdlog/fmtConfig.cmake deleted file mode 100644 index e7342c31e..000000000 --- a/cmake/modules/spdlog/fmtConfig.cmake +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. 
- -if(fmt_SOURCE_DIR) - message(STATUS "Found fmt in ${fmt_SOURCE_DIR}") - - add_library(fmt::fmt ALIAS fmt) -endif() diff --git a/cmake/patches/folly_coroutine.patch b/cmake/patches/folly_coroutine.patch deleted file mode 100644 index 384d38025..000000000 --- a/cmake/patches/folly_coroutine.patch +++ /dev/null @@ -1,27 +0,0 @@ -diff --color -urN a/folly/experimental/coro/Coroutine.h b/folly/experimental/coro/Coroutine.h ---- a/folly/experimental/coro/Coroutine.h 2023-10-23 18:00:57.000000000 +0800 -+++ b/folly/experimental/coro/Coroutine.h 2023-10-31 10:49:25.549555846 +0800 -@@ -228,23 +228,9 @@ - - bool eager = false; - --// FIXME: when building against Apple SDKs using c++17, we hit this all over --// the place on complex testing infrastructure for iOS. Since it's not clear --// how to fix the issue properly right now, force ignore this warnings and help --// unblock expected/optional coroutines. This should be removed once the build --// configuration is changed to use -Wno-deprecated-experimental-coroutine. 
--#if defined(__clang__) && (__clang_major__ < 17 && __clang_major__ > 13) --#pragma clang diagnostic push --#pragma clang diagnostic ignored "-Wdeprecated-experimental-coroutine" - static detect_promise_return_object_eager_conversion_ go() noexcept { - co_return; - } --#pragma clang diagnostic pop --#else -- static detect_promise_return_object_eager_conversion_ go() noexcept { -- co_return; -- } --#endif - }; - - } // namespace detail diff --git a/cmake/patches/glog_demangle.patch b/cmake/patches/glog_demangle.patch deleted file mode 100644 index dcf1c85b1..000000000 --- a/cmake/patches/glog_demangle.patch +++ /dev/null @@ -1,214 +0,0 @@ -diff --color -urN a/CMakeLists.txt b/CMakeLists.txt ---- a/CMakeLists.txt 2022-04-05 06:03:27.000000000 +0800 -+++ b/CMakeLists.txt 2023-10-30 10:51:22.244757726 +0800 -@@ -589,7 +589,7 @@ - src/base/googleinit.h - src/base/mutex.h - src/demangle.cc -- src/demangle.h -+ src/glog_demangle.h - src/logging.cc - src/raw_logging.cc - src/symbolize.cc -diff --color -urN a/src/demangle.cc b/src/demangle.cc ---- a/src/demangle.cc 2022-04-05 06:03:27.000000000 +0800 -+++ b/src/demangle.cc 2023-10-30 10:51:22.244757726 +0800 -@@ -36,7 +36,7 @@ - - #include // for NULL - --#include "demangle.h" -+#include "glog_demangle.h" - #include "utilities.h" - - #if defined(GLOG_OS_WINDOWS) -diff --color -urN a/src/demangle.h b/src/demangle.h ---- a/src/demangle.h 2022-04-05 06:03:27.000000000 +0800 -+++ b/src/demangle.h 1970-01-01 08:00:00.000000000 +0800 -@@ -1,85 +0,0 @@ --// Copyright (c) 2006, Google Inc. --// All rights reserved. --// --// Redistribution and use in source and binary forms, with or without --// modification, are permitted provided that the following conditions are --// met: --// --// * Redistributions of source code must retain the above copyright --// notice, this list of conditions and the following disclaimer. 
--// * Redistributions in binary form must reproduce the above --// copyright notice, this list of conditions and the following disclaimer --// in the documentation and/or other materials provided with the --// distribution. --// * Neither the name of Google Inc. nor the names of its --// contributors may be used to endorse or promote products derived from --// this software without specific prior written permission. --// --// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS --// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT --// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR --// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT --// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT --// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, --// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY --// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT --// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE --// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --// --// Author: Satoru Takabayashi --// --// An async-signal-safe and thread-safe demangler for Itanium C++ ABI --// (aka G++ V3 ABI). -- --// The demangler is implemented to be used in async signal handlers to --// symbolize stack traces. We cannot use libstdc++'s --// abi::__cxa_demangle() in such signal handlers since it's not async --// signal safe (it uses malloc() internally). --// --// Note that this demangler doesn't support full demangling. More --// specifically, it doesn't print types of function parameters and --// types of template arguments. It just skips them. However, it's --// still very useful to extract basic information such as class, --// function, constructor, destructor, and operator names. 
--// --// See the implementation note in demangle.cc if you are interested. --// --// Example: --// --// | Mangled Name | The Demangler | abi::__cxa_demangle() --// |---------------|---------------|----------------------- --// | _Z1fv | f() | f() --// | _Z1fi | f() | f(int) --// | _Z3foo3bar | foo() | foo(bar) --// | _Z1fIiEvi | f<>() | void f(int) --// | _ZN1N1fE | N::f | N::f --// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar() --// | _Zrm1XS_" | operator%() | operator%(X, X) --// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo() --// | _Z1fSs | f() | f(std::basic_string, --// | | | std::allocator >) --// --// See the unit test for more examples. --// --// Note: we might want to write demanglers for ABIs other than Itanium --// C++ ABI in the future. --// -- --#ifndef BASE_DEMANGLE_H_ --#define BASE_DEMANGLE_H_ -- --#include "config.h" --#include -- --_START_GOOGLE_NAMESPACE_ -- --// Demangle "mangled". On success, return true and write the --// demangled symbol name to "out". Otherwise, return false. --// "out" is modified even if demangling is unsuccessful. --bool GLOG_EXPORT Demangle(const char *mangled, char *out, size_t out_size); -- --_END_GOOGLE_NAMESPACE_ -- --#endif // BASE_DEMANGLE_H_ -diff --color -urN a/src/glog_demangle.h b/src/glog_demangle.h ---- a/src/glog_demangle.h 1970-01-01 08:00:00.000000000 +0800 -+++ b/src/glog_demangle.h 2023-10-30 10:51:22.244757726 +0800 -@@ -0,0 +1,85 @@ -+// Copyright (c) 2006, Google Inc. -+// All rights reserved. -+// -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following disclaimer -+// in the documentation and/or other materials provided with the -+// distribution. 
-+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived from -+// this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+// -+// Author: Satoru Takabayashi -+// -+// An async-signal-safe and thread-safe demangler for Itanium C++ ABI -+// (aka G++ V3 ABI). -+ -+// The demangler is implemented to be used in async signal handlers to -+// symbolize stack traces. We cannot use libstdc++'s -+// abi::__cxa_demangle() in such signal handlers since it's not async -+// signal safe (it uses malloc() internally). -+// -+// Note that this demangler doesn't support full demangling. More -+// specifically, it doesn't print types of function parameters and -+// types of template arguments. It just skips them. However, it's -+// still very useful to extract basic information such as class, -+// function, constructor, destructor, and operator names. -+// -+// See the implementation note in demangle.cc if you are interested. 
-+// -+// Example: -+// -+// | Mangled Name | The Demangler | abi::__cxa_demangle() -+// |---------------|---------------|----------------------- -+// | _Z1fv | f() | f() -+// | _Z1fi | f() | f(int) -+// | _Z3foo3bar | foo() | foo(bar) -+// | _Z1fIiEvi | f<>() | void f(int) -+// | _ZN1N1fE | N::f | N::f -+// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar() -+// | _Zrm1XS_" | operator%() | operator%(X, X) -+// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo() -+// | _Z1fSs | f() | f(std::basic_string, -+// | | | std::allocator >) -+// -+// See the unit test for more examples. -+// -+// Note: we might want to write demanglers for ABIs other than Itanium -+// C++ ABI in the future. -+// -+ -+#ifndef BASE_DEMANGLE_H_ -+#define BASE_DEMANGLE_H_ -+ -+#include "config.h" -+#include -+ -+_START_GOOGLE_NAMESPACE_ -+ -+// Demangle "mangled". On success, return true and write the -+// demangled symbol name to "out". Otherwise, return false. -+// "out" is modified even if demangling is unsuccessful. -+bool GLOG_EXPORT Demangle(const char *mangled, char *out, size_t out_size); -+ -+_END_GOOGLE_NAMESPACE_ -+ -+#endif // BASE_DEMANGLE_H_ -diff --color -urN a/src/symbolize.cc b/src/symbolize.cc ---- a/src/symbolize.cc 2022-04-05 06:03:27.000000000 +0800 -+++ b/src/symbolize.cc 2023-10-30 10:51:22.244757726 +0800 -@@ -62,7 +62,7 @@ - #include - - #include "symbolize.h" --#include "demangle.h" -+#include "glog_demangle.h" - - _START_GOOGLE_NAMESPACE_ - diff --git a/cmake/rocksdb.cmake b/cmake/rocksdb.cmake index 14a0b6a42..69b4546e7 100644 --- a/cmake/rocksdb.cmake +++ b/cmake/rocksdb.cmake @@ -3,7 +3,7 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. 
-include_guard() +INCLUDE_GUARD() FetchContent_Declare( rocksdb @@ -11,21 +11,21 @@ FetchContent_Declare( GIT_TAG v8.3.3 ) -FetchContent_MakeAvailableWithArgs(rocksdb - BUILD_TYPE=OFF - WITH_TESTS=OFF - WITH_BENCHMARK=OFF - WITH_BENCHMARK_TOOLS=OFF - WITH_TOOLS=OFF - WITH_CORE_TOOLS=OFF - WITH_TRACE_TOOLS=OFF - WITH_EXAMPLES=OFF - ROCKSDB_BUILD_SHARED=OFF - WITH_LIBURING=OFF - WITH_LZ4=OFF - WITH_SNAPPY=OFF - WITH_ZLIB=ON - WITH_ZSTD=OFF - WITH_GFLAGS=OFF - USE_RTTI=ON -) +SET(BUILD_TYPE OFF CACHE BOOL "" FORCE) +SET(WITH_TESTS OFF CACHE BOOL "" FORCE) +SET(WITH_BENCHMARK OFF CACHE BOOL "" FORCE) +SET(WITH_BENCHMARK_TOOLS OFF CACHE BOOL "" FORCE) +SET(WITH_TOOLS OFF CACHE BOOL "" FORCE) +SET(WITH_CORE_TOOLS OFF CACHE BOOL "" FORCE) +SET(WITH_TRACE_TOOLS OFF CACHE BOOL "" FORCE) +SET(WITH_EXAMPLES OFF CACHE BOOL "" FORCE) +SET(ROCKSDB_BUILD_SHARED OFF CACHE BOOL "" FORCE) +SET(WITH_LIBURING OFF CACHE BOOL "" FORCE) +SET(WITH_LZ4 OFF CACHE BOOL "" FORCE) +SET(WITH_SNAPPY OFF CACHE BOOL "" FORCE) +SET(WITH_ZLIB ON CACHE BOOL "" FORCE) +SET(WITH_ZSTD OFF CACHE BOOL "" FORCE) +SET(WITH_GFLAGS OFF CACHE BOOL "" FORCE) +SET(USE_RTTI ON CACHE BOOL "" FORCE) + +FetchContent_MakeAvailable(rocksdb) diff --git a/cmake/spdlog.cmake b/cmake/spdlog.cmake index c7e701428..e04a1f050 100644 --- a/cmake/spdlog.cmake +++ b/cmake/spdlog.cmake @@ -3,16 +3,13 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. 
-include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +FetchContent_Declare(spdlog + URL https://github.com/gabime/spdlog/archive/v1.12.0.zip + URL_HASH SHA256=6174BF8885287422A6C6A0312EB8A30E8D22BCFCEE7C48A6D02D1835D7769232 + ) -FetchContent_DeclareGitHubWithMirror(spdlog - gabime/spdlog v1.12.0 - SHA256=6174BF8885287422A6C6A0312EB8A30E8D22BCFCEE7C48A6D02D1835D7769232 -) - -FetchContent_MakeAvailableWithArgs(spdlog - CMAKE_MODULE_PATH=${PROJECT_SOURCE_DIR}/cmake/modules/spdlog - SPDLOG_FMT_EXTERNAL=ON -) +SET(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules/spdlog" CACHE STRING "" FORCE) +SET(SPDLOG_FMT_EXTERNAL ON CACHE BOOL "" FORCE) +FetchContent_MakeAvailable(spdlog) diff --git a/cmake/unwind.cmake b/cmake/unwind.cmake deleted file mode 100644 index 43f7e4f28..000000000 --- a/cmake/unwind.cmake +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. 
- -include_guard() - -include(cmake/utils.cmake) - -FetchContent_DeclareGitHubWithMirror(unwind - libunwind/libunwind v1.7.2 - SHA256=f39929bff6ebd4426e806f0e834e077f2dc3c16fa19dbfb8996a1c93b3caf8cb -) - -FetchContent_GetProperties(unwind) -if(NOT unwind_POPULATED) - FetchContent_Populate(unwind) - - execute_process(COMMAND autoreconf -i - WORKING_DIRECTORY ${unwind_SOURCE_DIR} - ) - execute_process(COMMAND ${unwind_SOURCE_DIR}/configure CC=${CMAKE_C_COMPILER} -C --enable-static=yes --enable-shared=no --enable-minidebuginfo=no --enable-zlibdebuginfo=no --disable-documentation --disable-tests - WORKING_DIRECTORY ${unwind_BINARY_DIR} - ) - add_custom_target(make_unwind - COMMAND ${MAKE_COMMAND} - WORKING_DIRECTORY ${unwind_BINARY_DIR} - BYPRODUCTS ${unwind_BINARY_DIR}/src/.libs/libunwind.a - ) -endif() - -add_library(unwind INTERFACE) -target_include_directories(unwind INTERFACE $ $) -target_link_libraries(unwind INTERFACE $) -add_dependencies(unwind make_unwind) diff --git a/cmake/utils.cmake b/cmake/utils.cmake deleted file mode 100644 index 79502e0d2..000000000 --- a/cmake/utils.cmake +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. 
- -include_guard() - -include(FetchContent) - -macro(parse_var arg key value) - string(REGEX REPLACE "^(.+)=(.+)$" "\\1;\\2" REGEX_RESULT ${arg}) - list(GET REGEX_RESULT 0 ${key}) - list(GET REGEX_RESULT 1 ${value}) -endmacro() - -function(FetchContent_MakeAvailableWithArgs dep) - if(NOT ${dep}_POPULATED) - FetchContent_Populate(${dep}) - - foreach(arg IN LISTS ARGN) - parse_var(${arg} key value) - set(${key}_OLD ${${key}}) - set(${key} ${value} CACHE INTERNAL "") - endforeach() - - add_subdirectory(${${dep}_SOURCE_DIR} ${${dep}_BINARY_DIR} EXCLUDE_FROM_ALL) - - foreach(arg IN LISTS ARGN) - parse_var(${arg} key value) - set(${key} ${${key}_OLD} CACHE INTERNAL "") - endforeach() - endif() -endfunction() - -function(FetchContent_DeclareWithMirror dep url hash) - FetchContent_Declare(${dep} - URL ${DEPS_FETCH_PROXY}${url} - URL_HASH ${hash} - ) -endfunction() - -function(FetchContent_DeclareGitHubWithMirror dep repo tag hash) - FetchContent_DeclareWithMirror(${dep} - https://github.com/${repo}/archive/${tag}.zip - ${hash} - ) -endfunction() diff --git a/pikiwidb.conf b/pikiwidb.conf index e720a2133..9be37fe90 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -10,7 +10,7 @@ port 9221 # If you want you can bind a single interface, if the bind option is not # specified all the interfaces will listen for incoming connections. # -# bind 127.0.0.1 +ip 127.0.0.1 # Close the connection after a client is idle for N seconds (0 to disable) @@ -35,7 +35,7 @@ logfile stdout # Set the number of databases. 
The default database is DB 0, you can select # a different one on a per-connection basis using SELECT where # dbid is a number between 0 and 'databases'-1 -databases 3 +databases 16 ################################ SNAPSHOTTING ################################# # @@ -57,7 +57,7 @@ databases 3 # points by adding a save directive with a single empty string argument # like in the following example: # -save "" +# save "" #save 900 1 #save 300 10 @@ -315,37 +315,27 @@ slowlog-log-slower-than 10000 # You can reclaim memory used by the slow log with SLOWLOG RESET. slowlog-max-len 128 -############################### ADVANCED CONFIG ############################### - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeot, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are perforemd with the same frequency, but Redis checks for -# tasks to perform accordingly to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 ############################### BACKENDS CONFIG ############################### -# PikiwiDB is a in memory database, though it has aof and rdb for dump data to disk, it -# is very limited. Try use leveldb for real storage, pikiwidb as cache. 
The cache algorithm -# is like linux page cache, please google or read your favorite linux book -# 0 is default, no backend -# 1 is RocksDB, currently only support RocksDB -backend 1 -backendpath dump -# the frequency of dump to backend per second -backendhz 10 -# the rocksdb number per db -db-instance-num 5 -# default 86400 * 7 -rocksdb-ttl-second 604800 -# default 86400 * 3 -rocksdb-periodic-second 259200; +# PikiwiDB uses RocksDB as the underlying storage engine, and the data belonging +# to the same DB is distributed among several RocksDB instances. + +# RocksDB instances number per DB +db-instance-num 3 +# default is 86400 * 7 +small-compaction-threshold 604800 +# default is 86400 * 3 +small-compaction-duration-threshold 259200 + +############################### ROCKSDB CONFIG ############################### +rocksdb-max-subcompactions 2 +rocksdb-max-background-jobs 4 +rocksdb-max-write-buffer-number 2 +rocksdb-min-write-buffer-number-to-merge 2 +# default is 64M +rocksdb-write-buffer-size 67108864 +rocksdb-level0-file-num-compaction-trigger 4 +rocksdb-number-levels 7 +rocksdb-enable-pipelined-write no +rocksdb-level0-slowdown-writes-trigger 20 +rocksdb-level0-stop-writes-trigger 36 + diff --git a/pikiwidbtests.sh b/pikiwidbtests.sh new file mode 100755 index 000000000..d5b691935 --- /dev/null +++ b/pikiwidbtests.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# clear the log file +function cleanup() { + rm -rf ./logs* + rm -rf ./db* + rm -rf dbsync/ + rm src/redis-server +} + +# check if tcl is installed +function check_tcl { + if [ -z "$(which tclsh)" ]; then + echo "tclsh is not installed" + exit 1 + fi +} + +# handle different build directories. +function setup_build_dir { + BUILD_DIR="./bin" + echo "BUILD_DIR: $BUILD_DIR" +} + +# setup pikiwidb bin and conf +function setup_pikiwidb_bin { + PIKIWIDB_BIN="./$BUILD_DIR/pikiwidb" + if [ ! 
-f "$PIKIWIDB_BIN" ]; then + echo "pikiwidb bin not found" + exit 1 + fi + cp $PIKIWIDB_BIN src/redis-server + cp ./pikiwidb.conf tests/assets/default.conf +} + + +cleanup + +check_tcl + +setup_build_dir + +setup_pikiwidb_bin + +echo "run pikiwidb tests $1" + +if [ "$1" == "all" ]; then + tclsh tests/test_helper.tcl --clients 1 +else + tclsh tests/test_helper.tcl --clients 1 --single unit/$1 +fi + +if [ $? -ne 0 ]; then + echo "pikiwidb tests failed" + cleanup + exit 1 +fi + +# You can use './pikiwidb.sh all clean 'to ensure that the +# data can be deleted immediately after the test +if [ "$2" == "clean" ]; then + cleanup +fi \ No newline at end of file diff --git a/src/base_cmd.cc b/src/base_cmd.cc index d4218d226..44c76f6fa 100644 --- a/src/base_cmd.cc +++ b/src/base_cmd.cc @@ -7,6 +7,7 @@ #include "base_cmd.h" #include "common.h" +#include "log.h" #include "pikiwidb.h" namespace pikiwidb { @@ -15,8 +16,8 @@ BaseCmd::BaseCmd(std::string name, int16_t arity, uint32_t flag, uint32_t aclCat name_ = std::move(name); arity_ = arity; flag_ = flag; - aclCategory_ = aclCategory; - cmdId_ = g_pikiwidb->GetCmdTableManager().GetCmdId(); + acl_category_ = aclCategory; + cmd_id_ = g_pikiwidb->GetCmdID(); } bool BaseCmd::CheckArg(size_t num) const { @@ -29,8 +30,10 @@ bool BaseCmd::CheckArg(size_t num) const { std::vector BaseCmd::CurrentKey(PClient* client) const { return std::vector{client->Key()}; } void BaseCmd::Execute(PClient* client) { + DEBUG("execute command: {}", client->CmdName()); + auto dbIndex = client->GetCurrentDB(); - if (!isExclusive()) { + if (!HasFlag(kCmdFlagsExclusive)) { PSTORE.GetBackend(dbIndex)->LockShared(); } @@ -39,7 +42,7 @@ void BaseCmd::Execute(PClient* client) { } DoCmd(client); - if (!isExclusive()) { + if (!HasFlag(kCmdFlagsExclusive)) { PSTORE.GetBackend(dbIndex)->UnLockShared(); } } @@ -55,13 +58,13 @@ void BaseCmd::SetFlag(uint32_t flag) { flag_ |= flag; } void BaseCmd::ResetFlag(uint32_t flag) { flag_ &= ~flag; } bool 
BaseCmd::HasSubCommand() const { return false; } BaseCmd* BaseCmd::GetSubCmd(const std::string& cmdName) { return nullptr; } -uint32_t BaseCmd::AclCategory() const { return aclCategory_; } -void BaseCmd::AddAclCategory(uint32_t aclCategory) { aclCategory_ |= aclCategory; } +uint32_t BaseCmd::AclCategory() const { return acl_category_; } +void BaseCmd::AddAclCategory(uint32_t aclCategory) { acl_category_ |= aclCategory; } std::string BaseCmd::Name() const { return name_; } // CmdRes& BaseCommand::Res() { return res_; } // void BaseCommand::SetResp(const std::shared_ptr& resp) { resp_ = resp; } // std::shared_ptr BaseCommand::GetResp() { return resp_.lock(); } -uint32_t BaseCmd::GetCmdId() const { return cmdId_; } +uint32_t BaseCmd::GetCmdID() const { return cmd_id_; } // BaseCmdGroup BaseCmdGroup::BaseCmdGroup(const std::string& name, uint32_t flag) : BaseCmdGroup(name, -2, flag) {} @@ -86,4 +89,4 @@ bool BaseCmdGroup::DoInitial(PClient* client) { return true; } -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/base_cmd.h b/src/base_cmd.h index 1c9698bdc..d0a55242c 100644 --- a/src/base_cmd.h +++ b/src/base_cmd.h @@ -21,16 +21,22 @@ namespace pikiwidb { // command definition +// base cmd +const std::string kCmdNamePing = "ping"; // key cmd const std::string kCmdNameDel = "del"; const std::string kCmdNameExists = "exists"; +const std::string kCmdNameType = "type"; +const std::string kCmdNameExpire = "expire"; +const std::string kCmdNameTtl = "ttl"; const std::string kCmdNamePExpire = "pexpire"; const std::string kCmdNameExpireat = "expireat"; const std::string kCmdNamePExpireat = "pexpireat"; const std::string kCmdNamePersist = "persist"; const std::string kCmdNameKeys = "keys"; +const std::string kCmdNamePttl = "pttl"; // string cmd const std::string kCmdNameSet = "set"; const std::string kCmdNameGet = "get"; @@ -53,6 +59,7 @@ const std::string kCmdNameGetRange = "getrange"; const std::string kCmdNameSetRange = 
"setrange"; const std::string kCmdNameDecr = "decr"; const std::string kCmdNameIncr = "incr"; +const std::string kCmdNameMSetnx = "msetnx"; // multi const std::string kCmdNameMulti = "multi"; @@ -67,6 +74,7 @@ const std::string kCmdNameFlushdb = "flushdb"; const std::string kCmdNameFlushall = "flushall"; const std::string kCmdNameAuth = "auth"; const std::string kCmdNameSelect = "select"; +const std::string kCmdNameShutdown = "shutdown"; // hash cmd const std::string kCmdNameHSet = "hset"; @@ -84,6 +92,7 @@ const std::string kCmdNameHIncrbyFloat = "hincrbyfloat"; const std::string kCmdNameHSetNX = "hsetnx"; const std::string kCmdNameHIncrby = "hincrby"; const std::string kCmdNameHRandField = "hrandfield"; +const std::string kCmdNameHExists = "hexists"; // set cmd const std::string kCmdNameSIsMember = "sismember"; @@ -100,6 +109,7 @@ const std::string kCmdNameSPop = "spop"; const std::string kCmdNameSMembers = "smembers"; const std::string kCmdNameSDiff = "sdiff"; const std::string kCmdNameSDiffstore = "sdiffstore"; +const std::string kCmdNameSScan = "sscan"; // list cmd const std::string kCmdNameLPush = "lpush"; @@ -120,8 +130,18 @@ const std::string kCmdNameLLen = "llen"; const std::string kCmdNameZAdd = "zadd"; const std::string kCmdNameZRevrange = "zrevrange"; const std::string kCmdNameZRangebyscore = "zrangebyscore"; -const std::string kCmdNameZRevRangeByScore = "zrevrangebyscore"; +const std::string kCmdNameZRemrangebyscore = "zremrangebyscore"; +const std::string kCmdNameZRemrangebyrank = "zremrangebyrank"; +const std::string kCmdNameZRevrangebyscore = "zrevrangebyscore"; const std::string kCmdNameZCard = "zcard"; +const std::string kCmdNameZScore = "zscore"; +const std::string kCmdNameZRange = "zrange"; +const std::string kCmdNameZRangebylex = "zrangebylex"; +const std::string kCmdNameZRevrangebylex = "zrevrangebylex"; +const std::string kCmdNameZRank = "zrank"; +const std::string kCmdNameZRevrank = "zrevrank"; +const std::string kCmdNameZRem = "zrem"; 
+const std::string kCmdNameZIncrby = "zincrby"; enum CmdFlags { kCmdFlagsWrite = (1 << 0), // May modify the dataset @@ -273,9 +293,7 @@ class BaseCmd : public std::enable_shared_from_this { // void SetResp(const std::shared_ptr& resp); // std::shared_ptr GetResp(); - uint32_t GetCmdId() const; - - bool isExclusive() { return static_cast(flag_ & kCmdFlagsExclusive); } + uint32_t GetCmdID() const; protected: // Execute a specific command @@ -291,8 +309,8 @@ class BaseCmd : public std::enable_shared_from_this { // std::weak_ptr resp_; // uint64_t doDuration_ = 0; - uint32_t cmdId_ = 0; - uint32_t aclCategory_ = 0; + uint32_t cmd_id_ = 0; + uint32_t acl_category_ = 0; private: // The function to be executed first before executing `DoCmd` diff --git a/src/client.cc b/src/client.cc index 4296a7b96..749cc2380 100644 --- a/src/client.cc +++ b/src/client.cc @@ -289,7 +289,7 @@ int PClient::handlePacket(const char* start, int bytes) { return static_cast(ptr - start); } - DEFER { reset(); }; + // DEFER { reset(); }; // handle packet // const auto& params = parser_.GetParams(); @@ -329,10 +329,12 @@ int PClient::handlePacket(const char* start, int bytes) { // const PCommandInfo* info = PCommandTable::GetCommandInfo(cmdName_); // if (!info) { // 如果这个命令不存在,那么就走新的命令处理流程 - executeCommand(); + // executeCommand(); // return static_cast(ptr - start); // } + g_pikiwidb->SubmitFast(std::make_shared(shared_from_this())); + // check transaction // if (IsFlagOn(ClientFlag_multi)) { // if (cmdName_ != kCmdNameMulti && cmdName_ != kCmdNameExec && cmdName_ != kCmdNameWatch && @@ -376,24 +378,24 @@ int PClient::handlePacket(const char* start, int bytes) { // 为了兼容老的命令处理流程,新的命令处理流程在这里 // 后面可以把client这个类重构,完整的支持新的命令处理流程 void PClient::executeCommand() { - auto [cmdPtr, ret] = g_pikiwidb->GetCmdTableManager().GetCommand(CmdName(), this); - - if (!cmdPtr) { - if (ret == CmdRes::kInvalidParameter) { - SetRes(CmdRes::kInvalidParameter); - } else { - SetRes(CmdRes::kSyntaxErr, "unknown command '" + 
CmdName() + "'"); - } - return; - } + // auto [cmdPtr, ret] = g_pikiwidb->GetCmdTableManager().GetCommand(CmdName(), this); - if (!cmdPtr->CheckArg(params_.size())) { - SetRes(CmdRes::kWrongNum, CmdName()); - return; - } - - // execute a specific command - cmdPtr->Execute(this); + // if (!cmdPtr) { + // if (ret == CmdRes::kInvalidParameter) { + // SetRes(CmdRes::kInvalidParameter); + // } else { + // SetRes(CmdRes::kSyntaxErr, "unknown command '" + CmdName() + "'"); + // } + // return; + // } + // + // if (!cmdPtr->CheckArg(params_.size())) { + // SetRes(CmdRes::kWrongNum, CmdName()); + // return; + // } + // + // // execute a specific command + // cmdPtr->Execute(this); } PClient* PClient::Current() { return s_current; } @@ -420,13 +422,14 @@ int PClient::HandlePackets(pikiwidb::TcpConnection* obj, const char* start, int total += processed; } - obj->SendPacket(Message()); - Clear(); + // obj->SendPacket(Message()); + // Clear(); // reply_.Clear(); return total; } void PClient::OnConnect() { + SetState(ClientState::kOK); if (isPeerMaster()) { PREPL.SetMasterState(kPReplStateConnected); PREPL.SetMaster(std::static_pointer_cast(shared_from_this())); @@ -434,7 +437,7 @@ void PClient::OnConnect() { SetName("MasterConnection"); SetFlag(kClientFlagMaster); - if (g_config.masterauth.empty()) { + if (g_config.master_auth.empty()) { SetAuth(); } } else { @@ -446,7 +449,7 @@ void PClient::OnConnect() { const std::string& PClient::PeerIP() const { if (auto c = getTcpConnection(); c) { - return c->GetPeerIp(); + return c->GetPeerIP(); } static const std::string kEmpty; @@ -492,7 +495,17 @@ bool PClient::SendPacket(const evbuffer_iovec* iovecs, size_t nvecs) { return false; } +void PClient::WriteReply2Client() { + if (auto c = getTcpConnection(); c) { + c->SendPacket(Message()); + } + Clear(); + reset(); +} + void PClient::Close() { + SetState(ClientState::kClosed); + reset(); if (auto c = getTcpConnection(); c) { c->ActiveClose(); tcp_connection_.reset(); diff --git 
a/src/client.h b/src/client.h index e4aefe02e..7e0940eaa 100644 --- a/src/client.h +++ b/src/client.h @@ -101,6 +101,11 @@ enum ClientFlag { kClientFlagMaster = (1 << 3), }; +enum class ClientState { + kOK, + kClosed, +}; + class DB; struct PSlaveInfo; @@ -121,6 +126,8 @@ class PClient : public std::enable_shared_from_this, public CmdRes { bool SendPacket(UnboundedBuffer& data); bool SendPacket(const evbuffer_iovec* iovecs, size_t nvecs); + void WriteReply2Client(); + void Close(); // dbno @@ -196,6 +203,12 @@ class PClient : public std::enable_shared_from_this, public CmdRes { bool GetAuth() const { return auth_; } void RewriteCmd(std::vector& params) { parser_.SetParams(params); } + inline size_t ParamsSize() const { return params_.size(); } + + inline ClientState State() const { return state_; } + + inline void SetState(ClientState state) { state_ = state; } + // All parameters of this command (including the command itself) // e.g:["set","key","value"] std::span argv_; @@ -245,6 +258,8 @@ class PClient : public std::enable_shared_from_this, public CmdRes { bool auth_ = false; time_t last_auth_ = 0; + ClientState state_; + static thread_local PClient* s_current; }; -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 378c98c06..141b08a9d 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -8,6 +8,7 @@ #include "cmd_admin.h" #include "db.h" #include "pstd/env.h" +#include "pikiwidb.h" #include "store.h" namespace pikiwidb { @@ -21,14 +22,27 @@ CmdConfigGet::CmdConfigGet(const std::string& name, int16_t arity) bool CmdConfigGet::DoInitial(PClient* client) { return true; } -void CmdConfigGet::DoCmd(PClient* client) { client->AppendString("config cmd in development"); } +void CmdConfigGet::DoCmd(PClient* client) { + std::vector results; + for (int i = 0; i < client->argv_.size() - 2; i++) { + g_config.Get(client->argv_[i + 2], &results); + } + client->AppendStringVector(results); +} 
CmdConfigSet::CmdConfigSet(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsAdmin, kAclCategoryAdmin) {} bool CmdConfigSet::DoInitial(PClient* client) { return true; } -void CmdConfigSet::DoCmd(PClient* client) { client->AppendString("config cmd in development"); } +void CmdConfigSet::DoCmd(PClient* client) { + auto s = g_config.Set(client->argv_[2], client->argv_[3]); + if (!s.ok()) { + client->SetRes(CmdRes::kInvalidParameter); + } else { + client->SetRes(CmdRes::kOK); + } +} FlushdbCmd::FlushdbCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsExclusive | kCmdFlagsAdmin | kCmdFlagsWrite, @@ -106,4 +120,31 @@ void SelectCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kOK); } -} // namespace pikiwidb \ No newline at end of file +ShutdownCmd::ShutdownCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsAdmin | kCmdFlagsWrite, kAclCategoryAdmin | kAclCategoryWrite) {} + +bool ShutdownCmd::DoInitial(PClient* client) { + // For now, only shutdown need check local + if (client->PeerIP().find("127.0.0.1") == std::string::npos && + client->PeerIP().find(g_config.ip.ToString()) == std::string::npos) { + client->SetRes(CmdRes::kErrOther, kCmdNameShutdown + " should be localhost"); + return false; + } + return true; +} + +void ShutdownCmd::DoCmd(PClient* client) { + PSTORE.GetBackend(client->GetCurrentDB())->UnLockShared(); + g_pikiwidb->Stop(); + PSTORE.GetBackend(client->GetCurrentDB())->LockShared(); + client->SetRes(CmdRes::kNone); +} + +PingCmd::PingCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryList) {} + +bool PingCmd::DoInitial(PClient* client) { return true; } + +void PingCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kPong, "PONG"); } + +} // namespace pikiwidb diff --git a/src/cmd_admin.h b/src/cmd_admin.h index 9b20bca3f..d3093dd7d 100644 --- a/src/cmd_admin.h +++ b/src/cmd_admin.h @@ -12,8 +12,6 @@ namespace 
pikiwidb { -extern PConfig g_config; - class CmdConfig : public BaseCmdGroup { public: CmdConfig(const std::string& name, int arity); @@ -84,4 +82,26 @@ class SelectCmd : public BaseCmd { void DoCmd(PClient* client) override; }; -} // namespace pikiwidb +class ShutdownCmd : public BaseCmd { + public: + ShutdownCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class PingCmd : public BaseCmd { + public: + PingCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +} // namespace pikiwidb \ No newline at end of file diff --git a/src/cmd_hash.cc b/src/cmd_hash.cc index 7b3358e6f..60f2e3b01 100644 --- a/src/cmd_hash.cc +++ b/src/cmd_hash.cc @@ -4,7 +4,6 @@ * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. 
*/ - #include "cmd_hash.h" #include @@ -152,7 +151,7 @@ void HGetAllCmd::DoCmd(PClient* client) { int64_t total_fv = 0; int64_t cursor = 0; int64_t next_cursor = 0; - size_t raw_limit = g_config.max_client_response_size; + size_t raw_limit = g_config.max_client_response_size.load(); std::string raw; std::vector fvs; storage::Status s; @@ -255,7 +254,7 @@ HScanCmd::HScanCmd(const std::string& name, int16_t arity) bool HScanCmd::DoInitial(PClient* client) { if (auto size = client->argv_.size(); size != 3 && size != 5 && size != 7) { - client->SetRes(CmdRes::kSyntaxErr); + client->SetRes(CmdRes::kSyntaxErr, kCmdNameHScan); return false; } client->SetKey(client->argv_[1]); @@ -269,7 +268,7 @@ void HScanCmd::DoCmd(PClient* client) { int64_t count{10}; std::string pattern{"*"}; if (pstd::String2int(argv[2], &cursor) == 0) { - client->SetRes(CmdRes::kInvalidCursor); + client->SetRes(CmdRes::kInvalidCursor, kCmdNameHScan); return; } for (size_t i = 3; i < argv.size(); i += 2) { @@ -280,8 +279,12 @@ void HScanCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kInvalidInt, kCmdNameHScan); return; } + if (count < 0) { + client->SetRes(CmdRes::kSyntaxErr, kCmdNameHScan); + return; + } } else { - client->SetRes(CmdRes::kErrOther, kCmdNameHScan); + client->SetRes(CmdRes::kSyntaxErr, kCmdNameHScan); return; } } @@ -455,4 +458,27 @@ void HRandFieldCmd::DoCmd(PClient* client) { } } +HExistsCmd::HExistsCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryHash) {} + +bool HExistsCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void HExistsCmd::DoCmd(PClient* client) { + // parse arguments + auto& field = client->argv_[2]; + + // execute command + std::vector res; + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HExists(client->Key(), field); + if (!s.ok() && !s.IsNotFound()) { + return client->SetRes(CmdRes::kErrOther, s.ToString()); + } + + // reply + 
client->AppendInteger(s.IsNotFound() ? 0 : 1); +} + } // namespace pikiwidb diff --git a/src/cmd_hash.h b/src/cmd_hash.h index c1e1a8313..fa79a9cd6 100644 --- a/src/cmd_hash.h +++ b/src/cmd_hash.h @@ -181,4 +181,15 @@ class HRandFieldCmd : public BaseCmd { static constexpr std::string_view kWithValueString = "withvalues"; }; +class HExistsCmd : public BaseCmd { + public: + HExistsCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + } // namespace pikiwidb diff --git a/src/cmd_keys.cc b/src/cmd_keys.cc index 8499e247e..40b59544b 100644 --- a/src/cmd_keys.cc +++ b/src/cmd_keys.cc @@ -54,6 +54,79 @@ void ExistsCmd::DoCmd(PClient* client) { } } +TypeCmd::TypeCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryKeyspace) {} + +bool TypeCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void TypeCmd::DoCmd(PClient* client) { + std::vector types(1); + rocksdb::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetType(client->Key(), true, types); + if (s.ok()) { + client->AppendContent("+" + types[0]); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ExpireCmd::ExpireCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryKeyspace) {} + +bool ExpireCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ExpireCmd::DoCmd(PClient* client) { + uint64_t sec = 0; + if (pstd::String2int(client->argv_[2], &sec) == 0) { + client->SetRes(CmdRes ::kInvalidInt); + return; + } + auto res = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Expire(client->Key(), sec); + if (res != -1) { + client->AppendInteger(res); + } else { + client->SetRes(CmdRes::kErrOther, "expire internal error"); + } +} + +TtlCmd::TtlCmd(const 
std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryKeyspace) {} + +bool TtlCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void TtlCmd::DoCmd(PClient* client) { + std::map type_timestamp; + std::map type_status; + type_timestamp = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->TTL(client->Key(), &type_status); + for (const auto& item : type_timestamp) { + if (item.second == -3) { + client->SetRes(CmdRes::kErrOther, "ttl internal error"); + return; + } + } + if (type_timestamp[storage::kStrings] != -2) { + client->AppendInteger(type_timestamp[storage::kStrings]); + } else if (type_timestamp[storage::kHashes] != -2) { + client->AppendInteger(type_timestamp[storage::kHashes]); + } else if (type_timestamp[storage::kLists] != -2) { + client->AppendInteger(type_timestamp[storage::kLists]); + } else if (type_timestamp[storage::kZSets] != -2) { + client->AppendInteger(type_timestamp[storage::kZSets]); + } else if (type_timestamp[storage::kSets] != -2) { + client->AppendInteger(type_timestamp[storage::kSets]); + } else { + client->AppendInteger(-2); + } +} + PExpireCmd::PExpireCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryKeyspace) {} @@ -167,4 +240,59 @@ void KeysCmd::DoCmd(PClient* client) { } } +PttlCmd::PttlCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryKeyspace) {} + +bool PttlCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +// like Blackwidow , Floyd still possible has same key in different data structure +void PttlCmd::DoCmd(PClient* client) { + std::map type_status; + auto type_timestamp = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->TTL(client->Key(), &type_status); + for (const auto& item : type_timestamp) { + // mean operation exception errors happen in database 
+ if (item.second == -3) { + client->SetRes(CmdRes::kErrOther, "ttl internal error"); + return; + } + } + if (type_timestamp[storage::kStrings] != -2) { + if (type_timestamp[storage::kStrings] == -1) { + client->AppendInteger(-1); + } else { + client->AppendInteger(type_timestamp[storage::kStrings] * 1000); + } + } else if (type_timestamp[storage::kHashes] != -2) { + if (type_timestamp[storage::kHashes] == -1) { + client->AppendInteger(-1); + } else { + client->AppendInteger(type_timestamp[storage::kHashes] * 1000); + } + } else if (type_timestamp[storage::kLists] != -2) { + if (type_timestamp[storage::kLists] == -1) { + client->AppendInteger(-1); + } else { + client->AppendInteger(type_timestamp[storage::kLists] * 1000); + } + } else if (type_timestamp[storage::kSets] != -2) { + if (type_timestamp[storage::kSets] == -1) { + client->AppendInteger(-1); + } else { + client->AppendInteger(type_timestamp[storage::kSets] * 1000); + } + } else if (type_timestamp[storage::kZSets] != -2) { + if (type_timestamp[storage::kZSets] == -1) { + client->AppendInteger(-1); + } else { + client->AppendInteger(type_timestamp[storage::kZSets] * 1000); + } + } else { + // this key not exist + client->AppendInteger(-2); + } +} + } // namespace pikiwidb diff --git a/src/cmd_keys.h b/src/cmd_keys.h index c78453cee..fbe3048ad 100644 --- a/src/cmd_keys.h +++ b/src/cmd_keys.h @@ -33,6 +33,39 @@ class ExistsCmd : public BaseCmd { void DoCmd(PClient* client) override; }; +class TypeCmd : public BaseCmd { + public: + TypeCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class ExpireCmd : public BaseCmd { + public: + ExpireCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class TtlCmd : public BaseCmd { + public: + TtlCmd(const std::string& name, int16_t arity); + + 
protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + class PExpireCmd : public BaseCmd { public: PExpireCmd(const std::string& name, int16_t arity); @@ -87,4 +120,16 @@ class KeysCmd : public BaseCmd { private: void DoCmd(PClient* client) override; }; + +class PttlCmd : public BaseCmd { + public: + PttlCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + } // namespace pikiwidb diff --git a/src/cmd_kv.cc b/src/cmd_kv.cc index 3f222fae6..824ae2ca0 100644 --- a/src/cmd_kv.cc +++ b/src/cmd_kv.cc @@ -485,7 +485,7 @@ bool GetBitCmd::DoInitial(PClient* client) { void GetBitCmd::DoCmd(PClient* client) { int32_t bit_val = 0; long offset = 0; - if (!Strtol(client->argv_[2].c_str(), client->argv_[2].size(), &offset)) { + if (!pstd::String2int(client->argv_[2].c_str(), client->argv_[2].size(), &offset)) { client->SetRes(CmdRes::kInvalidInt); return; } @@ -543,8 +543,8 @@ bool SetBitCmd::DoInitial(PClient* client) { void SetBitCmd::DoCmd(PClient* client) { long offset = 0; long on = 0; - if (!Strtol(client->argv_[2].c_str(), client->argv_[2].size(), &offset) || - !Strtol(client->argv_[3].c_str(), client->argv_[3].size(), &on)) { + if (!pstd::String2int(client->argv_[2].c_str(), client->argv_[2].size(), &offset) || + !pstd::String2int(client->argv_[3].c_str(), client->argv_[3].size(), &on)) { client->SetRes(CmdRes::kInvalidInt); return; } @@ -596,4 +596,36 @@ void SetRangeCmd::DoCmd(PClient* client) { } client->AppendInteger(static_cast(ret)); } + +MSetnxCmd::MSetnxCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryString) {} + +bool MSetnxCmd::DoInitial(PClient* client) { + size_t argcSize = client->argv_.size(); + if (argcSize % 2 == 0) { + client->SetRes(CmdRes::kWrongNum, kCmdNameMSetnx); + return false; + } + std::vector keys; + for (size_t 
index = 1; index < argcSize; index += 2) { + keys.emplace_back(client->argv_[index]); + } + client->SetKey(keys); + return true; +} + +void MSetnxCmd::DoCmd(PClient* client) { + int32_t success = 0; + std::vector kvs; + for (size_t index = 1; index != client->argv_.size(); index += 2) { + kvs.push_back({client->argv_[index], client->argv_[index + 1]}); + } + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->MSetnx(kvs, &success); + if (s.ok()) { + client->AppendInteger(success); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + } // namespace pikiwidb diff --git a/src/cmd_kv.h b/src/cmd_kv.h index 24814b66d..1d73582dd 100644 --- a/src/cmd_kv.h +++ b/src/cmd_kv.h @@ -246,4 +246,16 @@ class SetRangeCmd : public BaseCmd { private: void DoCmd(PClient *client) override; }; -} // namespace pikiwidb \ No newline at end of file + +class MSetnxCmd : public BaseCmd { + public: + MSetnxCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +} // namespace pikiwidb diff --git a/src/cmd_list.cc b/src/cmd_list.cc index bbdf8dc1e..1805e410c 100644 --- a/src/cmd_list.cc +++ b/src/cmd_list.cc @@ -219,7 +219,7 @@ void LSetCmd::DoCmd(PClient* client) { // while strtol ensures that the string is within the range of long type const std::string index_str = client->argv_[2]; - if (IsValidNumber(index_str)) { + if (pstd::IsValidNumber(index_str)) { int64_t val = 0; if (1 != pstd::String2int(index_str, &val)) { client->SetRes(CmdRes::kErrOther, "lset cmd error"); // this will not happend in normal case diff --git a/src/cmd_list.h b/src/cmd_list.h index ebdfd29bb..1bbb6fe7b 100644 --- a/src/cmd_list.h +++ b/src/cmd_list.h @@ -150,4 +150,4 @@ class LLenCmd : public BaseCmd { private: void DoCmd(PClient* client) override; }; -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/cmd_set.cc 
b/src/cmd_set.cc index 7cf56ca06..352b8e695 100644 --- a/src/cmd_set.cc +++ b/src/cmd_set.cc @@ -316,4 +316,64 @@ void SDiffstoreCmd::DoCmd(PClient* client) { } client->AppendInteger(reply_num); } + +SScanCmd::SScanCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySet) {} + +bool SScanCmd::DoInitial(PClient* client) { + if (auto size = client->argv_.size(); size != 3 && size != 5 && size != 7) { + client->SetRes(CmdRes::kSyntaxErr); + return false; + } + client->SetKey(client->argv_[1]); + return true; +} + +void SScanCmd::DoCmd(PClient* client) { + const auto& argv = client->argv_; + // parse arguments + int64_t cursor = 0; + int64_t count = 10; + std::string pattern{"*"}; + if (pstd::String2int(argv[2], &cursor) == 0) { + client->SetRes(CmdRes::kInvalidCursor, kCmdNameSScan); + return; + } + for (size_t i = 3; i < argv.size(); i += 2) { + if (auto lower = pstd::StringToLower(argv[i]); kMatchSymbol == lower) { + pattern = argv[i + 1]; + } else if (kCountSymbol == lower) { + if (pstd::String2int(argv[i + 1], &count) == 0) { + client->SetRes(CmdRes::kInvalidInt, kCmdNameSScan); + return; + } + if (count < 0) { + client->SetRes(CmdRes::kSyntaxErr, kCmdNameSScan); + return; + } + } else { + client->SetRes(CmdRes::kSyntaxErr, kCmdNameSScan); + return; + } + } + + // execute command + std::vector members; + int64_t next_cursor{}; + auto status = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->SScan(client->Key(), cursor, pattern, count, &members, &next_cursor); + if (!status.ok() && !status.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, status.ToString()); + return; + } + + // reply to client + client->AppendArrayLen(2); + client->AppendString(std::to_string(next_cursor)); + client->AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + client->AppendString(member); + } +} } // namespace pikiwidb diff --git a/src/cmd_set.h b/src/cmd_set.h index 
78f00e395..9b0ad6019 100644 --- a/src/cmd_set.h +++ b/src/cmd_set.h @@ -165,4 +165,17 @@ class SDiffstoreCmd : public BaseCmd { void DoCmd(PClient *client) override; }; +class SScanCmd : public BaseCmd { + public: + SScanCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; + + static constexpr const char *kMatchSymbol = "match"; + static constexpr const char *kCountSymbol = "count"; +}; } // namespace pikiwidb diff --git a/src/cmd_table_manager.cc b/src/cmd_table_manager.cc index 4eb8e4524..f8429b836 100644 --- a/src/cmd_table_manager.cc +++ b/src/cmd_table_manager.cc @@ -37,18 +37,24 @@ void CmdTableManager::InitCmdTable() { configPtr->AddSubCmd(std::make_unique("get", -3)); configPtr->AddSubCmd(std::make_unique("set", -4)); cmds_->insert(std::make_pair(kCmdNameConfig, std::move(configPtr))); + ADD_COMMAND(Ping, 0); // server ADD_COMMAND(Flushdb, 1); ADD_COMMAND(Flushall, 1); ADD_COMMAND(Select, 2); + ADD_COMMAND(Shutdown, 1); // keyspace ADD_COMMAND(Del, -2); ADD_COMMAND(Exists, -2); + ADD_COMMAND(Type, 2); + ADD_COMMAND(Expire, 3); + ADD_COMMAND(Ttl, 2); ADD_COMMAND(PExpire, 3); ADD_COMMAND(Expireat, 3); ADD_COMMAND(PExpireat, 3); + ADD_COMMAND(Pttl, 2); ADD_COMMAND(Persist, 2); ADD_COMMAND(Keys, 2); @@ -74,6 +80,7 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(SetRange, 4); ADD_COMMAND(Decr, 2); ADD_COMMAND(SetBit, 4); + ADD_COMMAND(MSetnx, -3); // hash ADD_COMMAND(HSet, -4); @@ -91,6 +98,7 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(HSetNX, 4); ADD_COMMAND(HIncrby, 4); ADD_COMMAND(HRandField, -2); + ADD_COMMAND(HExists, 3); // set ADD_COMMAND(SIsMember, 3); @@ -107,6 +115,7 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(SMembers, 2); ADD_COMMAND(SDiff, -2); ADD_COMMAND(SDiffstore, -3); + ADD_COMMAND(SScan, -3); // list ADD_COMMAND(LPush, -3); @@ -127,8 +136,18 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(ZAdd, -4); 
ADD_COMMAND(ZRevrange, -4); ADD_COMMAND(ZRangebyscore, -4); - ADD_COMMAND(ZRevRangeByScore, -4); + ADD_COMMAND(ZRemrangebyscore, 4); + ADD_COMMAND(ZRemrangebyrank, 4); + ADD_COMMAND(ZRevrangebyscore, -4); ADD_COMMAND(ZCard, 2); + ADD_COMMAND(ZScore, 3); + ADD_COMMAND(ZRange, -4); + ADD_COMMAND(ZRangebylex, -3); + ADD_COMMAND(ZRevrangebylex, -3); + ADD_COMMAND(ZRank, 3); + ADD_COMMAND(ZRevrank, 3); + ADD_COMMAND(ZRem, -3); + ADD_COMMAND(ZIncrby, 4); } std::pair CmdTableManager::GetCommand(const std::string& cmdName, PClient* client) { diff --git a/src/cmd_thread_pool.cc b/src/cmd_thread_pool.cc new file mode 100644 index 000000000..03b44b7d4 --- /dev/null +++ b/src/cmd_thread_pool.cc @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#include "cmd_thread_pool.h" +#include "cmd_thread_pool_worker.h" +#include "log.h" + +namespace pikiwidb { + +void CmdThreadPoolTask::Run(BaseCmd *cmd) { cmd->Execute(client_.get()); } +const std::string &CmdThreadPoolTask::CmdName() { return client_->CmdName(); } +std::shared_ptr CmdThreadPoolTask::Client() { return client_; } + +CmdThreadPool::CmdThreadPool(std::string name) : name_(std::move(name)) {} + +pstd::Status CmdThreadPool::Init(int fast_thread, int slow_thread, std::string name) { + if (fast_thread <= 0) { + return pstd::Status::InvalidArgument("thread num must be positive"); + } + name_ = std::move(name); + fast_thread_num_ = fast_thread; + slow_thread_num_ = slow_thread; + threads_.reserve(fast_thread_num_ + slow_thread_num_); + workers_.reserve(fast_thread_num_ + slow_thread_num_); + return pstd::Status::OK(); +} + +void CmdThreadPool::Start() { + for (int i = 0; i < fast_thread_num_; ++i) { + auto fastWorker = std::make_shared(this, 2, "fast worker" + std::to_string(i)); + std::thread thread(&CmdWorkThreadPoolWorker::Work, fastWorker); + threads_.emplace_back(std::move(thread)); + workers_.emplace_back(fastWorker); + INFO("fast worker [{}] starting ...", i); + } + for (int i = 0; i < slow_thread_num_; ++i) { + auto slowWorker = std::make_shared(this, 2, "slow worker" + std::to_string(i)); + std::thread thread(&CmdWorkThreadPoolWorker::Work, slowWorker); + threads_.emplace_back(std::move(thread)); + workers_.emplace_back(slowWorker); + INFO("slow worker [{}] starting ...", i); + } +} + +void CmdThreadPool::SubmitFast(const std::shared_ptr &runner) { + std::unique_lock rl(fast_mutex_); + fast_tasks_.emplace_back(runner); + fast_condition_.notify_one(); +} + +void CmdThreadPool::SubmitSlow(const std::shared_ptr &runner) { + std::unique_lock rl(slow_mutex_); + slow_tasks_.emplace_back(runner); + slow_condition_.notify_one(); +} + +void CmdThreadPool::Stop() { DoStop(); } + +void CmdThreadPool::DoStop() { + if (stopped_.load()) { + return; + } + 
stopped_.store(true); + + for (auto &worker : workers_) { + worker->Stop(); + } + + { + std::unique_lock fl(fast_mutex_); + fast_condition_.notify_all(); + } + { + std::unique_lock sl(slow_mutex_); + slow_condition_.notify_all(); + } + + for (auto &thread : threads_) { + if (thread.joinable()) { + thread.join(); + } + } + threads_.clear(); + workers_.clear(); + fast_tasks_.clear(); + slow_tasks_.clear(); +} + +CmdThreadPool::~CmdThreadPool() { DoStop(); } + +} // namespace pikiwidb diff --git a/src/cmd_thread_pool.h b/src/cmd_thread_pool.h new file mode 100644 index 000000000..3b65d6c87 --- /dev/null +++ b/src/cmd_thread_pool.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include "base_cmd.h" +#include "pstd_status.h" + +namespace pikiwidb { + +// task interface +// inherit this class and implement the Run method +// then submit the task to the thread pool +class CmdThreadPoolTask { + public: + CmdThreadPoolTask(std::shared_ptr client) : client_(std::move(client)) {} + void Run(BaseCmd *cmd); + const std::string &CmdName(); + std::shared_ptr Client(); + + private: + std::shared_ptr client_; +}; + +class CmdWorkThreadPoolWorker; + +class CmdFastWorker; + +class CmdSlowWorker; + +class CmdThreadPool { + friend CmdWorkThreadPoolWorker; + friend CmdFastWorker; + friend CmdSlowWorker; + + public: + explicit CmdThreadPool() = default; + + explicit CmdThreadPool(std::string name); + + pstd::Status Init(int fast_thread, int slow_thread, std::string name); + + // start the thread pool + void Start(); + + // stop the thread pool + void Stop(); + + // submit a fast task to the thread pool + void SubmitFast(const 
std::shared_ptr &runner); + + // submit a slow task to the thread pool + void SubmitSlow(const std::shared_ptr &runner); + + // get the fast thread num + inline int FastThreadNum() const { return fast_thread_num_; }; + + // get the slow thread num + inline int SlowThreadNum() const { return slow_thread_num_; }; + + // get the thread pool size + inline int ThreadPollSize() const { return fast_thread_num_ + slow_thread_num_; }; + + ~CmdThreadPool(); + + private: + void DoStop(); + + private: + std::deque> fast_tasks_; // fast task queue + std::deque> slow_tasks_; // slow task queue + + std::vector threads_; + std::vector> workers_; + std::string name_; // thread pool name + int fast_thread_num_ = 0; + int slow_thread_num_ = 0; + std::mutex fast_mutex_; + std::condition_variable fast_condition_; + std::mutex slow_mutex_; + std::condition_variable slow_condition_; + std::atomic_bool stopped_ = false; +}; + +} // namespace pikiwidb diff --git a/src/cmd_thread_pool_worker.cc b/src/cmd_thread_pool_worker.cc new file mode 100644 index 000000000..cafa31a71 --- /dev/null +++ b/src/cmd_thread_pool_worker.cc @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#include "cmd_thread_pool_worker.h" +#include "log.h" +#include "pikiwidb.h" + +namespace pikiwidb { + +void CmdWorkThreadPoolWorker::Work() { + while (running_) { + LoadWork(); + for (const auto &task : self_task_) { + if (task->Client()->State() != ClientState::kOK) { // the client is closed + continue; + } + auto [cmdPtr, ret] = cmd_table_manager_.GetCommand(task->CmdName(), task->Client().get()); + + if (!cmdPtr) { + if (ret == CmdRes::kInvalidParameter) { + task->Client()->SetRes(CmdRes::kInvalidParameter); + } else { + task->Client()->SetRes(CmdRes::kSyntaxErr, "unknown command '" + task->CmdName() + "'"); + } + g_pikiwidb->PushWriteTask(task->Client()); + continue; + } + + if (!cmdPtr->CheckArg(task->Client()->ParamsSize())) { + task->Client()->SetRes(CmdRes::kWrongNum, task->CmdName()); + g_pikiwidb->PushWriteTask(task->Client()); + continue; + } + task->Run(cmdPtr); + g_pikiwidb->PushWriteTask(task->Client()); + } + self_task_.clear(); + } + INFO("worker [{}] goodbye...", name_); +} + +void CmdWorkThreadPoolWorker::Stop() { running_ = false; } + +void CmdFastWorker::LoadWork() { + std::unique_lock lock(pool_->fast_mutex_); + while (pool_->fast_tasks_.empty()) { + if (!running_) { + return; + } + pool_->fast_condition_.wait(lock); + } + + if (pool_->fast_tasks_.empty()) { + return; + } + const auto num = std::min(static_cast(pool_->fast_tasks_.size()), once_task_); + std::move(pool_->fast_tasks_.begin(), pool_->fast_tasks_.begin() + num, std::back_inserter(self_task_)); + pool_->fast_tasks_.erase(pool_->fast_tasks_.begin(), pool_->fast_tasks_.begin() + num); +} + +void CmdSlowWorker::LoadWork() { + { + std::unique_lock lock(pool_->slow_mutex_); + while (pool_->slow_tasks_.empty() && loop_more_) { // loopMore is used to get the fast worker + if (!running_) { + return; + } + pool_->slow_condition_.wait_for(lock, std::chrono::milliseconds(wait_time_)); + loop_more_ = false; + } + + const auto num = std::min(static_cast(pool_->slow_tasks_.size()), 
once_task_); + if (num > 0) { + std::move(pool_->slow_tasks_.begin(), pool_->slow_tasks_.begin() + num, std::back_inserter(self_task_)); + pool_->slow_tasks_.erase(pool_->slow_tasks_.begin(), pool_->slow_tasks_.begin() + num); + return; // If the slow task is obtained, the fast task is no longer obtained + } + } + + { + std::unique_lock lock(pool_->fast_mutex_); + loop_more_ = true; + + const auto num = std::min(static_cast(pool_->fast_tasks_.size()), once_task_); + if (num > 0) { + std::move(pool_->fast_tasks_.begin(), pool_->fast_tasks_.begin() + num, std::back_inserter(self_task_)); + pool_->fast_tasks_.erase(pool_->fast_tasks_.begin(), pool_->fast_tasks_.begin() + num); + } + } +} + +} // namespace pikiwidb diff --git a/src/cmd_thread_pool_worker.h b/src/cmd_thread_pool_worker.h new file mode 100644 index 000000000..ecc9361da --- /dev/null +++ b/src/cmd_thread_pool_worker.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#pragma once + +#include +#include + +#include "cmd_table_manager.h" +#include "cmd_thread_pool.h" + +namespace pikiwidb { + +class CmdWorkThreadPoolWorker { + public: + explicit CmdWorkThreadPoolWorker(CmdThreadPool *pool, int onceTask, std::string name) + : pool_(pool), once_task_(onceTask), name_(std::move(name)) { + cmd_table_manager_.InitCmdTable(); + } + + void Work(); + + void Stop(); + + // load the task from the thread pool + virtual void LoadWork() = 0; + + virtual ~CmdWorkThreadPoolWorker() = default; + + protected: + std::vector> self_task_; // the task that the worker get from the thread pool + CmdThreadPool *pool_ = nullptr; + const int once_task_ = 0; // the max task num that the worker can get from the thread pool + const std::string name_; + bool running_ = true; + + pikiwidb::CmdTableManager cmd_table_manager_; +}; + +// fast worker +class CmdFastWorker : public CmdWorkThreadPoolWorker { + public: + explicit CmdFastWorker(CmdThreadPool *pool, int onceTask, std::string name) + : CmdWorkThreadPoolWorker(pool, onceTask, std::move(name)) {} + + void LoadWork() override; +}; + +// slow worker +class CmdSlowWorker : public CmdWorkThreadPoolWorker { + public: + explicit CmdSlowWorker(CmdThreadPool *pool, int onceTask, std::string name) + : CmdWorkThreadPoolWorker(pool, onceTask, std::move(name)) {} + + // when the slow worker queue is empty, it will try to get the fast worker + void LoadWork() override; + + private: + bool loop_more_ = false; // When the slow queue is empty, try to get the fast queue + int wait_time_ = 200; // When the slow queue is empty, wait 200 ms to check again +}; + +} // namespace pikiwidb diff --git a/src/cmd_zset.cc b/src/cmd_zset.cc index 30127b77b..8fc8d02fb 100644 --- a/src/cmd_zset.cc +++ b/src/cmd_zset.cc @@ -48,6 +48,40 @@ int32_t DoScoreStrRange(std::string begin_score, std::string end_score, bool* le return 0; } +static int32_t DoMemberRange(const std::string& raw_min_member, const std::string& raw_max_member, 
bool* left_close, + bool* right_close, std::string* min_member, std::string* max_member) { + if (raw_min_member == "-") { + *min_member = "-"; + } else if (raw_min_member == "+") { + *min_member = "+"; + } else { + if (!raw_min_member.empty() && raw_min_member.at(0) == '(') { + *left_close = false; + } else if (!raw_min_member.empty() && raw_min_member.at(0) == '[') { + *left_close = true; + } else { + return -1; + } + min_member->assign(raw_min_member.begin() + 1, raw_min_member.end()); + } + + if (raw_max_member == "+") { + *max_member = "+"; + } else if (raw_max_member == "-") { + *max_member = "-"; + } else { + if (!raw_max_member.empty() && raw_max_member.at(0) == '(') { + *right_close = false; + } else if (!raw_max_member.empty() && raw_max_member.at(0) == '[') { + *right_close = true; + } else { + return -1; + } + max_member->assign(raw_max_member.begin() + 1, raw_max_member.end()); + } + return 0; +} + ZAddCmd::ZAddCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} @@ -220,33 +254,46 @@ void ZRangebyscoreCmd::DoCmd(PClient* client) { } } -ZCardCmd::ZCardCmd(const std::string& name, int16_t arity) - : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} +ZRemrangebyrankCmd::ZRemrangebyrankCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryString) {} -bool ZCardCmd::DoInitial(PClient* client) { +bool ZRemrangebyrankCmd::DoInitial(PClient* client) { client->SetKey(client->argv_[1]); return true; } -void ZCardCmd::DoCmd(PClient* client) { - int32_t reply_Num = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZCard(client->Key(), &reply_Num); - if (!s.ok()) { - client->SetRes(CmdRes::kSyntaxErr, "ZCard cmd error"); +void ZRemrangebyrankCmd::DoCmd(PClient* client) { + int32_t ret = 0; + int32_t start = 0; + int32_t end = 0; + + if 
(pstd::String2int(client->argv_[2], &start) == 0) { + client->SetRes(CmdRes::kInvalidInt); return; } - client->AppendInteger(reply_Num); + if (pstd::String2int(client->argv_[3], &end) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + + storage::Status s; + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRemrangebyrank(client->Key(), start, end, &ret); + if (s.ok() || s.IsNotFound()) { + client->AppendInteger(ret); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } } -ZRevRangeByScoreCmd::ZRevRangeByScoreCmd(const std::string& name, int16_t arity) +ZRevrangebyscoreCmd::ZRevrangebyscoreCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} -bool ZRevRangeByScoreCmd::DoInitial(PClient* client) { +bool ZRevrangebyscoreCmd::DoInitial(PClient* client) { client->SetKey(client->argv_[1]); return true; } -void ZRevRangeByScoreCmd::DoCmd(PClient* client) { +void ZRevrangebyscoreCmd::DoCmd(PClient* client) { double min_score = 0; double max_score = 0; bool right_close = true; @@ -323,4 +370,423 @@ void ZRevRangeByScoreCmd::DoCmd(PClient* client) { } } -} // namespace pikiwidb \ No newline at end of file +ZCardCmd::ZCardCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} + +bool ZCardCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZCardCmd::DoCmd(PClient* client) { + int32_t reply_Num = 0; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZCard(client->Key(), &reply_Num); + if (!s.ok()) { + client->SetRes(CmdRes::kSyntaxErr, "ZCard cmd error"); + return; + } + client->AppendInteger(reply_Num); +} + +ZRangeCmd::ZRangeCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} + +bool ZRangeCmd::DoInitial(PClient* client) { + 
client->SetKey(client->argv_[1]); + return true; +} + +void ZRangeCmd::DoCmd(PClient* client) { + double start = 0; + double stop = 0; + int64_t count = -1; + int64_t offset = 0; + bool with_scores = false; + bool by_score = false; + bool by_lex = false; + bool left_close = false; + bool right_close = false; + bool is_rev = false; + size_t argc = client->argv_.size(); + if (argc >= 5) { + size_t index = 4; + while (index < argc) { + if (strcasecmp(client->argv_[index].data(), "byscore") == 0) { + by_score = true; + } else if (strcasecmp(client->argv_[index].data(), "bylex") == 0) { + by_lex = true; + } else if (strcasecmp(client->argv_[index].data(), "rev") == 0) { + is_rev = true; + } else if (strcasecmp(client->argv_[index].data(), "withscores") == 0) { + with_scores = true; + } else if (strcasecmp(client->argv_[index].data(), "limit") == 0) { + if (index + 3 > argc) { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + if (pstd::String2int(client->argv_[index].data(), client->argv_[index].size(), &offset) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + index++; + if (pstd::String2int(client->argv_[index].data(), client->argv_[index].size(), &count) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + } else { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } + } + if (by_score && by_lex) { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + + int32_t ret = 0; + std::string lex_min; + std::string lex_max; + if (by_lex) { + ret = DoMemberRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &lex_min, &lex_max); + if (ret == -1) { + client->SetRes(CmdRes::kErrOther, "min or max not valid string range item"); + return; + } + } else { + ret = DoScoreStrRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &start, &stop); + if (ret == -1) { + client->SetRes(CmdRes::kErrOther, "start or stop is not a float"); + return; + } + } + + std::vector score_members; + std::vector lex_members; 
+ storage::Status s; + if (!is_rev) { + if (by_score) { + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRangebyscore(client->Key(), start, stop, left_close, right_close, &score_members); + } else if (by_lex) { + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRangebylex(client->Key(), lex_min, lex_max, left_close, right_close, &lex_members); + } else { + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRange(client->Key(), start, stop, &score_members); + } + } else { + if (by_score) { + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRevrangebyscore(client->Key(), start, stop, left_close, right_close, &score_members); + } else if (by_lex) { + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRangebylex(client->Key(), lex_min, lex_max, left_close, right_close, &lex_members); + } else { + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRevrange(client->Key(), start, stop, &score_members); + } + } + if (!s.ok() && !s.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + FitLimit(count, offset, static_cast(score_members.size())); + size_t m_start = offset; + size_t m_end = offset + count; + if (by_lex) { + if (with_scores) { + client->SetRes(CmdRes::kSyntaxErr, "by lex not support with scores"); + } else { + client->AppendArrayLen(count); + for (; m_start < m_end; m_start++) { + client->AppendContent(lex_members[m_start]); + } + } + } else { + if (with_scores) { + char buf[32]; + int64_t len = 0; + client->AppendArrayLen(count * 2); + for (; m_start < m_end; m_start++) { + client->AppendStringLenUint64(score_members[m_start].member.size()); + client->AppendContent(score_members[m_start].member); + len = pstd::D2string(buf, sizeof(buf), score_members[m_start].score); + client->AppendStringLen(len); + client->AppendContent(buf); + } + } else { + client->AppendArrayLen(count); + for (; m_start < m_end; m_start++) { + 
client->AppendStringLenUint64(score_members[m_start].member.size()); + client->AppendContent(score_members[m_start].member); + } + } + } +} + +ZScoreCmd::ZScoreCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryString) {} + +bool ZScoreCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZScoreCmd::DoCmd(PClient* client) { + double score = 0; + + storage::Status s; + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZScore(client->Key(), client->argv_[2], &score); + if (s.ok() || s.IsNotFound()) { + client->AppendString(std::to_string(score)); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ZRangebylexCmd::ZRangebylexCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} + +bool ZRangebylexCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRangebylexCmd::DoCmd(PClient* client) { + if (strcasecmp(client->argv_[2].data(), "+") == 0 || strcasecmp(client->argv_[3].data(), "-") == 0) { + client->AppendContent("*0"); + } + + size_t argc = client->argv_.size(); + int64_t count = -1; + int64_t offset = 0; + bool left_close = true; + bool right_close = true; + if (argc == 7 && strcasecmp(client->argv_[4].data(), "limit") == 0) { + if (pstd::String2int(client->argv_[5].data(), client->argv_[5].size(), &offset) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + if (pstd::String2int(client->argv_[6].data(), client->argv_[6].size(), &count) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + } else if (argc == 4) { + } else { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + + std::string min_member; + std::string max_member; + int32_t ret = DoMemberRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &min_member, &max_member); + if (ret == -1) { + 
client->SetRes(CmdRes::kErrOther, "min or max not valid string range item"); + return; + } + std::vector members; + storage::Status s; + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRangebylex(client->Key(), min_member, max_member, left_close, right_close, &members); + if (!s.ok() && !s.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + + FitLimit(count, offset, static_cast(members.size())); + size_t index = offset; + size_t end = offset + count; + + client->AppendArrayLen(static_cast(members.size())); + for (; index < end; index++) { + client->AppendStringLenUint64(members[index].size()); + client->AppendContent(members[index]); + } +} + +ZRevrangebylexCmd::ZRevrangebylexCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} + +bool ZRevrangebylexCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRevrangebylexCmd::DoCmd(PClient* client) { + if (strcasecmp(client->argv_[2].data(), "+") == 0 || strcasecmp(client->argv_[3].data(), "-") == 0) { + client->AppendContent("*0"); + } + + size_t argc = client->argv_.size(); + int64_t count = -1; + int64_t offset = 0; + bool left_close = true; + bool right_close = true; + if (argc == 7 && strcasecmp(client->argv_[4].data(), "limit") == 0) { + if (pstd::String2int(client->argv_[5].data(), client->argv_[5].size(), &offset) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + if (pstd::String2int(client->argv_[6].data(), client->argv_[6].size(), &count) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + } else if (argc == 4) { + } else { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + + std::string min_member; + std::string max_member; + int32_t ret = DoMemberRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &min_member, &max_member); + std::vector members; + storage::Status s; + s = 
PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRangebylex(client->Key(), min_member, max_member, left_close, right_close, &members); + if (!s.ok() && !s.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + + FitLimit(count, offset, static_cast(members.size())); + size_t index = offset + count - 1; + size_t start = offset; + client->AppendArrayLen(static_cast(members.size())); + for (; index >= start; index--) { + client->AppendStringLenUint64(members[index].size()); + client->AppendContent(members[index]); + } +} + +ZRankCmd::ZRankCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} + +bool ZRankCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRankCmd::DoCmd(PClient* client) { + int32_t rank = 0; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRank(client->Key(), client->argv_[2], &rank); + if (s.ok()) { + client->AppendInteger(rank); + } else if (s.IsNotFound()) { + client->AppendContent("$-1"); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ZRevrankCmd::ZRevrankCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} + +bool ZRevrankCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRevrankCmd::DoCmd(PClient* client) { + int32_t revrank = 0; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRevrank(client->Key(), client->argv_[2], &revrank); + if (s.ok()) { + client->AppendInteger(revrank); + } else if (s.IsNotFound()) { + client->AppendContent("$-1"); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ZRemCmd::ZRemCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} + +bool 
ZRemCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRemCmd::DoCmd(PClient* client) { + auto iter = client->argv_.begin() + 2; + std::vector members(iter, client->argv_.end()); + int32_t deleted = 0; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZRem(client->Key(), members, &deleted); + if (s.ok() || s.IsNotFound()) { + client->AppendInteger(deleted); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ZIncrbyCmd::ZIncrbyCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} + +bool ZIncrbyCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZIncrbyCmd::DoCmd(PClient* client) { + double by = .0f; + double score = .0f; + if (pstd::String2d(client->argv_[2].data(), client->argv_[2].size(), &by) == 0) { + client->SetRes(CmdRes::kInvalidFloat); + return; + } + + std::string member = client->argv_[3]; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZIncrby(client->Key(), member, by, &score); + if (s.ok()) { + char buf[32]; + int64_t len = pstd::D2string(buf, sizeof(buf), score); + client->AppendStringLen(len); + client->AppendContent(buf); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ZRemrangebyscoreCmd::ZRemrangebyscoreCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} + +bool ZRemrangebyscoreCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRemrangebyscoreCmd::DoCmd(PClient* client) { + double min_score = 0; + double max_score = 0; + bool left_close = true; + bool right_close = true; + int32_t ret = DoScoreStrRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &min_score, &max_score); + if (ret == -1) { + client->SetRes(CmdRes::kErrOther, "min or max 
is not a float"); + return; + } + + int32_t s_ret = 0; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRemrangebyscore(client->Key(), min_score, max_score, left_close, right_close, &s_ret); + if (s.ok()) { + client->AppendInteger(s_ret); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +} // namespace pikiwidb diff --git a/src/cmd_zset.h b/src/cmd_zset.h index ddef7b956..13049eaa9 100644 --- a/src/cmd_zset.h +++ b/src/cmd_zset.h @@ -45,9 +45,20 @@ class ZRangebyscoreCmd : public BaseCmd { void DoCmd(PClient *client) override; }; -class ZRevRangeByScoreCmd : public BaseCmd { +class ZRemrangebyrankCmd : public BaseCmd { public: - ZRevRangeByScoreCmd(const std::string &name, int16_t arity); + ZRemrangebyrankCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRevrangebyscoreCmd : public BaseCmd { + public: + ZRevrangebyscoreCmd(const std::string &name, int16_t arity); protected: bool DoInitial(PClient *client) override; @@ -67,4 +78,103 @@ class ZCardCmd : public BaseCmd { void DoCmd(PClient *client) override; }; -} // namespace pikiwidb \ No newline at end of file +class ZRangeCmd : public BaseCmd { + public: + ZRangeCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZScoreCmd : public BaseCmd { + public: + ZScoreCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRangebylexCmd : public BaseCmd { + public: + ZRangebylexCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRevrangebylexCmd : public BaseCmd { + public: + ZRevrangebylexCmd(const 
std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRankCmd : public BaseCmd { + public: + ZRankCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRevrankCmd : public BaseCmd { + public: + ZRevrankCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRemCmd : public BaseCmd { + public: + ZRemCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZIncrbyCmd : public BaseCmd { + public: + ZIncrbyCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRemrangebyscoreCmd : public BaseCmd { + public: + ZRemrangebyscoreCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +} // namespace pikiwidb diff --git a/src/command.cc b/src/command.cc deleted file mode 100644 index 5cbad2b9c..000000000 --- a/src/command.cc +++ /dev/null @@ -1,290 +0,0 @@ -/* - * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. 
- */ - -#include "command.h" -#include "replication.h" - -using std::size_t; - -namespace pikiwidb { - -// const PCommandInfo PCommandTable::s_info[] = { -// // key -// {"type", kPAttrRead, 2, &type}, -// {"exists", kPAttrRead, 2, &exists}, -// {"del", kPAttrWrite, -2, &del}, -// {"expire", kPAttrRead, 3, &expire}, -// {"ttl", kPAttrRead, 2, &ttl}, -// {"pexpire", kPAttrRead, 3, &pexpire}, -// {"pttl", kPAttrRead, 2, &pttl}, -// {"expireat", kPAttrRead, 3, &expireat}, -// {"pexpireat", kPAttrRead, 3, &pexpireat}, -// {"persist", kPAttrRead, 2, &persist}, -// {"move", kPAttrWrite, 3, &move}, -// {"keys", kPAttrRead, 2, &keys}, -// {"randomkey", kPAttrRead, 1, &randomkey}, -// {"rename", kPAttrWrite, 3, &rename}, -// {"renamenx", kPAttrWrite, 3, &renamenx}, -// {"scan", kPAttrRead, -2, &scan}, -// {"sort", kPAttrRead, -2, &sort}, -// -// // server -// {"dbsize", kPAttrRead, 1, &dbsize}, -// {"bgsave", kPAttrRead, 1, &bgsave}, -// {"save", kPAttrRead, 1, &save}, -// {"lastsave", kPAttrRead, 1, &lastsave}, -// {"flushdb", kPAttrWrite, 1, &flushdb}, -// {"flushall", kPAttrWrite, 1, &flushall}, -// {"client", kPAttrRead, -2, &client}, -// {"debug", kPAttrRead, -2, &debug}, -// {"shutdown", kPAttrRead, -1, &shutdown}, -// {"ping", kPAttrRead, 1, &ping}, -// {"echo", kPAttrRead, 2, &echo}, -// {"info", kPAttrRead, -1, &info}, -// {"monitor", kPAttrRead, 1, &monitor}, -// {"auth", kPAttrRead, 2, &auth}, -// {"slowlog", kPAttrRead, -2, &slowlog}, -// // {"config", PAttr_read, -3, &config}, -// -// // string -// {"strlen", kPAttrRead, 2, &strlen}, -// {"mset", kPAttrWrite, -3, &mset}, -// {"msetnx", kPAttrWrite, -3, &msetnx}, -// {"setnx", kPAttrWrite, 3, &setnx}, -// {"setex", kPAttrWrite, 4, &setex}, -// {"psetex", kPAttrWrite, 4, &psetex}, -// {"getset", kPAttrWrite, 3, &getset}, -// {"mget", kPAttrRead, -2, &mget}, -// {"append", kPAttrWrite, 3, &append}, -// {"bitcount", kPAttrRead, -2, &bitcount}, -// // {"bitop", PAttr_write, -4, &bitop}, -// {"getbit", kPAttrRead, 3, 
&getbit}, -// {"setbit", kPAttrWrite, 4, &setbit}, -// {"incr", kPAttrWrite, 2, &incr}, -// {"decr", kPAttrWrite, 2, &decr}, -// {"incrby", kPAttrWrite, 3, &incrby}, -// {"incrbyfloat", kPAttrWrite, 3, &incrbyfloat}, -// {"decrby", kPAttrWrite, 3, &decrby}, -// {"getrange", kPAttrRead, 4, &getrange}, -// {"setrange", kPAttrWrite, 4, &setrange}, -// -// // list -// {"lpush", kPAttrWrite, -3, &lpush}, -// {"rpush", kPAttrWrite, -3, &rpush}, -// {"lpushx", kPAttrWrite, -3, &lpushx}, -// {"rpushx", kPAttrWrite, -3, &rpushx}, -// {"lpop", kPAttrWrite, 2, &lpop}, -// {"rpop", kPAttrWrite, 2, &rpop}, -// {"lindex", kPAttrRead, 3, &lindex}, -// {"llen", kPAttrRead, 2, &llen}, -// {"lset", kPAttrWrite, 4, &lset}, -// {"ltrim", kPAttrWrite, 4, <rim}, -// {"lrange", kPAttrRead, 4, &lrange}, -// {"linsert", kPAttrWrite, 5, &linsert}, -// {"lrem", kPAttrWrite, 4, &lrem}, -// {"rpoplpush", kPAttrWrite, 3, &rpoplpush}, -// {"blpop", kPAttrWrite, -3, &blpop}, -// {"brpop", kPAttrWrite, -3, &brpop}, -// {"brpoplpush", kPAttrWrite, 4, &brpoplpush}, -// -// // hash -// {"hget", kPAttrRead, 3, &hget}, -// {"hgetall", kPAttrRead, 2, &hgetall}, -// {"hmget", kPAttrRead, -3, &hmget}, -// {"hset", kPAttrWrite, 4, &hset}, -// {"hsetnx", kPAttrWrite, 4, &hsetnx}, -// {"hmset", kPAttrWrite, -4, &hmset}, -// {"hlen", kPAttrRead, 2, &hlen}, -// {"hexists", kPAttrRead, 3, &hexists}, -// {"hkeys", kPAttrRead, 2, &hkeys}, -// {"hvals", kPAttrRead, 2, &hvals}, -// {"hdel", kPAttrWrite, -3, &hdel}, -// {"hincrby", kPAttrWrite, 4, &hincrby}, -// {"hincrbyfloat", kPAttrWrite, 4, &hincrbyfloat}, -// {"hscan", kPAttrRead, -3, &hscan}, -// {"hstrlen", kPAttrRead, 3, &hstrlen}, -// -// // set -// {"sadd", kPAttrWrite, -3, &sadd}, -// {"scard", kPAttrRead, 2, &scard}, -// {"sismember", kPAttrRead, 3, &sismember}, -// {"srem", kPAttrWrite, -3, &srem}, -// {"smembers", kPAttrRead, 2, &smembers}, -// {"sdiff", kPAttrRead, -2, &sdiff}, -// {"sdiffstore", kPAttrWrite, -3, &sdiffstore}, -// {"sinter", 
kPAttrRead, -2, &sinter}, -// {"sinterstore", kPAttrWrite, -3, &sinterstore}, -// {"sunion", kPAttrRead, -2, &sunion}, -// {"sunionstore", kPAttrWrite, -3, &sunionstore}, -// {"smove", kPAttrWrite, 4, &smove}, -// {"spop", kPAttrWrite, 2, &spop}, -// {"srandmember", kPAttrRead, 2, &srandmember}, -// {"sscan", kPAttrRead, -3, &sscan}, -// -// // zset -// {"zadd", kPAttrWrite, -4, &zadd}, -// {"zcard", kPAttrRead, 2, &zcard}, -// {"zrank", kPAttrRead, 3, &zrank}, -// {"zrevrank", kPAttrRead, 3, &zrevrank}, -// {"zrem", kPAttrWrite, -3, &zrem}, -// {"zincrby", kPAttrWrite, 4, &zincrby}, -// {"zscore", kPAttrRead, 3, &zscore}, -// {"zrange", kPAttrRead, -4, &zrange}, -// {"zrevrange", kPAttrRead, -4, &zrevrange}, -// {"zrangebyscore", kPAttrRead, -4, &zrangebyscore}, -// {"zrevrangebyscore", kPAttrRead, -4, &zrevrangebyscore}, -// {"zremrangebyrank", kPAttrWrite, 4, &zremrangebyrank}, -// {"zremrangebyscore", kPAttrWrite, 4, &zremrangebyscore}, -// -// // pubsub -// {"subscribe", kPAttrRead, -2, &subscribe}, -// {"unsubscribe", kPAttrRead, -1, &unsubscribe}, -// {"publish", kPAttrRead, 3, &publish}, -// {"psubscribe", kPAttrRead, -2, &psubscribe}, -// {"punsubscribe", kPAttrRead, -1, &punsubscribe}, -// {"pubsub", kPAttrRead, -2, &pubsub}, -// -// // multi -// {"watch", kPAttrRead, -2, &watch}, -// {"unwatch", kPAttrRead, 1, &unwatch}, -// {"multi", kPAttrRead, 1, &multi}, -// {"exec", kPAttrRead, 1, &exec}, -// {"discard", kPAttrRead, 1, &discard}, -// -// // replication -// {"sync", kPAttrRead, 1, &sync}, -// {"psync", kPAttrRead, 1, &sync}, -// {"slaveof", kPAttrRead, 3, &slaveof}, -// {"replconf", kPAttrRead, -3, &replconf}, -// -// // help -// {"cmdlist", kPAttrRead, 1, &cmdlist}, -// }; -// -// Delegate g_infoCollector; -// -// std::map PCommandTable::s_handlers; -// -// PCommandTable::PCommandTable() { Init(); } -// -// void PCommandTable::Init() { -// for (const auto& info : s_info) { -// s_handlers[info.cmd] = &info; -// } -// -// g_infoCollector += 
OnMemoryInfoCollect; -// g_infoCollector += OnServerInfoCollect; -// g_infoCollector += OnClientInfoCollect; -// g_infoCollector += std::bind(&PReplication::OnInfoCommand, &PREPL, std::placeholders::_1); -// } -// -// const PCommandInfo* PCommandTable::GetCommandInfo(const PString& cmd) { -// auto it(s_handlers.find(cmd)); -// if (it != s_handlers.end()) { -// return it->second; -// } -// -// return nullptr; -// } -// -// bool PCommandTable::AliasCommand(const std::map& aliases) { -// for (const auto& pair : aliases) { -// if (!AliasCommand(pair.first, pair.second)) { -// return false; -// } -// } -// -// return true; -// } -// -// bool PCommandTable::AliasCommand(const PString& oldKey, const PString& newKey) { -// auto info = DelCommand(oldKey); -// if (!info) { -// return false; -// } -// -// return AddCommand(newKey, info); -// } -// -// const PCommandInfo* PCommandTable::DelCommand(const PString& cmd) { -// auto it(s_handlers.find(cmd)); -// if (it != s_handlers.end()) { -// auto p = it->second; -// s_handlers.erase(it); -// return p; -// } -// -// return nullptr; -// } -// -// bool PCommandTable::AddCommand(const PString& cmd, const PCommandInfo* info) { -// if (cmd.empty() || cmd == "\"\"") { -// return true; -// } -// -// return s_handlers.insert(std::make_pair(cmd, info)).second; -// } -// -// PError PCommandTable::ExecuteCmd(const std::vector& params, const PCommandInfo* info, UnboundedBuffer* -// reply) { -// if (params.empty()) { -// ReplyError(kPErrorParam, reply); -// return kPErrorParam; -// } -// -// if (!info) { -// ReplyError(kPErrorUnknowCmd, reply); -// return kPErrorUnknowCmd; -// } -// -// if (!info->CheckParamsCount(static_cast(params.size()))) { -// ReplyError(kPErrorParam, reply); -// return kPErrorParam; -// } -// -// return info->handler(params, reply); -// } -// -// PError PCommandTable::ExecuteCmd(const std::vector& params, UnboundedBuffer* reply) { -// if (params.empty()) { -// ReplyError(kPErrorParam, reply); -// return kPErrorParam; 
-// } -// -// auto it(s_handlers.find(params[0])); -// if (it == s_handlers.end()) { -// ReplyError(kPErrorUnknowCmd, reply); -// return kPErrorUnknowCmd; -// } -// -// const PCommandInfo* info = it->second; -// if (!info->CheckParamsCount(static_cast(params.size()))) { -// ReplyError(kPErrorParam, reply); -// return kPErrorParam; -// } -// -// return info->handler(params, reply); -// } -// -// bool PCommandInfo::CheckParamsCount(int nParams) const { -// if (params > 0) { -// return params == nParams; -// } -// -// return nParams + params >= 0; -// } -// -// PError cmdlist(const std::vector& params, UnboundedBuffer* reply) { -// PreFormatMultiBulk(PCommandTable::s_handlers.size(), reply); -// for (const auto& kv : PCommandTable::s_handlers) { -// FormatBulk(kv.first, reply); -// } -// -// return kPErrorOK; -// } - -} // namespace pikiwidb diff --git a/src/command.h b/src/command.h deleted file mode 100644 index 402094f5e..000000000 --- a/src/command.h +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. 
- */ - -#pragma once - -#include -#include - -#include "common.h" -#include "delegate.h" - -namespace pikiwidb { - -enum PCommandAttr { - kPAttrRead = 0x1, - kPAttrWrite = 0x1 << 1, -}; - -class UnboundedBuffer; -using PCommandHandler = PError(const std::vector& params, UnboundedBuffer* reply); - -// key commands -PCommandHandler type; -PCommandHandler exists; -PCommandHandler del; -PCommandHandler expire; -PCommandHandler pexpire; -PCommandHandler expireat; -PCommandHandler pexpireat; -PCommandHandler ttl; -PCommandHandler pttl; -PCommandHandler persist; -PCommandHandler move; -PCommandHandler keys; -PCommandHandler randomkey; -PCommandHandler rename; -PCommandHandler renamenx; -PCommandHandler scan; -PCommandHandler sort; - -// server commands -PCommandHandler dbsize; -PCommandHandler bgsave; -PCommandHandler save; -PCommandHandler lastsave; -PCommandHandler flushdb; -PCommandHandler flushall; -PCommandHandler client; -PCommandHandler debug; -PCommandHandler shutdown; -PCommandHandler ping; -PCommandHandler echo; -PCommandHandler info; -PCommandHandler monitor; -PCommandHandler auth; -PCommandHandler slowlog; -PCommandHandler config; - -// string commands -PCommandHandler set; -PCommandHandler get; -PCommandHandler getrange; -PCommandHandler setrange; -PCommandHandler getset; -PCommandHandler append; -PCommandHandler bitcount; -PCommandHandler bitop; -PCommandHandler getbit; -PCommandHandler setbit; -PCommandHandler incr; -PCommandHandler incrby; -PCommandHandler incrbyfloat; -PCommandHandler decr; -PCommandHandler decrby; -PCommandHandler mget; -PCommandHandler mset; -PCommandHandler msetnx; -PCommandHandler setnx; -PCommandHandler setex; -PCommandHandler psetex; -PCommandHandler strlen; - -// list commands -PCommandHandler lpush; -PCommandHandler rpush; -PCommandHandler lpushx; -PCommandHandler rpushx; -PCommandHandler lpop; -PCommandHandler rpop; -PCommandHandler lindex; -PCommandHandler llen; -PCommandHandler lset; -PCommandHandler ltrim; -PCommandHandler 
lrange; -PCommandHandler linsert; -PCommandHandler lrem; -PCommandHandler rpoplpush; -PCommandHandler blpop; -PCommandHandler brpop; -PCommandHandler brpoplpush; - -// hash commands -PCommandHandler hget; -PCommandHandler hmget; -PCommandHandler hgetall; -PCommandHandler hset; -PCommandHandler hsetnx; -PCommandHandler hmset; -PCommandHandler hlen; -PCommandHandler hexists; -PCommandHandler hkeys; -PCommandHandler hvals; -PCommandHandler hdel; -PCommandHandler hincrby; -PCommandHandler hincrbyfloat; -PCommandHandler hscan; -PCommandHandler hstrlen; - -// set commands -PCommandHandler sadd; -PCommandHandler scard; -PCommandHandler srem; -PCommandHandler sismember; -PCommandHandler smembers; -PCommandHandler sdiff; -PCommandHandler sdiffstore; -PCommandHandler sinter; -PCommandHandler sinterstore; -PCommandHandler sunion; -PCommandHandler sunionstore; -PCommandHandler smove; -PCommandHandler spop; -PCommandHandler srandmember; -PCommandHandler sscan; - -// zset -PCommandHandler zadd; -PCommandHandler zcard; -PCommandHandler zrank; -PCommandHandler zrevrank; -PCommandHandler zrem; -PCommandHandler zincrby; -PCommandHandler zscore; -PCommandHandler zrange; -PCommandHandler zrevrange; -PCommandHandler zrangebyscore; -PCommandHandler zrevrangebyscore; -PCommandHandler zremrangebyrank; -PCommandHandler zremrangebyscore; - -// pubsub -PCommandHandler subscribe; -PCommandHandler unsubscribe; -PCommandHandler publish; -PCommandHandler psubscribe; -PCommandHandler punsubscribe; -PCommandHandler pubsub; - -// multi -PCommandHandler watch; -PCommandHandler unwatch; -PCommandHandler multi; -PCommandHandler exec; -PCommandHandler discard; - -// replication -PCommandHandler sync; -PCommandHandler slaveof; -PCommandHandler replconf; - -// modules -PCommandHandler module; - -// help -PCommandHandler cmdlist; - -// extern Delegate g_infoCollector; -// extern void OnMemoryInfoCollect(UnboundedBuffer&); -// extern void OnServerInfoCollect(UnboundedBuffer&); -// extern void 
OnClientInfoCollect(UnboundedBuffer&); -// -// struct PCommandInfo { -// PString cmd; -// int attr = -1; -// int params = -1; -// PCommandHandler* handler = nullptr; -// bool CheckParamsCount(int nParams) const; -// }; -// -// class PCommandTable { -// public: -// PCommandTable(); -// -// static void Init(); -// -// static const PCommandInfo* GetCommandInfo(const PString& cmd); -// static PError ExecuteCmd(const std::vector& params, const PCommandInfo* info, -// UnboundedBuffer* reply = nullptr); -// static PError ExecuteCmd(const std::vector& params, UnboundedBuffer* reply = nullptr); -// -// static bool AliasCommand(const std::map& aliases); -// static bool AliasCommand(const PString& oldKey, const PString& newKey); -// -// static bool AddCommand(const PString& cmd, const PCommandInfo* info); -// static const PCommandInfo* DelCommand(const PString& cmd); -// -// friend PCommandHandler cmdlist; -// -// private: -// static const PCommandInfo s_info[]; -// -// static std::map s_handlers; -// }; - -} // namespace pikiwidb diff --git a/src/common.cc b/src/common.cc index 5125c6821..e8f72bb0e 100644 --- a/src/common.cc +++ b/src/common.cc @@ -45,42 +45,6 @@ struct PErrorInfo g_errorInfo[] = { {sizeof "-ERR module already loaded\r\n" - 1, "-ERR module already loaded\r\n"}, }; -bool IsValidNumber(const PString& str) { - size_t slen = str.size(); - if (slen == 0 || slen > 20 || (str[0] != '-' && !isdigit(str[0]))) { - return false; - } - - size_t pos = 0; - if (str[0] == '-') { - if (slen == 1) { - return false; // "-" is not a valid number - } - pos = 1; // skip the sign - } - - // "0", "-0" is a valid number, but "01", "001", etc. 
are not - if (str[pos] == '0' && slen > pos + 1) { - return false; - } - - for (; pos < slen; ++pos) { - if (!isdigit(str[pos])) { - return false; - } - } - - // TODO: - // @jettcc - // If this method is used to determine whether a numeric string is valid, - // it should consider whether the string exceeds the range of int64, - // that is, the string should be a valid long long number. - - return true; -} - -int Double2Str(char* ptr, std::size_t nBytes, double val) { return snprintf(ptr, nBytes - 1, "%.6g", val); } - int StrToLongDouble(const char* s, size_t slen, long double* ldval) { char* pEnd; std::string t(s, slen); @@ -98,144 +62,6 @@ int StrToLongDouble(const char* s, size_t slen, long double* ldval) { return 0; } -int LongDoubleToStr(long double ldval, std::string* value) { - if (isnan(ldval)) { - return -1; - } else if (isinf(ldval)) { - if (ldval > 0) { - *value = "inf"; - } else { - *value = "-inf"; - } - return -1; - } else { - std::ostringstream oss; - oss << std::setprecision(15) << ldval; - *value = oss.str(); - - // Remove trailing zeroes after the '.' 
- size_t dotPos = value->find('.'); - if (dotPos != std::string::npos) { - value->erase(value->find_last_not_of('0') + 1, std::string::npos); - if (value->back() == '.') { - value->pop_back(); - } - } - return 0; - } -} - -bool TryStr2Long(const char* ptr, size_t nBytes, long& val) { - bool negtive = false; - size_t i = 0; - - if (ptr[0] == '-' || ptr[0] == '+') { - if (nBytes <= 1 || !isdigit(ptr[1])) { - return false; - } - - negtive = (ptr[0] == '-'); - i = 1; - } - - val = 0; - for (; i < nBytes; ++i) { - if (!isdigit(ptr[i])) { - break; - } - - if (!negtive && val > std::numeric_limits::max() / 10) { - std::cerr << "long will overflow " << val << std::endl; - return false; - } - - if (negtive && val > (-(std::numeric_limits::min() + 1)) / 10) { - std::cerr << "long will underflow " << val << std::endl; - return false; - } - - val *= 10; - - if (!negtive && val > std::numeric_limits::max() - (ptr[i] - '0')) { - std::cerr << "long will overflow " << val << std::endl; - return false; - } - - if (negtive && (val - 1) > (-(std::numeric_limits::min() + 1)) - (ptr[i] - '0')) { - std::cerr << "long will underflow " << val << std::endl; - return false; - } - - val += ptr[i] - '0'; - } - - if (negtive) { - val *= -1; - } - - return true; -} -// to be delete : https://github.com/OpenAtomFoundation/pikiwidb/pull/141#issue-2095887990 -// @578223592 -bool Strtol(const char* ptr, size_t nBytes, long* outVal) { - if (nBytes == 0 || nBytes > 20) { // include the sign - return false; - } - - errno = 0; - char* pEnd = 0; - *outVal = strtol(ptr, &pEnd, 0); - - if (errno == ERANGE || errno == EINVAL) { - return false; - } - - return pEnd == ptr + nBytes; -} - -bool Strtof(const char* ptr, size_t nBytes, float* outVal) { - if (nBytes == 0 || nBytes > 20) { - return false; - } - - errno = 0; - char* pEnd = 0; - *outVal = strtof(ptr, &pEnd); - - if (errno == ERANGE || errno == EINVAL) { - return false; - } - - return pEnd == ptr + nBytes; -} - -bool Strtod(const char* ptr, size_t 
nBytes, double* outVal) { - if (nBytes == 0 || nBytes > 20) { - return false; - } - - errno = 0; - char* pEnd = 0; - *outVal = strtod(ptr, &pEnd); - - if (errno == ERANGE || errno == EINVAL) { - return false; - } - - return pEnd == ptr + nBytes; -} - -const char* Strstr(const char* ptr, size_t nBytes, const char* pattern, size_t nBytes2) { - if (!pattern || *pattern == 0) { - return nullptr; - } - - const char* ret = std::search(ptr, ptr + nBytes, pattern, pattern + nBytes2); - return ret == ptr + nBytes ? nullptr : ret; -} - -const char* SearchCRLF(const char* ptr, size_t nBytes) { return Strstr(ptr, nBytes, CRLF, 2); } - size_t FormatInt(long value, UnboundedBuffer* reply) { if (!reply) { return 0; @@ -245,33 +71,19 @@ size_t FormatInt(long value, UnboundedBuffer* reply) { int len = snprintf(val, sizeof val, "%ld" CRLF, value); size_t oldSize = reply->ReadableSize(); - reply->PushData(":", 1); + reply->PushData(":"); reply->PushData(val, len); return reply->ReadableSize() - oldSize; } -size_t FormatSingle(const char* str, size_t len, UnboundedBuffer* reply) { - if (!reply) { - return 0; - } - size_t oldSize = reply->ReadableSize(); - reply->PushData("+", 1); - reply->PushData(str, len); - reply->PushData(CRLF, 2); - - return reply->ReadableSize() - oldSize; -} - -size_t FormatSingle(const PString& str, UnboundedBuffer* reply) { return FormatSingle(str.c_str(), str.size(), reply); } - size_t FormatBulk(const char* str, size_t len, UnboundedBuffer* reply) { if (!reply) { return 0; } size_t oldSize = reply->ReadableSize(); - reply->PushData("$", 1); + reply->PushData("$"); char val[32]; int tmp = snprintf(val, sizeof val - 1, "%lu" CRLF, len); @@ -294,7 +106,7 @@ size_t PreFormatMultiBulk(size_t nBulk, UnboundedBuffer* reply) { } size_t oldSize = reply->ReadableSize(); - reply->PushData("*", 1); + reply->PushData("*"); char val[32]; int tmp = snprintf(val, sizeof val - 1, "%lu" CRLF, nBulk); @@ -303,8 +115,6 @@ size_t PreFormatMultiBulk(size_t nBulk, 
UnboundedBuffer* reply) { return reply->ReadableSize() - oldSize; } -std::size_t FormatEmptyBulk(UnboundedBuffer* reply) { return reply->PushData("$0" CRLF CRLF, 6); } - void ReplyError(PError err, UnboundedBuffer* reply) { if (!reply) { return; @@ -315,61 +125,13 @@ void ReplyError(PError err, UnboundedBuffer* reply) { reply->PushData(info.errorStr, info.len); } -size_t FormatNull(UnboundedBuffer* reply) { - if (!reply) { - return 0; - } - - size_t oldSize = reply->ReadableSize(); - reply->PushData("$-1" CRLF, 5); - - return reply->ReadableSize() - oldSize; -} - -size_t FormatNullArray(UnboundedBuffer* reply) { - if (!reply) { - return 0; - } - - size_t oldSize = reply->ReadableSize(); - reply->PushData("*-1" CRLF, 5); - - return reply->ReadableSize() - oldSize; -} - size_t FormatOK(UnboundedBuffer* reply) { if (!reply) { return 0; } size_t oldSize = reply->ReadableSize(); - reply->PushData("+OK" CRLF, 5); - - return reply->ReadableSize() - oldSize; -} - -size_t Format1(UnboundedBuffer* reply) { - if (!reply) { - return 0; - } - - const char* val = ":1\r\n"; - - size_t oldSize = reply->ReadableSize(); - reply->PushData(val, 4); - - return reply->ReadableSize() - oldSize; -} - -size_t Format0(UnboundedBuffer* reply) { - if (!reply) { - return 0; - } - - const char* val = ":0\r\n"; - - size_t oldSize = reply->ReadableSize(); - reply->PushData(val, 4); + reply->PushData("+OK" CRLF); return reply->ReadableSize() - oldSize; } @@ -437,21 +199,23 @@ std::vector SplitString(const PString& str, char seperator) { return results; } -bool NotGlobRegex(const char* pattern, std::size_t plen) { - for (std::size_t i(0); i < plen; ++i) { - if (pattern[i] == '?' || pattern[i] == '\\' || pattern[i] == '[' || pattern[i] == ']' || pattern[i] == '*' || - pattern[i] == '^' || pattern[i] == '-') { - return false; // may be regex, may not, who cares? 
- } +std::string MergeString(const std::vector& values, char delimiter) { + std::string result(*values.at(0)); + for (int i = 0; i < values.size() - 1; i++) { + result += delimiter; + result += *values.at(i + 1); } + return result; +} - return true; // must not be regex +std::string MergeString(const std::vector& values, char delimiter) { + std::string result(*values.at(0)); + for (int i = 0; i < values.size() - 1; i++) { + result += delimiter; + std::string s(*values.at(i + 1)); + result += s; + } + return result; } } // namespace pikiwidb - -int64_t Now() { - using namespace std::chrono; - auto now = system_clock::now(); - return duration_cast(now.time_since_epoch()).count(); -} diff --git a/src/common.h b/src/common.h index ba2555e2e..dd64c6a20 100644 --- a/src/common.h +++ b/src/common.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -25,57 +26,6 @@ const int kStringMaxBytes = 1 * 1024 * 1024 * 1024; #define PIKIWIDB_SCAN_STEP_LENGTH 1000 -enum PType { - kPTypeInvalid, - kPTypeString, - kPTypeList, - kPTypeSet, - kPTypeSortedSet, - kPTypeHash, - // < 16 -}; - -enum PEncode { - kPEncodeInvalid, - - kPEncodeRaw, // string - kPEncodeInt, // string as int - - kPEncodeList, - - kPEncodeSet, - kPEncodeHash, - - kPEncodeZset, -}; - -inline const char* EncodingStringInfo(unsigned encode) { - switch (encode) { - case kPEncodeRaw: - return "raw"; - - case kPEncodeInt: - return "int"; - - case kPEncodeList: - return "list"; - - case kPEncodeSet: - return "set"; - - case kPEncodeHash: - return "hash"; - - case kPEncodeZset: - return "zset"; - - default: - break; - } - - return "unknown"; -} - enum PError { kPErrorNop = -1, kPErrorOK = 0, @@ -106,124 +56,94 @@ extern struct PErrorInfo { const char* errorStr; } g_errorInfo[]; -template -inline std::size_t Number2Str(char* ptr, std::size_t nBytes, T val) { - if (!ptr || nBytes < 2) { - return 0; - } - - if (val == 0) { - ptr[0] = '0'; - ptr[1] = 0; - return 1; - } - - bool negative = false; - if 
(val < 0) { - negative = true; - val = -val; - } - - std::size_t off = 0; - while (val > 0) { - if (off >= nBytes) { - return 0; - } - - ptr[off++] = val % 10 + '0'; - val /= 10; - } - - if (negative) { - if (off >= nBytes) { - return 0; - } - - ptr[off++] = '-'; - } - - std::reverse(ptr, ptr + off); - ptr[off] = 0; - - return off; -} - -bool IsValidNumber(const PString& str); - -int Double2Str(char* ptr, std::size_t nBytes, double val); int StrToLongDouble(const char* s, size_t slen, long double* ldval); -int LongDoubleToStr(long double ldval, std::string* value); -bool TryStr2Long(const char* ptr, std::size_t nBytes, long& val); // only for decimal -bool Strtol(const char* ptr, std::size_t nBytes, long* outVal); -bool Strtoll(const char* ptr, std::size_t nBytes, long long* outVal); -bool Strtof(const char* ptr, std::size_t nBytes, float* outVal); -bool Strtod(const char* ptr, std::size_t nBytes, double* outVal); -const char* Strstr(const char* ptr, std::size_t nBytes, const char* pattern, std::size_t nBytes2); -const char* SearchCRLF(const char* ptr, std::size_t nBytes); class UnboundedBuffer; std::size_t FormatInt(long value, UnboundedBuffer* reply); -std::size_t FormatSingle(const char* str, std::size_t len, UnboundedBuffer* reply); -std::size_t FormatSingle(const PString& str, UnboundedBuffer* reply); std::size_t FormatBulk(const char* str, std::size_t len, UnboundedBuffer* reply); std::size_t FormatBulk(const PString& str, UnboundedBuffer* reply); std::size_t PreFormatMultiBulk(std::size_t nBulk, UnboundedBuffer* reply); -std::size_t FormatEmptyBulk(UnboundedBuffer* reply); -std::size_t FormatNull(UnboundedBuffer* reply); -std::size_t FormatNullArray(UnboundedBuffer* reply); std::size_t FormatOK(UnboundedBuffer* reply); -std::size_t Format1(UnboundedBuffer* reply); -std::size_t Format0(UnboundedBuffer* reply); void ReplyError(PError err, UnboundedBuffer* reply); -inline void AdjustIndex(long& start, long& end, size_t size) { - if (size == 0) { - end = 0, 
start = 1; - return; - } +enum class PParseResult : int8_t { + kOK, + kWait, + kError, +}; + +PParseResult GetIntUntilCRLF(const char*& ptr, std::size_t nBytes, int& val); - if (start < 0) { - start += size; +class AtomicString { + public: + AtomicString() = default; + ~AtomicString() = default; + AtomicString(std::string str) { + std::lock_guard lock(mutex_); + str_ = std::move(str); + } + AtomicString(std::string&& str) { + std::lock_guard lock(mutex_); + str_ = std::move(str); + } + AtomicString(const std::string& str) { + std::lock_guard lock(mutex_); + str_ = str; } - if (start < 0) { - start = 0; + AtomicString(const char* c) { + std::lock_guard lock(mutex_); + str_ = std::string(c); + }; + AtomicString& operator=(const std::string& str) { + std::lock_guard lock(mutex_); + str_ = str; + return *this; } - if (end < 0) { - end += size; + AtomicString& operator=(std::string&& str) { + std::lock_guard lock(mutex_); + str_ = std::move(str); + return *this; + } + operator std::string() { + std::shared_lock lock(mutex_); + return str_; } - if (end >= static_cast(size)) { - end = size - 1; + operator std::string() const { + std::shared_lock lock(mutex_); + return str_; } -} -struct NocaseComp { - bool operator()(const PString& s1, const PString& s2) const { return strcasecmp(s1.c_str(), s2.c_str()) < 0; } + bool empty() const { + std::shared_lock lock(mutex_); + return str_.empty(); + } - bool operator()(const char* s1, const PString& s2) const { return strcasecmp(s1, s2.c_str()) < 0; } + std::string ToString() const { + std::shared_lock lock(mutex_); + return str_; + } - bool operator()(const PString& s1, const char* s2) const { return strcasecmp(s1.c_str(), s2) < 0; } + private: + mutable std::shared_mutex mutex_; + std::string str_; }; -enum class PParseResult : int8_t { - kOK, - kWait, - kError, -}; +std::vector SplitString(const PString& str, char seperator); -PParseResult GetIntUntilCRLF(const char*& ptr, std::size_t nBytes, int& val); +std::string 
MergeString(const std::vector& values, char delimiter); -std::vector SplitString(const PString& str, char seperator); +std::string MergeString(const std::vector& values, char delimiter); // The defer class for C++11 class ExecuteOnScopeExit { public: - ExecuteOnScopeExit() {} + ExecuteOnScopeExit() = default; - ExecuteOnScopeExit(ExecuteOnScopeExit&& e) { func_ = std::move(e.func_); } + ExecuteOnScopeExit(ExecuteOnScopeExit&& e) noexcept { func_ = std::move(e.func_); } ExecuteOnScopeExit(const ExecuteOnScopeExit& e) = delete; void operator=(const ExecuteOnScopeExit& f) = delete; @@ -249,8 +169,4 @@ class ExecuteOnScopeExit { #define DEFER _MAKE_DEFER_HELPER_(__LINE__) -bool NotGlobRegex(const char* pattern, std::size_t plen); - } // namespace pikiwidb - -int64_t Now(); diff --git a/src/config.cc b/src/config.cc index dbbe20979..e8f1e03ff 100644 --- a/src/config.cc +++ b/src/config.cc @@ -6,14 +6,26 @@ */ #include +#include +#include #include #include "config.h" -#include "config_parser.h" +#include "pstd/pstd_string.h" +#include "store.h" namespace pikiwidb { -static void EraseQuotes(PString& str) { +constexpr uint16_t PORT_LIMIT_MAX = 65535; +constexpr uint16_t PORT_LIMIT_MIN = 1; +constexpr int DBNUMBER_MAX = 16; +constexpr int THREAD_MAX = 129; +constexpr int ROCKSDB_INSTANCE_NUMBER_MAX = 10; + +PConfig g_config; + +// preprocess func +static void EraseQuotes(std::string& str) { // convert "hello" to hello if (str.size() < 2) { return; @@ -24,196 +36,184 @@ static void EraseQuotes(PString& str) { } } -extern std::vector SplitString(const PString& str, char seperator); - -PConfig g_config; - -PConfig::PConfig() { - daemonize = false; - pidfile = "/var/run/pikiwidb.pid"; - - ip = "127.0.0.1"; - port = 9221; - timeout = 0; - dbpath = "./db"; - - loglevel = "notice"; - logdir = "stdout"; - - databases = 16; - - // rdb - saveseconds = 999999999; - savechanges = 999999999; - rdbcompression = true; - rdbchecksum = true; - rdbfullname = "./dump.rdb"; - - maxclients = 
10000; - - // slow log - slowlogtime = 0; - slowlogmaxlen = 128; - - hz = 10; - - includefile = ""; - - maxmemory = 2 * 1024 * 1024 * 1024UL; - maxmemorySamples = 5; - noeviction = true; - - backend = kBackEndRocksDB; - backendPath = "dump"; - backendHz = 10; - - max_client_response_size = 1073741824; +static Status CheckYesNo(const std::string& value) { + if (!pstd::StringEqualCaseInsensitive(value, "yes") && !pstd::StringEqualCaseInsensitive(value, "no")) { + return Status::InvalidArgument("The value must be yes or no."); + } + return Status::OK(); +} - db_instance_num = 3; +static Status CheckLogLevel(const std::string& value) { + if (!pstd::StringEqualCaseInsensitive(value, "debug") && !pstd::StringEqualCaseInsensitive(value, "verbose") && + !pstd::StringEqualCaseInsensitive(value, "notice") && !pstd::StringEqualCaseInsensitive(value, "warning")) { + return Status::InvalidArgument("The value must be debug / verbose / notice / warning."); + } + return Status::OK(); +} - rocksdb_ttl_second = 0; - rocksdb_periodic_second = 0; +Status BaseValue::Set(const std::string& value, bool init_stage) { + if (!init_stage && !rewritable_) { + return Status::NotSupported("Dynamic modification is not supported."); + } + auto value_copy = value; + EraseQuotes(value_copy); + auto s = check(value_copy); + if (!s.ok()) { + return s; + } + // TODO(dingxiaoshuai) Support RocksDB config change Dynamically + return SetValue(value_copy); } -bool LoadPikiwiDBConfig(const char* cfgFile, PConfig& cfg) { - ConfigParser parser; - if (!parser.Load(cfgFile)) { - return false; +Status StringValue::SetValue(const std::string& value) { + auto values = SplitString(value, delimiter_); + if (values.size() != values_.size()) { + return Status::InvalidArgument("The number of parameters does not match."); + } + for (int i = 0; i < values_.size(); i++) { + *values_[i] = std::move(values[i]); } + return Status::OK(); +} - if (parser.GetData("daemonize") == "yes") { - cfg.daemonize = true; +Status 
BoolValue::SetValue(const std::string& value) { + if (pstd::StringEqualCaseInsensitive(value, "yes")) { + value_->store(true); } else { - cfg.daemonize = false; + value_->store(false); } + return Status::OK(); +} - cfg.pidfile = parser.GetData("pidfile", cfg.pidfile); +template +Status NumberValue::SetValue(const std::string& value) { + T v; + auto [ptr, ec] = std::from_chars(value.data(), value.data() + value.length(), v); + if (ec != std::errc()) { + return Status::InvalidArgument("Failed to convert to a number."); + } + if (v < value_min_) { + v = value_min_; + } + if (v > value_max_) { + v = value_max_; + } + value_->store(v); + return Status::OK(); +} - cfg.ip = parser.GetData("bind", cfg.ip); - cfg.port = parser.GetData("port"); - cfg.timeout = parser.GetData("timeout"); - cfg.dbpath = parser.GetData("db-path"); +PConfig::PConfig() { + AddBool("daemonize", &CheckYesNo, false, &daemonize); + AddString("ip", false, {&ip}); + AddNumberWihLimit("port", false, &port, PORT_LIMIT_MIN, PORT_LIMIT_MAX); + AddNumber("timeout", true, &timeout); + AddString("db-path", false, {&db_path}); + AddStrinWithFunc("loglevel", &CheckLogLevel, false, {&log_level}); + AddString("logfile", false, {&log_dir}); + AddNumberWihLimit("databases", false, &databases, 1, DBNUMBER_MAX); + AddString("requirepass", true, {&password}); + AddNumber("maxclients", true, &max_clients); + AddNumberWihLimit("worker-threads", false, &worker_threads_num, 1, THREAD_MAX); + AddNumberWihLimit("slave-threads", false, &worker_threads_num, 1, THREAD_MAX); + AddNumber("slowlog-log-slower-than", true, &slow_log_time); + AddNumber("slowlog-max-len", true, &slow_log_max_len); + AddNumberWihLimit("db-instance-num", true, &db_instance_num, 1, ROCKSDB_INSTANCE_NUMBER_MAX); + AddNumberWihLimit("fast-cmd-threads-num", false, &fast_cmd_threads_num, 1, THREAD_MAX); + AddNumberWihLimit("slow-cmd-threads-num", false, &slow_cmd_threads_num, 1, THREAD_MAX); + AddNumber("max-client-response-size", true, 
&max_client_response_size); + AddString("runid", false, {&run_id}); + AddNumber("small-compaction-threshold", true, &small_compaction_threshold); + AddNumber("small-compaction-duration-threshold", true, &small_compaction_duration_threshold); + + // rocksdb config + AddNumber("rocksdb-max-subcompactions", false, &rocksdb_max_subcompactions); + AddNumber("rocksdb-max-background-jobs", false, &rocksdb_max_background_jobs); + AddNumber("rocksdb-max-write-buffer-number", false, &rocksdb_max_write_buffer_number); + AddNumber("rocksdb-min-write-buffer-number-to-merge", false, &rocksdb_min_write_buffer_number_to_merge); + AddNumber("rocksdb-write-buffer-size", false, &rocksdb_write_buffer_size); + AddNumber("rocksdb-level0-file-num-compaction-trigger", false, &rocksdb_level0_file_num_compaction_trigger); + AddNumber("rocksdb-number-levels", true, &rocksdb_num_levels); + AddBool("rocksdb-enable-pipelined-write", CheckYesNo, false, &rocksdb_enable_pipelined_write); + AddNumber("rocksdb-level0-slowdown-writes-trigger", false, &rocksdb_level0_slowdown_writes_trigger); + AddNumber("rocksdb-level0-stop-writes-trigger", false, &rocksdb_level0_stop_writes_trigger); + AddNumber("rocksdb-level0-slowdown-writes-trigger", false, &rocksdb_level0_slowdown_writes_trigger); +} - cfg.loglevel = parser.GetData("loglevel", cfg.loglevel); - cfg.logdir = parser.GetData("logfile", cfg.logdir); - EraseQuotes(cfg.logdir); - if (cfg.logdir.empty()) { - cfg.logdir = "stdout"; +bool PConfig::LoadFromFile(const std::string& file_name) { + config_file_name_ = file_name; + if (!parser_.Load(file_name.c_str())) { + return false; } - cfg.databases = parser.GetData("databases", cfg.databases); - cfg.password = parser.GetData("requirepass"); - EraseQuotes(cfg.password); - - // alias command - { - std::vector alias(SplitString(parser.GetData("rename-command"), ' ')); - if (alias.size() % 2 == 0) { - for (auto it(alias.begin()); it != alias.end();) { - const PString& oldCmd = *(it++); - const PString& newCmd 
= *(it++); - cfg.aliases[oldCmd] = newCmd; + // During the initialization phase, so there is no need to hold a lock. + for (auto& [key, value] : parser_.GetMap()) { + if (auto iter = config_map_.find(key); iter != config_map_.end()) { + auto& v = config_map_[key]; + auto s = v->Set(value.at(0), true); + if (!s.ok()) { + return false; } } } - // load rdb config - std::vector saveInfo(SplitString(parser.GetData("save"), ' ')); - if (!saveInfo.empty() && saveInfo.size() != 2) { - EraseQuotes(saveInfo[0]); - if (!(saveInfo.size() == 1 && saveInfo[0].empty())) { - std::cerr << "bad format save rdb interval, bad string " << parser.GetData("save") << std::endl; - return false; - } - } else if (!saveInfo.empty()) { - cfg.saveseconds = std::stoi(saveInfo[0]); - cfg.savechanges = std::stoi(saveInfo[1]); + // Handle separately + std::vector master(SplitString(parser_.GetData("slaveof"), ' ')); + if (master.size() == 2) { + master_ip = std::move(master[0]); + master_port = static_cast(std::stoi(master[1])); } - if (cfg.saveseconds == 0) { - cfg.saveseconds = 999999999; - } - if (cfg.savechanges == 0) { - cfg.savechanges = 999999999; + std::vector alias(SplitString(parser_.GetData("rename-command"), ' ')); + if (alias.size() % 2 == 0) { + for (auto it(alias.begin()); it != alias.end();) { + const PString& oldCmd = *(it++); + const PString& newCmd = *(it++); + aliases[oldCmd] = newCmd; + } } - cfg.rdbcompression = (parser.GetData("rdbcompression") == "yes"); - cfg.rdbchecksum = (parser.GetData("rdbchecksum") == "yes"); - - cfg.rdbfullname = parser.GetData("dir", "./") + parser.GetData("dbfilename", "dump.rdb"); - - cfg.maxclients = parser.GetData("maxclients", 10000); - - cfg.slowlogtime = parser.GetData("slowlog-log-slower-than", 0); - cfg.slowlogmaxlen = parser.GetData("slowlog-max-len", cfg.slowlogmaxlen); - - cfg.hz = parser.GetData("hz", 10); + return true; +} - // load master ip port - std::vector master(SplitString(parser.GetData("slaveof"), ' ')); - if (master.size() == 
2) { - cfg.masterIp = std::move(master[0]); - cfg.masterPort = static_cast(std::stoi(master[1])); +void PConfig::Get(const std::string& key, std::vector* values) const { + values->clear(); + for (const auto& [k, v] : config_map_) { + if (key == "*" || pstd::StringMatch(key.c_str(), k.c_str(), 1)) { + values->emplace_back(k); + values->emplace_back(v->Value()); + } } - cfg.masterauth = parser.GetData("masterauth"); - - // load modules' names - cfg.modules = parser.GetDataVector("loadmodule"); - - cfg.includefile = parser.GetData("include"); // TODO multi files include - - // lru cache - cfg.maxmemory = parser.GetData("maxmemory", 2 * 1024 * 1024 * 1024UL); - cfg.maxmemorySamples = parser.GetData("maxmemory-samples", 5); - cfg.noeviction = (parser.GetData("maxmemory-policy", "noeviction") == "noeviction"); - - // worker threads - cfg.worker_threads_num = parser.GetData("worker-threads", 1); - - // slave threads - cfg.slave_threads_num = parser.GetData("slave-threads", 1); - - // backend - cfg.backend = parser.GetData("backend", kBackEndNone); - cfg.backendPath = parser.GetData("backendpath", cfg.backendPath); - EraseQuotes(cfg.backendPath); - cfg.backendHz = parser.GetData("backendhz", 10); - - cfg.max_client_response_size = parser.GetData("max-client-response-size", 1073741824); - - cfg.db_instance_num = parser.GetData("db-instance-num", 3); - cfg.rocksdb_ttl_second = parser.GetData("rocksdb-ttl-second"); - cfg.rocksdb_periodic_second = parser.GetData("rocksdb-periodic-second"); - - return cfg.CheckArgs(); } -bool PConfig::CheckArgs() const { -#define RETURN_IF_FAIL(cond) \ - if (!(cond)) { \ - std::cerr << #cond " failed\n"; \ - return false; \ - } - - RETURN_IF_FAIL(port > 0); - RETURN_IF_FAIL(databases > 0); - RETURN_IF_FAIL(maxclients > 0); - RETURN_IF_FAIL(hz > 0 && hz < 500); - RETURN_IF_FAIL(maxmemory >= 512 * 1024 * 1024UL); - RETURN_IF_FAIL(maxmemorySamples > 0 && maxmemorySamples < 10); - RETURN_IF_FAIL(worker_threads_num > 0 && worker_threads_num < 129); 
// as redis - RETURN_IF_FAIL(backend >= kBackEndNone && backend < kBackEndMax); - RETURN_IF_FAIL(backendHz >= 1 && backendHz <= 50); - RETURN_IF_FAIL(db_instance_num >= 1); - RETURN_IF_FAIL(rocksdb_ttl_second > 0); - RETURN_IF_FAIL(rocksdb_periodic_second > 0); - RETURN_IF_FAIL(max_client_response_size > 0); - -#undef RETURN_IF_FAIL +Status PConfig::Set(std::string key, const std::string& value, bool init_stage) { + std::transform(key.begin(), key.end(), key.begin(), ::tolower); + auto iter = config_map_.find(key); + if (iter == config_map_.end()) { + return Status::NotFound("Non-existent configuration items."); + } + return iter->second->Set(value, init_stage); +} - return true; +rocksdb::Options PConfig::GetRocksDBOptions() { + rocksdb::Options options; + options.create_if_missing = true; + options.create_missing_column_families = true; + options.max_subcompactions = rocksdb_max_subcompactions; + options.max_background_jobs = rocksdb_max_background_jobs; + options.max_write_buffer_number = rocksdb_max_write_buffer_number; + options.min_write_buffer_number_to_merge = rocksdb_min_write_buffer_number_to_merge; + options.write_buffer_size = rocksdb_write_buffer_size; + options.level0_file_num_compaction_trigger = rocksdb_level0_file_num_compaction_trigger; + options.num_levels = rocksdb_num_levels; + options.enable_pipelined_write = rocksdb_enable_pipelined_write; + options.level0_slowdown_writes_trigger = rocksdb_level0_slowdown_writes_trigger; + options.level0_stop_writes_trigger = rocksdb_level0_stop_writes_trigger; + return options; } -bool PConfig::CheckPassword(const PString& pwd) const { return password.empty() || password == pwd; } +rocksdb::BlockBasedTableOptions PConfig::GetRocksDBBlockBasedTableOptions() { + rocksdb::BlockBasedTableOptions options; + return options; +} } // namespace pikiwidb diff --git a/src/config.h b/src/config.h index 14aecf70e..d0dd2b041 100644 --- a/src/config.h +++ b/src/config.h @@ -7,94 +7,195 @@ #pragma once -#include +#include 
+#include +#include +#include #include +#include +#include #include +#include "rocksdb/options.h" +#include "rocksdb/table.h" + #include "common.h" +#include "net/config_parser.h" namespace pikiwidb { -enum BackEndType { - kBackEndNone = 0, - kBackEndRocksDB = 1, - kBackEndMax = 2, -}; - -struct PConfig { - bool daemonize; - PString pidfile; - - PString ip; - unsigned short port; +using Status = rocksdb::Status; +using CheckFunc = std::function; +class PConfig; +extern PConfig g_config; - int timeout; +class BaseValue { + public: + BaseValue(const std::string& key, CheckFunc check_func_ptr, bool rewritable = false) + : key_(key), custom_check_func_ptr_(check_func_ptr), rewritable_(rewritable) {} - PString dbpath; + virtual ~BaseValue() = default; - PString loglevel; - PString logdir; // the log directory, differ from redis + const std::string& Key() const { return key_; } - int databases; + virtual std::string Value() const = 0; - // auth - PString password; + Status Set(const std::string& value, bool force); - std::map aliases; + protected: + virtual Status SetValue(const std::string&) = 0; + Status check(const std::string& value) { + if (!custom_check_func_ptr_) { + return Status::OK(); + } + return custom_check_func_ptr_(value); + } - // @ rdb - // save seconds changes - int saveseconds; - int savechanges; - bool rdbcompression; // yes - bool rdbchecksum; // yes - PString rdbfullname; // ./dump.rdb + protected: + std::string key_; + CheckFunc custom_check_func_ptr_ = nullptr; + bool rewritable_ = false; +}; - int maxclients; // 10000 +class StringValue : public BaseValue { + public: + StringValue(const std::string& key, CheckFunc check_func_ptr, bool rewritable, + const std::vector& value_ptr_vec, char delimiter = ' ') + : BaseValue(key, check_func_ptr, rewritable), values_(value_ptr_vec), delimiter_(delimiter) { + assert(!values_.empty()); + } + ~StringValue() override = default; - int slowlogtime; // 1000 microseconds - int slowlogmaxlen; // 128 + std::string 
Value() const override { return MergeString(values_, delimiter_); }; - int hz; // 10 [1,500] + private: + Status SetValue(const std::string& value) override; - PString masterIp; - unsigned short masterPort; // replication - PString masterauth; + std::vector values_; + char delimiter_ = 0; +}; - PString runid; +template +class NumberValue : public BaseValue { + public: + NumberValue(const std::string& key, CheckFunc check_func_ptr, bool rewritable, std::atomic* value_ptr, + T min = std::numeric_limits::min(), T max = std::numeric_limits::max()) + : BaseValue(key, check_func_ptr, rewritable), value_(value_ptr), value_min_(min), value_max_(max) { + assert(value_ != nullptr); + assert(value_min_ <= value_max_); + }; - PString includefile; // the template config + std::string Value() const override { return std::to_string(value_->load()); } - std::vector modules; // modules + private: + Status SetValue(const std::string& value) override; - // use redis as cache, level db as backup - uint64_t maxmemory; // default 2GB - int maxmemorySamples; // default 5 - bool noeviction; // default true + std::atomic* value_ = nullptr; + T value_min_; + T value_max_; +}; - // THREADED I/O - int worker_threads_num; +class BoolValue : public BaseValue { + public: + BoolValue(const std::string& key, CheckFunc check_func_ptr, bool rewritable, std::atomic* value_ptr) + : BaseValue(key, check_func_ptr, rewritable), value_(value_ptr) { + assert(value_ != nullptr); + }; - // THREADED SLAVE - int slave_threads_num; + std::string Value() const override { return value_->load() ? 
"yes" : "no"; }; - int backend; // enum BackEndType - PString backendPath; - int backendHz; // the frequency of dump to backend + private: + Status SetValue(const std::string& value) override; + std::atomic* value_ = nullptr; +}; - int64_t max_client_response_size; +using ValuePrt = std::unique_ptr; +using ConfigMap = std::unordered_map; - int db_instance_num; - uint64_t rocksdb_ttl_second; - uint64_t rocksdb_periodic_second; +class PConfig { + public: PConfig(); - - bool CheckArgs() const; - bool CheckPassword(const PString& pwd) const; + ~PConfig() = default; + bool LoadFromFile(const std::string& file_name); + const std::string& ConfigFileName() const { return config_file_name_; } + void Get(const std::string&, std::vector*) const; + Status Set(std::string, const std::string&, bool force = false); + + public: + std::atomic_uint32_t timeout = 0; + // auth + AtomicString password; + AtomicString master_auth; + AtomicString master_ip; + std::map aliases; + std::atomic_uint32_t max_clients = 10000; // 10000 + std::atomic_uint32_t slow_log_time = 1000; // 1000 microseconds + std::atomic_uint32_t slow_log_max_len = 128; // 128 + std::atomic_uint32_t master_port; // replication + AtomicString include_file; // the template config + std::vector modules; // modules + std::atomic_int32_t fast_cmd_threads_num = 4; + std::atomic_int32_t slow_cmd_threads_num = 4; + std::atomic_uint64_t max_client_response_size = 1073741824; + std::atomic_uint64_t small_compaction_threshold = 604800; + std::atomic_uint64_t small_compaction_duration_threshold = 259200; + + std::atomic_bool daemonize = false; + AtomicString pid_file = "./pikiwidb.pid"; + AtomicString ip = "127.0.0.1"; + std::atomic_uint16_t port = 9221; + AtomicString db_path = "./db/"; + AtomicString log_dir = "stdout"; // the log directory, differ from redis + AtomicString log_level = "warning"; + AtomicString run_id; + std::atomic databases = 16; + std::atomic_uint32_t worker_threads_num = 2; + std::atomic_uint32_t 
slave_threads_num = 2; + std::atomic db_instance_num = 3; + + std::atomic_uint32_t rocksdb_max_subcompactions = 0; + // default 2 + std::atomic_int rocksdb_max_background_jobs = 4; + // default 2 + std::atomic rocksdb_max_write_buffer_number = 2; + // default 2 + std::atomic_int rocksdb_min_write_buffer_number_to_merge = 2; + // default 64M + std::atomic rocksdb_write_buffer_size = 64 << 20; + std::atomic_int rocksdb_level0_file_num_compaction_trigger = 4; + std::atomic_int rocksdb_num_levels = 7; + std::atomic_bool rocksdb_enable_pipelined_write = false; + std::atomic_int rocksdb_level0_slowdown_writes_trigger = 20; + std::atomic_int rocksdb_level0_stop_writes_trigger = 36; + + rocksdb::Options GetRocksDBOptions(); + + rocksdb::BlockBasedTableOptions GetRocksDBBlockBasedTableOptions(); + + private: + inline void AddString(const std::string& key, bool rewritable, std::vector values_ptr_vector) { + config_map_.emplace(key, std::make_unique(key, nullptr, rewritable, values_ptr_vector)); + } + inline void AddStrinWithFunc(const std::string& key, const CheckFunc& checkfunc, bool rewritable, + std::vector values_ptr_vector) { + config_map_.emplace(key, std::make_unique(key, checkfunc, rewritable, values_ptr_vector)); + } + inline void AddBool(const std::string& key, const CheckFunc& checkfunc, bool rewritable, + std::atomic* value_ptr) { + config_map_.emplace(key, std::make_unique(key, checkfunc, rewritable, value_ptr)); + } + template + inline void AddNumber(const std::string& key, bool rewritable, std::atomic* value_ptr) { + config_map_.emplace(key, std::make_unique>(key, nullptr, rewritable, value_ptr)); + } + template + inline void AddNumberWihLimit(const std::string& key, bool rewritable, std::atomic* value_ptr, T min, T max) { + config_map_.emplace(key, std::make_unique>(key, nullptr, rewritable, value_ptr, min, max)); + } + + private: + ConfigParser parser_; + ConfigMap config_map_; + std::string config_file_name_; }; - -extern PConfig g_config; - -extern bool 
LoadPikiwiDBConfig(const char* cfgFile, PConfig& cfg); - } // namespace pikiwidb diff --git a/src/db.cpp b/src/db.cc similarity index 57% rename from src/db.cpp rename to src/db.cc index ba0c4ba09..6fdbfe5ee 100644 --- a/src/db.cpp +++ b/src/db.cc @@ -14,19 +14,25 @@ namespace pikiwidb { DB::DB(int db_id, const std::string &db_path) : db_id_(db_id), db_path_(db_path + std::to_string(db_id) + '/') { storage::StorageOptions storage_options; - storage_options.options.create_if_missing = true; + storage_options.options = g_config.GetRocksDBOptions(); + // some options obj for all RocksDB in one DB. + auto cap = storage_options.db_instance_num * kColumnNum * storage_options.options.write_buffer_size * + storage_options.options.max_write_buffer_number; + storage_options.options.write_buffer_manager = std::make_shared(cap); + + storage_options.table_options = g_config.GetRocksDBBlockBasedTableOptions(); + + storage_options.small_compaction_threshold = g_config.small_compaction_threshold.load(); + storage_options.small_compaction_duration_threshold = g_config.small_compaction_duration_threshold.load(); storage_options.db_instance_num = g_config.db_instance_num; storage_options.db_id = db_id; - // options for CF - storage_options.options.ttl = g_config.rocksdb_ttl_second; - storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; storage_ = std::make_unique(); if (auto s = storage_->Open(storage_options, db_path_); !s.ok()) { ERROR("Storage open failed! {}", s.ToString()); abort(); } opened_ = true; - INFO("Open DB{} success!", db_id); } + } // namespace pikiwidb diff --git a/src/db.h b/src/db.h index cdb0081a8..24e983171 100644 --- a/src/db.h +++ b/src/db.h @@ -5,8 +5,7 @@ * of patent rights can be found in the PATENTS file in the same directory. 
*/ -#ifndef PIKIWIDB_DB_H -#define PIKIWIDB_DB_H +#pragma once #include @@ -15,9 +14,11 @@ #include "storage/storage.h" namespace pikiwidb { +constexpr int kColumnNum = 10; class DB { public: DB(int db_id, const std::string& db_path); + std::unique_ptr& GetStorage() { return storage_; } void Lock() { storage_mutex_.lock(); } @@ -29,7 +30,7 @@ class DB { void UnLockShared() { storage_mutex_.unlock_shared(); } private: - const int db_id_; + const int db_id_ = 0; const std::string db_path_; /** @@ -53,6 +54,5 @@ class DB { int64_t last_checkpoint_time_ = -1; bool last_checkpoint_success_ = false; }; -} // namespace pikiwidb -#endif // PIKIWIDB_DB_H +} // namespace pikiwidb diff --git a/src/io_thread_pool.cc b/src/io_thread_pool.cc index a5332e571..0aa0a5015 100644 --- a/src/io_thread_pool.cc +++ b/src/io_thread_pool.cc @@ -60,7 +60,9 @@ void IOThreadPool::Run(int ac, char* av[]) { base_.Run(); for (auto& w : worker_threads_) { - w.join(); + if (w.joinable()) { + w.join(); + } } worker_threads_.clear(); @@ -168,4 +170,72 @@ void IOThreadPool::Reset() { BaseLoop()->Reset(); } +void WorkIOThreadPool::PushWriteTask(std::shared_ptr client) { + auto pos = ++counter_ % worker_num_; + std::unique_lock lock(*writeMutex_[pos]); + + writeQueue_[pos].emplace_back(client); + writeCond_[pos]->notify_one(); +} + +void WorkIOThreadPool::StartWorkers() { + // only called by main thread + assert(state_ == State::kNone); + + IOThreadPool::StartWorkers(); + + writeMutex_.reserve(worker_num_); + writeCond_.reserve(worker_num_); + writeQueue_.reserve(worker_num_); + for (size_t index = 0; index < worker_num_; ++index) { + writeMutex_.emplace_back(std::make_unique()); + writeCond_.emplace_back(std::make_unique()); + writeQueue_.emplace_back(); + + std::thread t([this, index]() { + while (writeRunning_) { + std::unique_lock lock(*writeMutex_[index]); + while (writeQueue_[index].empty()) { + if (!writeRunning_) { + break; + } + writeCond_[index]->wait(lock); + } + if (!writeRunning_) { + 
break; + } + auto client = writeQueue_[index].front(); + if (client->State() == ClientState::kOK) { + client->WriteReply2Client(); + } + writeQueue_[index].pop_front(); + } + INFO("worker write thread {}, goodbye...", index); + }); + + INFO("worker write thread {}, starting...", index); + writeThreads_.push_back(std::move(t)); + } +} + +void WorkIOThreadPool::Exit() { + IOThreadPool::Exit(); + + writeRunning_ = false; + int i = 0; + for (auto& cond : writeCond_) { + std::unique_lock lock(*writeMutex_[i++]); + cond->notify_all(); + } + for (auto& wt : writeThreads_) { + if (wt.joinable()) { + wt.join(); + } + } + writeThreads_.clear(); + writeCond_.clear(); + writeQueue_.clear(); + writeMutex_.clear(); +} + } // namespace pikiwidb diff --git a/src/io_thread_pool.h b/src/io_thread_pool.h index 09e32d2de..3bdd5ca03 100644 --- a/src/io_thread_pool.h +++ b/src/io_thread_pool.h @@ -8,10 +8,13 @@ #pragma once #include +#include #include #include #include +#include "client.h" +#include "cmd_thread_pool.h" #include "net/event_loop.h" #include "net/http_client.h" #include "net/http_server.h" @@ -27,7 +30,7 @@ class IOThreadPool { bool Init(const char* ip, int port, const NewTcpConnectionCallback& ccb); void Run(int argc, char* argv[]); - void Exit(); + virtual void Exit(); bool IsExit() const; EventLoop* BaseLoop(); @@ -54,11 +57,13 @@ class IOThreadPool { // HTTP client std::shared_ptr ConnectHTTP(const char* ip, int port, EventLoop* loop = nullptr); + virtual void PushWriteTask(std::shared_ptr /*unused*/){}; + // for unittest only void Reset(); - private: - void StartWorkers(); + protected: + virtual void StartWorkers(); static const size_t kMaxWorkers; @@ -82,4 +87,24 @@ class IOThreadPool { std::atomic state_{State::kNone}; }; -} // namespace pikiwidb \ No newline at end of file +class WorkIOThreadPool : public IOThreadPool { + public: + WorkIOThreadPool() = default; + ~WorkIOThreadPool() = default; + + void Exit() override; + void PushWriteTask(std::shared_ptr client) 
override; + + private: + void StartWorkers() override; + + private: + std::vector writeThreads_; + std::vector> writeMutex_; + std::vector> writeCond_; + std::vector>> writeQueue_; + std::atomic counter_ = 0; + bool writeRunning_ = true; +}; + +} // namespace pikiwidb diff --git a/src/multi.cc b/src/multi.cc index d4cc0bbf0..de52f1798 100644 --- a/src/multi.cc +++ b/src/multi.cc @@ -10,154 +10,154 @@ #include "log.h" #include "store.h" -namespace pikiwidb { - -PMulti& PMulti::Instance() { - static PMulti mt; - return mt; -} - -void PMulti::Watch(PClient* client, int dbno, const PString& key) { - if (client->Watch(dbno, key)) { - Clients& cls = clients_[dbno][key]; - cls.push_back(std::static_pointer_cast(client->shared_from_this())); - } -} - -bool PMulti::Multi(PClient* client) { - if (client->IsFlagOn(kClientFlagMulti)) { - return false; - } - - client->ClearMulti(); - client->SetFlag(kClientFlagMulti); - return true; -} - -bool PMulti::Exec(PClient* client) { return client->Exec(); } - -void PMulti::Discard(PClient* client) { - client->ClearMulti(); - client->ClearWatch(); -} - -void PMulti::NotifyDirty(int dbno, const PString& key) { - auto tmpDBIter = clients_.find(dbno); - if (tmpDBIter == clients_.end()) { - return; - } - - auto& dbWatchedKeys = tmpDBIter->second; - auto it = dbWatchedKeys.find(key); - if (it == dbWatchedKeys.end()) { - return; - } - - Clients& cls = it->second; - for (auto itCli(cls.begin()); itCli != cls.end();) { - auto client(itCli->lock()); - if (!client) { - WARN("Erase not exist client when notify dirty key[{}]", key); - itCli = cls.erase(itCli); - } else { - if (client.get() != PClient::Current() && client->NotifyDirty(dbno, key)) { - WARN("Erase dirty client {} when notify dirty key[{}]", client->GetName(), key); - itCli = cls.erase(itCli); - itCli = cls.erase(itCli); - } else { - ++itCli; - } - } - } - - if (cls.empty()) { - dbWatchedKeys.erase(it); - } -} - -void PMulti::NotifyDirtyAll(int dbno) { - if (dbno == -1) { - for (auto& 
db_set : clients_) { - for (auto& key_clients : db_set.second) { - std::for_each(key_clients.second.begin(), key_clients.second.end(), [&](const std::weak_ptr& wcli) { - auto scli = wcli.lock(); - if (scli) { - scli->SetFlag(kClientFlagDirty); - } - }); - } - } - } else { - auto it = clients_.find(dbno); - if (it != clients_.end()) { - for (auto& key_clients : it->second) { - std::for_each(key_clients.second.begin(), key_clients.second.end(), [&](const std::weak_ptr& wcli) { - auto scli = wcli.lock(); - if (scli) { - scli->SetFlag(kClientFlagDirty); - } - }); - } - } - } -} - -// multi commands -PError watch(const std::vector& params, UnboundedBuffer* reply) { - PClient* client = PClient::Current(); - if (client->IsFlagOn(kClientFlagMulti)) { - ReplyError(kPErrorWatch, reply); - return kPErrorWatch; - } - - std::for_each(++params.begin(), params.end(), - [client](const PString& s) { PMulti::Instance().Watch(client, client->GetCurrentDB(), s); }); - - FormatOK(reply); - return kPErrorOK; -} - -PError unwatch(const std::vector& params, UnboundedBuffer* reply) { - PClient* client = PClient::Current(); - client->ClearWatch(); - FormatOK(reply); - return kPErrorOK; -} - -PError multi(const std::vector& params, UnboundedBuffer* reply) { - PClient* client = PClient::Current(); - if (PMulti::Instance().Multi(client)) { - FormatOK(reply); - } else { - reply->PushData("-ERR MULTI calls can not be nested\r\n", sizeof "-ERR MULTI calls can not be nested\r\n" - 1); - } - - return kPErrorOK; -} - -PError exec(const std::vector& params, UnboundedBuffer* reply) { - PClient* client = PClient::Current(); - if (!client->IsFlagOn(kClientFlagMulti)) { - ReplyError(kPErrorNoMulti, reply); - return kPErrorNoMulti; - } - if (!PMulti::Instance().Exec(client)) { - ReplyError(kPErrorDirtyExec, reply); - return kPErrorDirtyExec; - } - return kPErrorOK; -} - -PError discard(const std::vector& params, UnboundedBuffer* reply) { - PClient* client = PClient::Current(); - if 
(!client->IsFlagOn(kClientFlagMulti)) { - reply->PushData("-ERR DISCARD without MULTI\r\n", sizeof "-ERR DISCARD without MULTI\r\n" - 1); - } else { - PMulti::Instance().Discard(client); - FormatOK(reply); - } - - return kPErrorOK; -} - -} // namespace pikiwidb +// namespace pikiwidb { +// +// PMulti& PMulti::Instance() { +// static PMulti mt; +// return mt; +// } +// +// void PMulti::Watch(PClient* client, int dbno, const PString& key) { +// if (client->Watch(dbno, key)) { +// Clients& cls = clients_[dbno][key]; +// cls.push_back(std::static_pointer_cast(client->shared_from_this())); +// } +// } +// +// bool PMulti::Multi(PClient* client) { +// if (client->IsFlagOn(kClientFlagMulti)) { +// return false; +// } +// +// client->ClearMulti(); +// client->SetFlag(kClientFlagMulti); +// return true; +// } +// +// bool PMulti::Exec(PClient* client) { return client->Exec(); } +// +// void PMulti::Discard(PClient* client) { +// client->ClearMulti(); +// client->ClearWatch(); +// } +// +// void PMulti::NotifyDirty(int dbno, const PString& key) { +// auto tmpDBIter = clients_.find(dbno); +// if (tmpDBIter == clients_.end()) { +// return; +// } +// +// auto& dbWatchedKeys = tmpDBIter->second; +// auto it = dbWatchedKeys.find(key); +// if (it == dbWatchedKeys.end()) { +// return; +// } +// +// Clients& cls = it->second; +// for (auto itCli(cls.begin()); itCli != cls.end();) { +// auto client(itCli->lock()); +// if (!client) { +// WARN("Erase not exist client when notify dirty key[{}]", key); +// itCli = cls.erase(itCli); +// } else { +// if (client.get() != PClient::Current() && client->NotifyDirty(dbno, key)) { +// WARN("Erase dirty client {} when notify dirty key[{}]", client->GetName(), key); +// itCli = cls.erase(itCli); +// itCli = cls.erase(itCli); +// } else { +// ++itCli; +// } +// } +// } +// +// if (cls.empty()) { +// dbWatchedKeys.erase(it); +// } +// } +// +// void PMulti::NotifyDirtyAll(int dbno) { +// if (dbno == -1) { +// for (auto& db_set : clients_) { +// for 
(auto& key_clients : db_set.second) { +// std::for_each(key_clients.second.begin(), key_clients.second.end(), [&](const std::weak_ptr& wcli) { +// auto scli = wcli.lock(); +// if (scli) { +// scli->SetFlag(kClientFlagDirty); +// } +// }); +// } +// } +// } else { +// auto it = clients_.find(dbno); +// if (it != clients_.end()) { +// for (auto& key_clients : it->second) { +// std::for_each(key_clients.second.begin(), key_clients.second.end(), [&](const std::weak_ptr& wcli) { +// auto scli = wcli.lock(); +// if (scli) { +// scli->SetFlag(kClientFlagDirty); +// } +// }); +// } +// } +// } +// } +// +//// multi commands +// PError watch(const std::vector& params, UnboundedBuffer* reply) { +// PClient* client = PClient::Current(); +// if (client->IsFlagOn(kClientFlagMulti)) { +// ReplyError(kPErrorWatch, reply); +// return kPErrorWatch; +// } +// +// std::for_each(++params.begin(), params.end(), +// [client](const PString& s) { PMulti::Instance().Watch(client, client->GetCurrentDB(), s); }); +// +// FormatOK(reply); +// return kPErrorOK; +// } +// +// PError unwatch(const std::vector& params, UnboundedBuffer* reply) { +// PClient* client = PClient::Current(); +// client->ClearWatch(); +// FormatOK(reply); +// return kPErrorOK; +// } +// +// PError multi(const std::vector& params, UnboundedBuffer* reply) { +// PClient* client = PClient::Current(); +// if (PMulti::Instance().Multi(client)) { +// FormatOK(reply); +// } else { +// reply->PushData("-ERR MULTI calls can not be nested\r\n"); +// } +// +// return kPErrorOK; +// } +// +// PError exec(const std::vector& params, UnboundedBuffer* reply) { +// PClient* client = PClient::Current(); +// if (!client->IsFlagOn(kClientFlagMulti)) { +// ReplyError(kPErrorNoMulti, reply); +// return kPErrorNoMulti; +// } +// if (!PMulti::Instance().Exec(client)) { +// ReplyError(kPErrorDirtyExec, reply); +// return kPErrorDirtyExec; +// } +// return kPErrorOK; +// } +// +// PError discard(const std::vector& params, UnboundedBuffer* reply) 
{ +// PClient* client = PClient::Current(); +// if (!client->IsFlagOn(kClientFlagMulti)) { +// reply->PushData("-ERR DISCARD without MULTI\r\n"); +// } else { +// PMulti::Instance().Discard(client); +// FormatOK(reply); +// } +// +// return kPErrorOK; +// } +// +// } // namespace pikiwidb diff --git a/src/multi.h b/src/multi.h index a5f04e2ae..8e3525303 100644 --- a/src/multi.h +++ b/src/multi.h @@ -14,31 +14,31 @@ #include "common.h" -namespace pikiwidb { - -class PClient; -class PMulti { - public: - static PMulti& Instance(); - - PMulti(const PMulti&) = delete; - void operator=(const PMulti&) = delete; - - void Watch(PClient* client, int dbno, const PString& key); - bool Multi(PClient* client); - bool Exec(PClient* client); - void Discard(PClient* client); - - void NotifyDirty(int dbno, const PString& key); - void NotifyDirtyAll(int dbno); - - private: - PMulti() {} - - using Clients = std::vector >; - using WatchedClients = std::map >; - - WatchedClients clients_; -}; - -} // namespace pikiwidb +// namespace pikiwidb { +// +// class PClient; +// class PMulti { +// public: +// static PMulti& Instance(); +// +// PMulti(const PMulti&) = delete; +// void operator=(const PMulti&) = delete; +// +// void Watch(PClient* client, int dbno, const PString& key); +// bool Multi(PClient* client); +// bool Exec(PClient* client); +// void Discard(PClient* client); +// +// void NotifyDirty(int dbno, const PString& key); +// void NotifyDirtyAll(int dbno); +// +// private: +// PMulti() {} +// +// using Clients = std::vector >; +// using WatchedClients = std::map >; +// +// WatchedClients clients_; +// }; +// +// } // namespace pikiwidb diff --git a/src/net/config_parser.h b/src/net/config_parser.h index 2b4373cff..7783ecba9 100644 --- a/src/net/config_parser.h +++ b/src/net/config_parser.h @@ -18,6 +18,8 @@ class ConfigParser { public: + using Data = std::map>; + bool Load(const char* FileName); template @@ -25,6 +27,8 @@ class ConfigParser { const std::vector& GetDataVector(const 
char* key) const; + const Data& GetMap() { return data_; } + #ifdef CONFIG_DEBUG void Print() { std::cout << "//////////////////" << std::endl; @@ -37,8 +41,6 @@ class ConfigParser { #endif private: - typedef std::map > Data; - Data data_; template diff --git a/src/net/delegate.h b/src/net/delegate.h index 82165740f..79c5d00ab 100644 --- a/src/net/delegate.h +++ b/src/net/delegate.h @@ -14,7 +14,7 @@ class Delegate; template class Delegate { public: - typedef Delegate Self; + using Self = Delegate; Delegate() = default; @@ -26,7 +26,7 @@ class Delegate { connect(std::forward(f)); } - Delegate(Self&& other) : funcs_(std::move(other.funcs_)) {} + Delegate(Self&& other) noexcept : funcs_(std::move(other.funcs_)) {} template Self& operator+=(F&& f) { diff --git a/src/net/event_obj.h b/src/net/event_obj.h index 810c85971..729b65264 100644 --- a/src/net/event_obj.h +++ b/src/net/event_obj.h @@ -20,9 +20,9 @@ using EventLoopSelector = std::function; class EventObject : public std::enable_shared_from_this { public: /// Constructor, printf is for debug, you can comment it - EventObject() {} + EventObject() = default; /// Destructor, printf is for debug, you can comment it - virtual ~EventObject() {} + virtual ~EventObject() = default; EventObject(const EventObject&) = delete; void operator=(const EventObject&) = delete; diff --git a/src/net/http_client.cc b/src/net/http_client.cc index 916669f41..db5c337f8 100644 --- a/src/net/http_client.cc +++ b/src/net/http_client.cc @@ -10,7 +10,7 @@ HttpClient::HttpClient() : parser_(HTTP_RESPONSE) {} void HttpClient::OnConnect(TcpConnection* conn) { assert(loop_ == conn->GetEventLoop()); - INFO("HttpClient::OnConnect to {}:{} in loop {}", conn->GetPeerIp(), conn->GetPeerPort(), loop_->GetName()); + INFO("HttpClient::OnConnect to {}:{} in loop {}", conn->GetPeerIP(), conn->GetPeerPort(), loop_->GetName()); never_connected_ = false; conn_ = std::static_pointer_cast(conn->shared_from_this()); diff --git a/src/net/http_client.h 
b/src/net/http_client.h index 76c203e7e..57f0fcc73 100644 --- a/src/net/http_client.h +++ b/src/net/http_client.h @@ -68,4 +68,4 @@ class HttpClient : public std::enable_shared_from_this { EventLoop* loop_ = nullptr; }; -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/net/http_server.h b/src/net/http_server.h index 9690b54f7..ad67baf3f 100644 --- a/src/net/http_server.h +++ b/src/net/http_server.h @@ -54,4 +54,4 @@ class HttpContext : public std::enable_shared_from_this { HttpServer* server_ = nullptr; }; -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/net/libevent_reactor.h b/src/net/libevent_reactor.h index 0478b9412..1f6db8686 100644 --- a/src/net/libevent_reactor.h +++ b/src/net/libevent_reactor.h @@ -17,7 +17,7 @@ namespace internal { class LibeventReactor : public Reactor { public: LibeventReactor(); - virtual ~LibeventReactor() {} + ~LibeventReactor() override = default; bool Register(EventObject* obj, int events) override; void Unregister(EventObject* obj) override; @@ -57,8 +57,8 @@ class LibeventReactor : public Reactor { explicit Object(EventObject* evobj) : ev_obj(evobj) {} ~Object() = default; - bool IsReadEnabled() const { return !!read_event.get(); } - bool IsWriteEnabled() const { return !!write_event.get(); } + bool IsReadEnabled() const { return !!read_event; } + bool IsWriteEnabled() const { return !!write_event; } std::unique_ptr read_event; std::unique_ptr write_event; diff --git a/src/net/lzf/lzf.h b/src/net/lzf/lzf.h index 1801c6457..fa4b3467d 100644 --- a/src/net/lzf/lzf.h +++ b/src/net/lzf/lzf.h @@ -72,7 +72,7 @@ * and lzf_c.c. 
* */ -unsigned int lzf_compress(const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len); +unsigned int lzf_compress(const void *in_data, unsigned int in_len, void *out_data, unsigned int out_len); /* * Decompress data compressed with some version of the lzf_compress @@ -89,4 +89,4 @@ unsigned int lzf_compress(const void *const in_data, unsigned int in_len, void * * * This function is very fast, about as fast as a copying loop. */ -unsigned int lzf_decompress(const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len); +unsigned int lzf_decompress(const void *in_data, unsigned int in_len, void *out_data, unsigned int out_len); diff --git a/src/net/pipe_obj.h b/src/net/pipe_obj.h index 801e8e668..6ac142f42 100644 --- a/src/net/pipe_obj.h +++ b/src/net/pipe_obj.h @@ -2,13 +2,12 @@ #include "event_obj.h" -namespace pikiwidb { -namespace internal { +namespace pikiwidb::internal { class PipeObject : public EventObject { public: PipeObject(); - ~PipeObject(); + ~PipeObject() override; PipeObject(const PipeObject&) = delete; void operator=(const PipeObject&) = delete; @@ -25,5 +24,4 @@ class PipeObject : public EventObject { int write_fd_; }; -} // end namespace internal -} // namespace pikiwidb +} // namespace pikiwidb::internal diff --git a/src/net/reactor.h b/src/net/reactor.h index 630f7acef..ad7381f8b 100644 --- a/src/net/reactor.h +++ b/src/net/reactor.h @@ -1,13 +1,14 @@ #pragma once -#include +#include +#include #include // #include "util.h" namespace pikiwidb { -typedef int64_t TimerId; +using TimerId = int64_t; class EventObject; /// Reactor interface diff --git a/src/net/tcp_connection.h b/src/net/tcp_connection.h index 7d94ae92a..5bfb7f354 100644 --- a/src/net/tcp_connection.h +++ b/src/net/tcp_connection.h @@ -29,7 +29,7 @@ using TcpDisconnectCallback = std::function; class TcpConnection : public EventObject { public: explicit TcpConnection(EventLoop* loop); - ~TcpConnection(); + ~TcpConnection() 
override; // init tcp object by result of ::accept void OnAccept(int fd, const std::string& peer_ip, int peer_port); @@ -56,7 +56,7 @@ class TcpConnection : public EventObject { void ResetEventLoop(EventLoop* new_loop); EventLoop* SelectSlaveEventLoop(); EventLoop* GetEventLoop() const { return loop_; } - const std::string& GetPeerIp() const { return peer_ip_; } + const std::string& GetPeerIP() const { return peer_ip_; } int GetPeerPort() const { return peer_port_; } const sockaddr_in& PeerAddr() const { return peer_addr_; } diff --git a/src/net/tcp_listener.h b/src/net/tcp_listener.h index afca5f5fe..4afc8d6f4 100644 --- a/src/net/tcp_listener.h +++ b/src/net/tcp_listener.h @@ -10,7 +10,7 @@ class EventLoop; class TcpListener : public EventObject { public: explicit TcpListener(EventLoop* loop); - ~TcpListener(); + ~TcpListener() override; bool Bind(const char* ip, int port); int Fd() const override; diff --git a/src/net/unbounded_buffer.cc b/src/net/unbounded_buffer.cc index 33aa1328b..5e51ac100 100644 --- a/src/net/unbounded_buffer.cc +++ b/src/net/unbounded_buffer.cc @@ -23,6 +23,8 @@ std::size_t UnboundedBuffer::PushData(const void* pData, std::size_t nSize) { return nBytes; } +std::size_t UnboundedBuffer::PushData(const std::string_view& data) { return PushData(data.data(), data.size()); } + std::size_t UnboundedBuffer::PushDataAt(const void* pData, std::size_t nSize, std::size_t offset) { if (!pData || nSize == 0) { return 0; @@ -128,7 +130,7 @@ void UnboundedBuffer::Swap(UnboundedBuffer& buf) { int main() { UnboundedBuffer buf; - std::size_t ret = buf.PushData("hello", 5); + std::size_t ret = buf.PushData("hello"); assert (ret == 5); char tmp[10]; @@ -138,11 +140,11 @@ int main() assert(buf.IsEmpty()); - ret = buf.PushData("world", 5); + ret = buf.PushData("world"); assert (ret == 5); - ret = buf.PushData("abcde", 5); + ret = buf.PushData("abcde"); assert (ret == 5); - ret = buf.PeekData(tmp, 5); + ret = buf.PeekData(tmp); assert(tmp[0] == 'w'); buf.Clear(); 
diff --git a/src/net/unbounded_buffer.h b/src/net/unbounded_buffer.h index eb8e6b9b6..f5cf37ff5 100644 --- a/src/net/unbounded_buffer.h +++ b/src/net/unbounded_buffer.h @@ -8,16 +8,18 @@ #pragma once #include +#include #include namespace pikiwidb { class UnboundedBuffer { public: - UnboundedBuffer() : readPos_(0), writePos_(0) {} + UnboundedBuffer() = default; std::size_t PushDataAt(const void* pData, std::size_t nSize, std::size_t offset = 0); std::size_t PushData(const void* pData, std::size_t nSize); + std::size_t PushData(const std::string_view& data); std::size_t Write(const void* pData, std::size_t nSize); void AdjustWritePtr(std::size_t nBytes) { writePos_ += nBytes; } diff --git a/src/net/util.h b/src/net/util.h index 92a12f76d..39827a696 100644 --- a/src/net/util.h +++ b/src/net/util.h @@ -55,17 +55,23 @@ class ThreadGuard { explicit ThreadGuard(std::thread&& t) : thread_(std::move(t)) {} ThreadGuard& operator=(std::thread&& t) { - if (&t != &thread_) thread_ = std::move(t); + if (&t != &thread_) { + thread_ = std::move(t); + } return *this; } void join() { - if (thread_.joinable()) thread_.join(); + if (thread_.joinable()) { + thread_.join(); + } } ~ThreadGuard() { - if (thread_.joinable()) thread_.join(); + if (thread_.joinable()) { + thread_.join(); + } } private: @@ -74,12 +80,12 @@ class ThreadGuard { inline std::string GetSockaddrIp(const struct sockaddr_in* addr) { char tmp[128]; - const char* ip = inet_ntop(AF_INET, &addr->sin_addr, tmp, (socklen_t)(sizeof tmp)); + const char* ip = inet_ntop(AF_INET, &addr->sin_addr, tmp, static_cast(sizeof tmp)); if (!ip) { - return std::string(); + return {}; } - return std::string(ip); + return {ip}; } inline std::string GetSockaddrIp(const struct sockaddr* sa) { @@ -137,8 +143,8 @@ inline std::string& Trim(std::string& s) { return s; } - s.erase(0, s.find_first_not_of(" ")); - s.erase(s.find_last_not_of(" ") + 1); + s.erase(0, s.find_first_not_of(' ')); + s.erase(s.find_last_not_of(' ') + 1); return s; } @@ 
-208,8 +214,8 @@ struct SocketAddr { std::string GetIP() const { char tmp[32]; - const char* res = inet_ntop(AF_INET, &addr_.sin_addr, tmp, (socklen_t)(sizeof tmp)); - return std::string(res); + const char* res = inet_ntop(AF_INET, &addr_.sin_addr, tmp, static_cast(sizeof tmp)); + return {res}; } uint16_t GetPort() const { return ntohs(addr_.sin_port); } diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index e0c9f5ba7..87b51747c 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -15,7 +15,6 @@ #include #include "log.h" -#include "rocksdb/db.h" #include "client.h" #include "store.h" @@ -29,6 +28,7 @@ #include "pstd_util.h" std::unique_ptr g_pikiwidb; +using namespace pikiwidb; static void IntSigHandle(const int sig) { INFO("Catch Signal {}, cleanup...", sig); @@ -63,19 +63,21 @@ bool PikiwiDB::ParseArgs(int ac, char* av[]) { cfg_file_ = av[i]; continue; } else if (strncasecmp(av[i], "-v", 2) == 0 || strncasecmp(av[i], "--version", 9) == 0) { - std::cerr << "PikiwiDB Server v=" << kPIKIWIDB_VERSION << " bits=" << (sizeof(void*) == 8 ? 64 : 32) << std::endl; + std::cerr << "PikiwiDB Server version: " << KPIKIWIDB_VERSION << " bits=" << (sizeof(void*) == 8 ? 
64 : 32) + << std::endl; + std::cerr << "PikiwiDB Server Build Type: " << KPIKIWIDB_BUILD_TYPE << std::endl; + std::cerr << "PikiwiDB Server Build Date: " << KPIKIWIDB_BUILD_DATE << std::endl; + std::cerr << "PikiwiDB Server Build GIT SHA: " << KPIKIWIDB_GIT_COMMIT_ID << std::endl; exit(0); - return true; } else if (strncasecmp(av[i], "-h", 2) == 0 || strncasecmp(av[i], "--help", 6) == 0) { Usage(); exit(0); - return true; } else if (strncasecmp(av[i], "--port", 6) == 0) { if (++i == ac) { return false; } - port_ = static_cast(std::atoi(av[i])); + port_ = static_cast(std::atoi(av[i])); } else if (strncasecmp(av[i], "--loglevel", 10) == 0) { if (++i == ac) { return false; @@ -97,74 +99,8 @@ bool PikiwiDB::ParseArgs(int ac, char* av[]) { return true; } -static void PdbCron() { - // using namespace pikiwidb; - // - // if (g_qdbPid != -1) { - // return; - // } - // - // if (Now() > (g_lastPDBSave + static_cast(g_config.saveseconds)) * 1000UL) - // { - // int ret = fork(); - // if (ret == 0) { - // { - // PDBSaver qdb; - // qdb.Save(g_config.rdbfullname.c_str()); - // std::cerr << "ServerCron child save rdb done, exiting child\n"; - // } // make qdb to be destructed before exit - // _exit(0); - // } else if (ret == -1) { - // ERROR("fork qdb save process failed"); - // } else { - // g_qdbPid = ret; - // } - // - // INFO("ServerCron save rdb file {}", g_config.rdbfullname); - // } -} - -static void LoadDBFromDisk() { - // using namespace pikiwidb; - // - // PDBLoader loader; - // loader.Load(g_config.rdbfullname.c_str()); - //} - // - // static void CheckChild() { - // using namespace pikiwidb; - // - // if (g_qdbPid == -1) { - // return; - // } - // - // int statloc = 0; - // pid_t pid = wait3(&statloc, WNOHANG, nullptr); - // - // if (pid != 0 && pid != -1) { - // int exit = WEXITSTATUS(statloc); - // int signal = 0; - // - // if (WIFSIGNALED(statloc)) { - // signal = WTERMSIG(statloc); - // } - // - // if (pid == g_qdbPid) { - // PDBSaver::SaveDoneHandler(exit, 
signal); - // if (PREPL.IsBgsaving()) { - // PREPL.OnRdbSaveDone(); - // } else { - // PREPL.TryBgsave(); - // } - // } else { - // ERROR("{} is not rdb process", pid); - // assert(!!!"Is there any back process except rdb?"); - // } - // } -} - void PikiwiDB::OnNewConnection(pikiwidb::TcpConnection* obj) { - INFO("New connection from {}:{}", obj->GetPeerIp(), obj->GetPeerPort()); + INFO("New connection from {}:{}", obj->GetPeerIP(), obj->GetPeerPort()); auto client = std::make_shared(obj); obj->SetContext(client); @@ -174,67 +110,65 @@ void PikiwiDB::OnNewConnection(pikiwidb::TcpConnection* obj) { auto msg_cb = std::bind(&pikiwidb::PClient::HandlePackets, client.get(), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); obj->SetMessageCallback(msg_cb); - obj->SetOnDisconnect([](pikiwidb::TcpConnection* obj) { INFO("disconnect from {}", obj->GetPeerIp()); }); + obj->SetOnDisconnect([](pikiwidb::TcpConnection* obj) { + INFO("disconnect from {}", obj->GetPeerIP()); + obj->GetContext()->SetState(pikiwidb::ClientState::kClosed); + }); obj->SetNodelay(true); obj->SetEventLoopSelector([this]() { return worker_threads_.ChooseNextWorkerEventLoop(); }); obj->SetSlaveEventLoopSelector([this]() { return slave_threads_.ChooseNextWorkerEventLoop(); }); } bool PikiwiDB::Init() { - using namespace pikiwidb; - char runid[kRunidSize + 1] = ""; getRandomHexChars(runid, kRunidSize); - g_config.runid.assign(runid, kRunidSize); + g_config.Set("runid", {runid, kRunidSize}, true); if (port_ != 0) { - g_config.port = port_; + g_config.Set("port", std::to_string(port_), true); } if (!log_level_.empty()) { - g_config.loglevel = log_level_; - } - - if (!master_.empty()) { - g_config.masterIp = master_; - g_config.masterPort = master_port_; + g_config.Set("log-level", log_level_, true); } NewTcpConnectionCallback cb = std::bind(&PikiwiDB::OnNewConnection, this, std::placeholders::_1); - if (!worker_threads_.Init(g_config.ip.c_str(), g_config.port, cb)) { + if 
(!worker_threads_.Init(g_config.ip.ToString().c_str(), g_config.port.load(), cb)) { + ERROR("worker_threads Init failed. IP = {} Port = {}", g_config.ip.ToString(), g_config.port.load()); return false; } - auto num = g_config.worker_threads_num + g_config.slave_threads_num; + auto num = g_config.worker_threads_num.load() + g_config.slave_threads_num.load(); auto kMaxWorkerNum = IOThreadPool::GetMaxWorkerNum(); if (num > kMaxWorkerNum) { ERROR("number of threads can't exceeds {}, now is {}", kMaxWorkerNum, num); return false; } - worker_threads_.SetWorkerNum(static_cast(g_config.worker_threads_num)); - slave_threads_.SetWorkerNum(static_cast(g_config.slave_threads_num)); + worker_threads_.SetWorkerNum(static_cast(g_config.worker_threads_num.load())); + slave_threads_.SetWorkerNum(static_cast(g_config.slave_threads_num.load())); - PSTORE.Init(g_config.databases); - - // Only if there is no backend, load rdb - if (g_config.backend == pikiwidb::kBackEndNone) { - LoadDBFromDisk(); + // now we only use fast cmd thread pool + auto status = cmd_threads_.Init(g_config.fast_cmd_threads_num.load(), 0, "pikiwidb-cmd"); + if (!status.ok()) { + ERROR("init cmd thread pool failed: {}", status.ToString()); + return false; } - PSlowLog::Instance().SetThreshold(g_config.slowlogtime); - PSlowLog::Instance().SetLogLimit(static_cast(g_config.slowlogmaxlen)); + PSTORE.Init(g_config.databases); + + PSlowLog::Instance().SetThreshold(g_config.slow_log_time.load()); + PSlowLog::Instance().SetLogLimit(static_cast(g_config.slow_log_max_len.load())); // init base loop auto loop = worker_threads_.BaseLoop(); - loop->ScheduleRepeatedly(1000 / pikiwidb::g_config.hz, PdbCron); loop->ScheduleRepeatedly(1000, &PReplication::Cron, &PREPL); // master ip - if (!g_config.masterIp.empty()) { - PREPL.SetMasterAddr(g_config.masterIp.c_str(), g_config.masterPort); + if (!g_config.ip.empty()) { + PREPL.SetMasterAddr(g_config.master_ip.ToString().c_str(), g_config.master_port.load()); } - 
cmd_table_manager_.InitCmdTable(); + // cmd_table_manager_.InitCmdTable(); return true; } @@ -243,6 +177,8 @@ void PikiwiDB::Run() { worker_threads_.SetName("pikiwi-main"); slave_threads_.SetName("pikiwi-slave"); + cmd_threads_.Start(); + std::thread t([this]() { auto slave_loop = slave_threads_.BaseLoop(); slave_loop->Init(); @@ -251,16 +187,19 @@ void PikiwiDB::Run() { worker_threads_.Run(0, nullptr); - t.join(); // wait for slave thread exit + if (t.joinable()) { + t.join(); // wait for slave thread exit + } INFO("server exit running"); } void PikiwiDB::Stop() { slave_threads_.Exit(); worker_threads_.Exit(); + cmd_threads_.Stop(); } -pikiwidb::CmdTableManager& PikiwiDB::GetCmdTableManager() { return cmd_table_manager_; } +// pikiwidb::CmdTableManager& PikiwiDB::GetCmdTableManager() { return cmd_table_manager_; } static void InitLogs() { logger::Init("logs/pikiwidb_server.log"); @@ -291,7 +230,6 @@ static void closeStd() { } int main(int ac, char* av[]) { - [[maybe_unused]] rocksdb::DB* db; g_pikiwidb = std::make_unique(); if (!g_pikiwidb->ParseArgs(ac - 1, av + 1)) { @@ -300,7 +238,7 @@ int main(int ac, char* av[]) { } if (!g_pikiwidb->GetConfigName().empty()) { - if (!LoadPikiwiDBConfig(g_pikiwidb->GetConfigName().c_str(), pikiwidb::g_config)) { + if (!g_config.LoadFromFile(g_pikiwidb->GetConfigName())) { std::cerr << "Load config file [" << g_pikiwidb->GetConfigName() << "] failed!\n"; return -1; } @@ -308,11 +246,11 @@ int main(int ac, char* av[]) { // output logo to console char logo[512] = ""; - snprintf(logo, sizeof logo - 1, pikiwidbLogo, kPIKIWIDB_VERSION, static_cast(sizeof(void*)) * 8, - static_cast(pikiwidb::g_config.port)); + snprintf(logo, sizeof logo - 1, pikiwidbLogo, KPIKIWIDB_VERSION, static_cast(sizeof(void*)) * 8, + static_cast(g_config.port)); std::cout << logo; - if (pikiwidb::g_config.daemonize) { + if (g_config.daemonize.load()) { daemonize(); } @@ -320,7 +258,7 @@ int main(int ac, char* av[]) { SignalSetup(); InitLogs(); - if 
(pikiwidb::g_config.daemonize) { + if (g_config.daemonize.load()) { closeStd(); } diff --git a/src/pikiwidb.h b/src/pikiwidb.h index 4dfdb685b..5201fb875 100644 --- a/src/pikiwidb.h +++ b/src/pikiwidb.h @@ -6,12 +6,19 @@ */ #include "cmd_table_manager.h" +#include "cmd_thread_pool.h" #include "common.h" #include "event_loop.h" #include "io_thread_pool.h" #include "tcp_connection.h" -#define kPIKIWIDB_VERSION "4.0.0" +#define KPIKIWIDB_VERSION "4.0.0" + +#ifdef BUILD_DEBUG +# define KPIKIWIDB_BUILD_TYPE "DEBUG" +#else +# define KPIKIWIDB_BUILD_TYPE "RELEASE" +#endif class PikiwiDB final { public: @@ -28,7 +35,12 @@ class PikiwiDB final { void OnNewConnection(pikiwidb::TcpConnection* obj); - pikiwidb::CmdTableManager& GetCmdTableManager(); + // pikiwidb::CmdTableManager& GetCmdTableManager(); + uint32_t GetCmdID() { return ++cmd_id_; }; + + void SubmitFast(const std::shared_ptr& runner) { cmd_threads_.SubmitFast(runner); } + + void PushWriteTask(const std::shared_ptr& client) { worker_threads_.PushWriteTask(client); } public: PString cfg_file_; @@ -41,9 +53,12 @@ class PikiwiDB final { static const uint32_t kRunidSize; private: - pikiwidb::IOThreadPool worker_threads_; + pikiwidb::WorkIOThreadPool worker_threads_; pikiwidb::IOThreadPool slave_threads_; - pikiwidb::CmdTableManager cmd_table_manager_; + pikiwidb::CmdThreadPool cmd_threads_; + // pikiwidb::CmdTableManager cmd_table_manager_; + + uint32_t cmd_id_ = 0; }; extern std::unique_ptr g_pikiwidb; diff --git a/src/pstd/lock_mgr.h b/src/pstd/lock_mgr.h index c84fa0715..59a30ef17 100644 --- a/src/pstd/lock_mgr.h +++ b/src/pstd/lock_mgr.h @@ -13,9 +13,7 @@ #include "mutex.h" #include "noncopyable.h" -namespace pstd { - -namespace lock { +namespace pstd::lock { struct LockMap; struct LockMapStripe; @@ -52,5 +50,4 @@ class LockMgr : public pstd::noncopyable { void UnLockKey(const std::string& key, const std::shared_ptr& stripe); }; -} // namespace lock -} // namespace pstd +} // namespace pstd::lock diff --git 
a/src/pstd/memory_file.cc b/src/pstd/memory_file.cc index c08feb4c9..dae9f645b 100644 --- a/src/pstd/memory_file.cc +++ b/src/pstd/memory_file.cc @@ -27,7 +27,7 @@ InputMemoryFile::InputMemoryFile() : file_(kInvalidFile), pMemory_(kInvalidAddr) InputMemoryFile::~InputMemoryFile() { Close(); } -bool InputMemoryFile::_MapReadOnly() { +bool InputMemoryFile::MapReadOnly() { assert(file_ != kInvalidFile); assert(size_ == 0); @@ -35,7 +35,7 @@ bool InputMemoryFile::_MapReadOnly() { fstat(file_, &st); size_ = st.st_size; - pMemory_ = (char*)::mmap(0, size_, PROT_READ, MAP_PRIVATE, file_, 0); + pMemory_ = static_cast(::mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, file_, 0)); return pMemory_ != kInvalidAddr; } @@ -51,7 +51,7 @@ bool InputMemoryFile::Open(const char* file) { } offset_ = 0; - return _MapReadOnly(); + return MapReadOnly(); } void InputMemoryFile::Close() { @@ -91,7 +91,7 @@ OutputMemoryFile::OutputMemoryFile() : file_(kInvalidFile), pMemory_(kInvalidAdd OutputMemoryFile::~OutputMemoryFile() { Close(); } -void OutputMemoryFile::_ExtendFileSize(size_t size) { +void OutputMemoryFile::ExtendFileSize(size_t size) { assert(file_ != kInvalidFile); if (size > size_) { @@ -123,7 +123,7 @@ bool OutputMemoryFile::Open(const char* file, bool bAppend) { } ::ftruncate(file_, size_); - return _MapWriteOnly(); + return MapWriteOnly(); } void OutputMemoryFile::Close() { @@ -155,7 +155,7 @@ bool OutputMemoryFile::Sync() { return true; } -bool OutputMemoryFile::_MapWriteOnly() { +bool OutputMemoryFile::MapWriteOnly() { if (size_ == 0 || file_ == kInvalidFile) { return false; } @@ -166,7 +166,7 @@ bool OutputMemoryFile::_MapWriteOnly() { ::munmap(m_pMemory, m_size); } #endif - pMemory_ = (char*)::mmap(0, size_, PROT_WRITE, MAP_SHARED, file_, 0); + pMemory_ = static_cast(::mmap(nullptr, size_, PROT_WRITE, MAP_SHARED, file_, 0)); return (pMemory_ != kInvalidAddr); } @@ -182,7 +182,7 @@ void OutputMemoryFile::Truncate(std::size_t size) { offset_ = size_; } - _MapWriteOnly(); + 
MapWriteOnly(); } void OutputMemoryFile::TruncateTailZero() { @@ -191,8 +191,9 @@ void OutputMemoryFile::TruncateTailZero() { } size_t tail = size_; - while (tail > 0 && pMemory_[--tail] == '\0') + while (tail > 0 && pMemory_[--tail] == '\0') { ; + } ++tail; @@ -203,14 +204,14 @@ bool OutputMemoryFile::IsOpen() const { return file_ != kInvalidFile; } // consumer void OutputMemoryFile::Write(const void* data, size_t len) { - _AssureSpace(len); + AssureSpace(len); ::memcpy(pMemory_ + offset_, data, len); offset_ += len; assert(offset_ <= size_); } -void OutputMemoryFile::_AssureSpace(size_t size) { +void OutputMemoryFile::AssureSpace(size_t size) { size_t newSize = size_; while (offset_ + size > newSize) { @@ -221,7 +222,7 @@ void OutputMemoryFile::_AssureSpace(size_t size) { } } - _ExtendFileSize(newSize); + ExtendFileSize(newSize); } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/memory_file.h b/src/pstd/memory_file.h index 1abd057fd..893f3c753 100644 --- a/src/pstd/memory_file.h +++ b/src/pstd/memory_file.h @@ -27,12 +27,12 @@ class InputMemoryFile { bool IsOpen() const; private: - bool _MapReadOnly(); + bool MapReadOnly(); - int file_; - char* pMemory_; - std::size_t offset_; - std::size_t size_; + int file_ = -1; + char* pMemory_ = nullptr; + std::size_t offset_ = 0; + std::size_t size_ = 0; }; template @@ -65,15 +65,15 @@ class OutputMemoryFile { bool IsOpen() const; private: - bool _MapWriteOnly(); - void _ExtendFileSize(std::size_t size); - void _AssureSpace(std::size_t size); - - int file_; - char* pMemory_; - std::size_t offset_; - std::size_t size_; - std::size_t syncPos_; + bool MapWriteOnly(); + void ExtendFileSize(std::size_t size); + void AssureSpace(std::size_t size); + + int file_ = -1; + char* pMemory_ = nullptr; + std::size_t offset_ = 0; + std::size_t size_ = 0; + std::size_t syncPos_ = 0; }; template @@ -81,4 +81,4 @@ inline void OutputMemoryFile::Write(const T& t) { this->Write(&t, sizeof t); } -} // 
namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/mutex_impl.h b/src/pstd/mutex_impl.h index b2008903c..4c2e2afb4 100644 --- a/src/pstd/mutex_impl.h +++ b/src/pstd/mutex_impl.h @@ -11,13 +11,11 @@ #include -namespace pstd { -namespace lock { +namespace pstd::lock { // Default implementation of MutexFactory. class MutexFactoryImpl : public MutexFactory { public: std::shared_ptr AllocateMutex() override; std::shared_ptr AllocateCondVar() override; }; -} // namespace lock -} // namespace pstd +} // namespace pstd::lock diff --git a/src/pstd/pikiwidb_slot.cc b/src/pstd/pikiwidb_slot.cc index bfaddca66..f8cba7695 100644 --- a/src/pstd/pikiwidb_slot.cc +++ b/src/pstd/pikiwidb_slot.cc @@ -50,4 +50,4 @@ uint32_t GetSlotsID(const std::string &str, uint32_t *pcrc, int *phastag) { *phastag = hastag; } return static_cast(crc); -} \ No newline at end of file +} diff --git a/src/pstd/pikiwidb_slot.h b/src/pstd/pikiwidb_slot.h index 4892590ca..09c615819 100644 --- a/src/pstd/pikiwidb_slot.h +++ b/src/pstd/pikiwidb_slot.h @@ -6,7 +6,7 @@ #ifndef PIKIWIDB_SLOT_H_ #define PIKIWIDB_SLOT_H_ -#include +#include #include #include @@ -16,4 +16,4 @@ uint32_t GetSlotID(const std::string& str); // get db instance number of the key uint32_t GetSlotsID(const std::string& str, uint32_t* pcrc, int* phastag); -#endif \ No newline at end of file +#endif diff --git a/src/pstd/pstd_hash.cc b/src/pstd/pstd_hash.cc old mode 100755 new mode 100644 index a492515a4..d846ef4f1 --- a/src/pstd/pstd_hash.cc +++ b/src/pstd/pstd_hash.cc @@ -629,4 +629,4 @@ std::string md5(const std::string& str, bool raw) { return md5.hexdigest(); } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/pstd_hash.h b/src/pstd/pstd_hash.h old mode 100755 new mode 100644 index abf9f0a0d..7c5ec55fb --- a/src/pstd/pstd_hash.h +++ b/src/pstd/pstd_hash.h @@ -85,4 +85,4 @@ std::string sha256(const std::string& input, bool raw = false); bool isSha256(const 
std::string& input); -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/pstd_slice.h b/src/pstd/pstd_slice.h old mode 100755 new mode 100644 index 16febd2aa..f487af359 --- a/src/pstd/pstd_slice.h +++ b/src/pstd/pstd_slice.h @@ -70,7 +70,7 @@ class Slice { } // Return a string that contains the copy of the referenced data. - std::string ToString() const { return std::string(data_, size_); } + std::string ToString() const { return {data_, size_}; } // Three-way comparison. Returns value: // < 0 iff "*this" < "b", @@ -108,4 +108,4 @@ inline int Slice::compare(const Slice& b) const { return r; } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/pstd_status.cc b/src/pstd/pstd_status.cc old mode 100755 new mode 100644 index c198b5aa0..7b9ed3afd --- a/src/pstd/pstd_status.cc +++ b/src/pstd/pstd_status.cc @@ -92,4 +92,4 @@ std::string Status::ToString() const { } } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/pstd_status.h b/src/pstd/pstd_status.h old mode 100755 new mode 100644 index 49051a45e..f0560c18c --- a/src/pstd/pstd_status.h +++ b/src/pstd/pstd_status.h @@ -126,4 +126,4 @@ inline void Status::operator=(const Status& s) { } } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/pstd_string.cc b/src/pstd/pstd_string.cc old mode 100755 new mode 100644 index c10e99255..165b1d5d8 --- a/src/pstd/pstd_string.cc +++ b/src/pstd/pstd_string.cc @@ -619,4 +619,37 @@ bool StringHasSpaces(const std::string& str) { return std::count_if(str.begin(), str.end(), [](unsigned char c) { return std::isspace(c); }); } -} // namespace pstd \ No newline at end of file +bool IsValidNumber(const std::string& str) { + size_t slen = str.size(); + if (slen == 0 || slen > 20 || (str[0] != '-' && !isdigit(str[0]))) { + return false; + } + + size_t pos = 0; + if (str[0] == '-') { + if (slen == 1) { + return false; // "-" is not a 
valid number + } + pos = 1; // skip the sign + } + + // "0", "-0" is a valid number, but "01", "001", etc. are not + if (str[pos] == '0' && slen > pos + 1) { + return false; + } + + for (; pos < slen; ++pos) { + if (!isdigit(str[pos])) { + return false; + } + } + + // @jettcc + // If this method is used to determine whether a numeric string is valid, + // it should consider whether the string exceeds the range of int64, + // that is, the string should be a valid long long number. + + return true; +} + +} // namespace pstd diff --git a/src/pstd/pstd_string.h b/src/pstd/pstd_string.h index ed8411bb4..d1ccb14fb 100755 --- a/src/pstd/pstd_string.h +++ b/src/pstd/pstd_string.h @@ -93,4 +93,6 @@ std::string RandomStringWithNumber(size_t len); bool StringHasSpaces(const std::string& str); +bool IsValidNumber(const std::string& str); + } // namespace pstd diff --git a/src/pstd/pstd_util.cc b/src/pstd/pstd_util.cc index 8af2b8ded..e9458d01c 100644 --- a/src/pstd/pstd_util.cc +++ b/src/pstd/pstd_util.cc @@ -28,4 +28,4 @@ double RandomDouble() { return dis(gen); } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/pstd_util.h b/src/pstd/pstd_util.h index 96b7e4cca..ce998aae0 100644 --- a/src/pstd/pstd_util.h +++ b/src/pstd/pstd_util.h @@ -63,4 +63,4 @@ inline int64_t UnixNanoTimestamp() { .count(); } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/tests/pstd_string_test.cc b/src/pstd/tests/pstd_string_test.cc index 206278110..4bf0d589e 100644 --- a/src/pstd/tests/pstd_string_test.cc +++ b/src/pstd/tests/pstd_string_test.cc @@ -146,4 +146,4 @@ TEST_F(StringTest, test_string2l) { int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); -} \ No newline at end of file +} diff --git a/src/pstd/thread_pool.cc b/src/pstd/thread_pool.cc index eed946353..72a30edd8 100644 --- a/src/pstd/thread_pool.cc +++ b/src/pstd/thread_pool.cc @@ -12,7 +12,7 @@ namespace 
pstd { thread_local bool ThreadPool::working_ = true; ThreadPool::ThreadPool() : waiters_(0), shutdown_(false) { - monitor_ = std::thread([this]() { this->_MonitorRoutine(); }); + monitor_ = std::thread([this]() { this->MonitorRoutine(); }); maxIdleThread_ = std::max(1U, std::thread::hardware_concurrency()); pendingStopSignal_ = 0; } @@ -52,12 +52,12 @@ void ThreadPool::JoinAll() { } } -void ThreadPool::_CreateWorker() { - std::thread t([this]() { this->_WorkerRoutine(); }); +void ThreadPool::CreateWorker() { + std::thread t([this]() { this->WorkerRoutine(); }); worker_threads_.push_back(std::move(t)); } -void ThreadPool::_WorkerRoutine() { +void ThreadPool::WorkerRoutine() { working_ = true; while (working_) { @@ -85,7 +85,7 @@ void ThreadPool::_WorkerRoutine() { --pendingStopSignal_; } -void ThreadPool::_MonitorRoutine() { +void ThreadPool::MonitorRoutine() { while (!shutdown_) { std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -100,11 +100,11 @@ void ThreadPool::_MonitorRoutine() { nw -= pendingStopSignal_; while (nw-- > maxIdleThread_) { - tasks_.push_back([this]() { working_ = false; }); + tasks_.emplace_back([this]() { working_ = false; }); cond_.notify_one(); ++pendingStopSignal_; } } } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pstd/thread_pool.h b/src/pstd/thread_pool.h index 4f1bbd931..77f188f4f 100644 --- a/src/pstd/thread_pool.h +++ b/src/pstd/thread_pool.h @@ -33,9 +33,9 @@ class ThreadPool final { void SetMaxIdleThread(unsigned int m); private: - void _CreateWorker(); - void _WorkerRoutine(); - void _MonitorRoutine(); + void CreateWorker(); + void WorkerRoutine(); + void MonitorRoutine(); std::thread monitor_; std::atomic maxIdleThread_; @@ -68,7 +68,7 @@ auto ThreadPool::ExecuteTask(F&& f, Args&&... 
args) -> std::future std::futureget_future(); } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/pubsub.h b/src/pubsub.h index 256cc19db..a9309c4fd 100644 --- a/src/pubsub.h +++ b/src/pubsub.h @@ -34,7 +34,7 @@ class PPubsub { std::size_t PUnSubscribe(PClient* client, const PString& pchannel); // introspect - void PubsubChannels(std::vector& res, const char* pattern = 0) const; + void PubsubChannels(std::vector& res, const char* pattern = nullptr) const; std::size_t PubsubNumsub(const PString& channel) const; std::size_t PubsubNumpat() const; @@ -42,7 +42,7 @@ class PPubsub { void RecycleClients(PString& startChannel, PString& startPattern); private: - PPubsub() {} + PPubsub() = default; using Clients = std::set, std::owner_less > >; using ChannelClients = std::map; diff --git a/src/replication.cc b/src/replication.cc index 8dcdac7ad..3c10aac92 100644 --- a/src/replication.cc +++ b/src/replication.cc @@ -13,6 +13,7 @@ #include "log.h" #include "net/util.h" #include "pikiwidb.h" +#include "pstd/pstd_string.h" #include "replication.h" namespace pikiwidb { @@ -59,7 +60,7 @@ void PReplication::OnRdbSaveDone() { if (cli->GetSlaveInfo()->state == kPSlaveStateWaitBgsaveEnd) { cli->GetSlaveInfo()->state = kPSlaveStateOnline; - if (!rdb.IsOpen() && !rdb.Open(g_config.rdbfullname.c_str())) { + if (!rdb.IsOpen()) { ERROR("can not open rdb when replication\n"); return; // fatal error; } @@ -174,7 +175,7 @@ void PReplication::Cron() { if (masterInfo_.addr.IsValid()) { switch (masterInfo_.state) { case kPReplStateNone: { - if (masterInfo_.addr.GetIP() == g_config.ip && masterInfo_.addr.GetPort() == g_config.port) { + if (masterInfo_.addr.GetIP() == g_config.ip.ToString() && masterInfo_.addr.GetPort() == g_config.port) { ERROR("Fix config, master addr is self addr!"); assert(!!!"wrong config for master addr"); } @@ -207,14 +208,14 @@ void PReplication::Cron() { } break; case kPReplStateConnected: - if (!g_config.masterauth.empty()) { + if 
(!g_config.master_auth.empty()) { if (auto master = master_.lock()) { UnboundedBuffer req; - req.PushData("auth ", 5); - req.PushData(g_config.masterauth.data(), g_config.masterauth.size()); - req.PushData("\r\n", 2); + req.PushData("auth "); + req.PushData(g_config.master_auth.ToString().data(), g_config.master_auth.ToString().size()); + req.PushData("\r\n"); master->SendPacket(req); - INFO("send auth with password {}", g_config.masterauth); + INFO("send auth with password {}", g_config.master_auth.ToString()); masterInfo_.state = kPReplStateWaitAuth; break; @@ -231,11 +232,11 @@ void PReplication::Cron() { } else if (master->GetAuth()) { // send replconf char req[128]; - auto len = snprintf(req, sizeof req - 1, "replconf listening-port %hu\r\n", g_config.port); + auto len = snprintf(req, sizeof req - 1, "replconf listening-port %hu\r\n", g_config.port.load()); master->SendPacket(req, len); masterInfo_.state = kPReplStateWaitReplconf; - INFO("Send replconf listening-port {}", g_config.port); + INFO("Send replconf listening-port {}", g_config.port.load()); } else { WARN("Haven't auth to master yet, or check masterauth password"); } @@ -317,7 +318,7 @@ PReplState PReplication::GetMasterState() const { return masterInfo_.state; } SocketAddr PReplication::GetMasterAddr() const { return masterInfo_.addr; } -void PReplication::SetMasterAddr(const char* ip, unsigned short port) { +void PReplication::SetMasterAddr(const char* ip, uint16_t port) { if (ip) { masterInfo_.addr.Init(ip, port); } else { @@ -338,7 +339,7 @@ PError replconf(const std::vector& params, UnboundedBuffer* reply) { for (size_t i = 1; i < params.size(); i += 2) { if (strncasecmp(params[i].c_str(), "listening-port", 14) == 0) { long port; - if (!TryStr2Long(params[i + 1].c_str(), params[i + 1].size(), port)) { + if (!pstd::String2int(params[i + 1].c_str(), params[i + 1].size(), &port)) { ReplyError(kPErrorParam, reply); return kPErrorParam; } @@ -350,7 +351,7 @@ PError replconf(const std::vector& params, 
UnboundedBuffer* reply) { info = client->GetSlaveInfo(); PREPL.AddSlave(client); } - info->listenPort = static_cast(port); + info->listenPort = static_cast(port); } else { break; } @@ -413,7 +414,7 @@ void PReplication::OnInfoCommand(UnboundedBuffer& res) { } if (!res.IsEmpty()) { - res.PushData("\r\n", 2); + res.PushData("\r\n"); } res.PushData(buf, n); @@ -429,8 +430,8 @@ PError slaveof(const std::vector& params, UnboundedBuffer* reply) { PREPL.SetMasterAddr(nullptr, 0); } else { long tmpPort = 0; - Strtol(params[2].c_str(), params[2].size(), &tmpPort); - unsigned short port = static_cast(tmpPort); + pstd::String2int(params[2].c_str(), params[2].size(), &tmpPort); + uint16_t port = static_cast(tmpPort); SocketAddr reqMaster(params[1].c_str(), port); diff --git a/src/replication.h b/src/replication.h index 142656788..9d390f8b4 100644 --- a/src/replication.h +++ b/src/replication.h @@ -68,7 +68,7 @@ enum PSlaveState { struct PSlaveInfo { PSlaveState state; - unsigned short listenPort; // slave listening port + uint16_t listenPort; // slave listening port PSlaveInfo() : state(kPSlaveStateNone), listenPort(0) {} }; @@ -96,7 +96,7 @@ struct PMasterInfo { PMasterInfo() { state = kPReplStateNone; downSince = 0; - rdbSize = std::size_t(-1); + rdbSize = static_cast(-1); rdbRecved = 0; } }; @@ -129,7 +129,7 @@ class PReplication { void SaveTmpRdb(const char* data, std::size_t& len); void SetMaster(const std::shared_ptr& cli); void SetMasterState(PReplState s); - void SetMasterAddr(const char* ip, unsigned short port); + void SetMasterAddr(const char* ip, uint16_t port); void SetRdbSize(std::size_t s); PReplState GetMasterState() const; SocketAddr GetMasterAddr() const; diff --git a/src/slow_log.h b/src/slow_log.h index cda749adf..255ee5666 100644 --- a/src/slow_log.h +++ b/src/slow_log.h @@ -22,7 +22,7 @@ struct SlowLogItem { SlowLogItem() : used(0) {} - SlowLogItem(SlowLogItem&& item) : used(item.used), cmds(std::move(item.cmds)) {} + SlowLogItem(SlowLogItem&& item) 
noexcept : used(item.used), cmds(std::move(item.cmds)) {} }; class PSlowLog { diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 5203e3bb3..c1a0daf16 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -58,8 +58,8 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); + // Set up separate configuration for RocksDB rocksdb::DBOptions db_ops(storage_options.options); - db_ops.create_missing_column_families = true; // string column-family options rocksdb::ColumnFamilyOptions string_cf_ops(storage_options.options); diff --git a/src/store.cc b/src/store.cc index bd2b6c684..22cf42001 100644 --- a/src/store.cc +++ b/src/store.cc @@ -20,20 +20,13 @@ PStore& PStore::Instance() { } void PStore::Init(int dbNum) { - if (g_config.backend == kBackEndNone) { - return; - } - backends_.reserve(dbNum); - - if (g_config.backend == kBackEndRocksDB) { - for (int i = 0; i < dbNum; i++) { - auto db = std::make_unique(i, g_config.dbpath); - backends_.push_back(std::move(db)); - } - } else { - ERROR("unsupport backend!"); + for (int i = 0; i < dbNum; i++) { + auto db = std::make_unique(i, g_config.db_path); + backends_.push_back(std::move(db)); + INFO("Open DB_{} success!", i); } + INFO("STORE Init success!"); } } // namespace pikiwidb diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 000000000..96abac0bf --- /dev/null +++ b/tests/README.md @@ -0,0 +1,4 @@ +### PikiwiDB test + +* 在 PikiwiDB 目录下执行 `./pikatests.sh geo` 测试PikiwiDB GEO命令 +* 如果是`unit/type`接口, 例如 SET, 执行 `./pikiwidbtests.sh type/set` 测试PikiwiDB SET命令 \ No newline at end of file diff --git a/tests/admin_test.go b/tests/admin_test.go index fff95789e..348cd999a 100644 --- a/tests/admin_test.go +++ b/tests/admin_test.go @@ -68,6 +68,26 @@ var _ = Describe("Admin", Ordered, func() { 
Expect(client.Info(ctx).Val()).NotTo(Equal("FooBar")) }) + It("Cmd Shutdown", func() { + Expect(client.Shutdown(ctx).Err()).NotTo(HaveOccurred()) + + // PikiwiDB does not support the Ping command right now + // wait for 5 seconds and then ping server + // time.Sleep(5 * time.Second) + // Expect(client.Ping(ctx).Err()).To(HaveOccurred()) + + // restart server + config := util.GetConfPath(false, 0) + s = util.StartServer(config, map[string]string{"port": strconv.Itoa(7777)}, true) + Expect(s).NotTo(Equal(nil)) + + // PikiwiDB does not support the Ping command right now + // wait for 5 seconds and then ping server + // time.Sleep(5 * time.Second) + // client = s.NewClient() + // Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred()) + }) + It("Cmd Select", func() { var outRangeNumber = 100 @@ -102,4 +122,34 @@ var _ = Describe("Admin", Ordered, func() { Expect(eDel).NotTo(HaveOccurred()) Expect(rDel).To(Equal(int64(1))) }) + + It("Cmd Config", func() { + res := client.ConfigGet(ctx, "timeout") + Expect(res.Err()).NotTo(HaveOccurred()) + Expect(res.Val()).To(Equal(map[string]string{"timeout": "0"})) + + res = client.ConfigGet(ctx, "daemonize") + Expect(res.Err()).NotTo(HaveOccurred()) + Expect(res.Val()).To(Equal(map[string]string{"daemonize": "no"})) + + resSet := client.ConfigSet(ctx, "timeout", "60") + Expect(resSet.Err()).NotTo(HaveOccurred()) + Expect(resSet.Val()).To(Equal("OK")) + + resSet = client.ConfigSet(ctx, "daemonize", "yes") + Expect(resSet.Err()).To(MatchError("ERR Invalid Argument")) + + res = client.ConfigGet(ctx, "timeout") + Expect(res.Err()).NotTo(HaveOccurred()) + Expect(res.Val()).To(Equal(map[string]string{"timeout": "60"})) + + res = client.ConfigGet(ctx, "time*") + Expect(res.Err()).NotTo(HaveOccurred()) + Expect(res.Val()).To(Equal(map[string]string{"timeout": "60"})) + }) + + It("PING", func() { + ping := client.Ping(ctx) + Expect(ping.Err()).NotTo(HaveOccurred()) + }) }) diff --git a/tests/assets/default.conf b/tests/assets/default.conf new 
file mode 100644 index 000000000..f80c86d5c --- /dev/null +++ b/tests/assets/default.conf @@ -0,0 +1,351 @@ +# PikiwiDB configuration file example + +# By default PikiwiDB does not run as a daemon. Use 'yes' if you need it. +daemonize no + +# Accept connections on the specified port, default is 9221. +# port 0 is not permitted. +port 9221 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +# bind 127.0.0.1 + + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Directory to store the data of PikiwiDB. +db-path ./db/ + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel warning + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 3 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. 
+# +# It is also possible to remove all the previously configured save +points by adding a save directive with a single empty string argument +like in the following example: +# +# save "" + +#save 900 1 +#save 300 10 +#save 60000 1000000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +disaster will happen. +# +# If the background saving process will start working again Redis will +automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +and persistence, you may want to disable this feature so that Redis will +continue to work as usually even if there are problems with disk, +permissions, and so forth. +stop-writes-on-bgsave-error yes # not support + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes # PikiwiDB always use compression for rdb file + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +hit to pay (around 10%) when saving and loading RDB files, so you can disable it +for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +tell the loading code to skip the check. +rdbchecksum yes # PikiwiDB always check sum for rdb file + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. 
+# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +# slaveof 127.0.0.1 6379 + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth foobar + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +# slave-serve-stale-data yes # not support yet + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. 
+# +# Note: read only slaves are not designed to be exposed to untrusted clients +on the internet. It's just a protection layer against misuse of the instance. +Still a read only slave exports by default all the administrative commands +such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +security of read only slaves using 'rename-command' to shadow all the +administrative / dangerous commands. +slave-read-only yes # PikiwiDB always set slave read only + +# Slaves send PINGs to server in a predefined interval. It's possible to change +this interval with the repl_ping_slave_period option. The default value is 10 +seconds. +# +# repl-ping-slave-period 10 + +# Limit the maximum number of bytes returned to the client, currently only the hgetall command will be restricted +By default the size is 1073741824. +# max-client-response-size 1073741824 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +specified for repl-ping-slave-period otherwise a timeout will be detected +every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# The slave priority is an integer number published by Redis in the INFO output. +It is used by Redis Sentinel in order to select a slave to promote into a +master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +for instance if there are three slaves with priority 10, 100, 25 Sentinel will +pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +role of master, so a slave with priority of 0 will never be selected by +Redis Sentinel for promotion. +# +# By default the priority is 100. 
+slave-priority 100 # not support yet + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +#requirepass foobar + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. 
+# When the memory limit is reached Redis will try to remove keys +accordingly to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +set to 'noeviction', Redis will start to reply with errors to commands +that would use more memory, like SET, LPUSH, and so on, and will continue +to reply to read-only commands like GET. +# +maxmemory 999999999999 + +# MAXMEMORY POLICY: how PikiwiDB will select what to remove when maxmemory +is reached. You can select among five behaviors: +# + +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# noeviction -> don't expire at all, just return an error on write operations +# The default is: +# +maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +algorithms (in order to save memory), so you can select as well the sample +size to check. For instance for default PikiwiDB will check 5 keys and +pick the one that was used less recently, you can change the sample size +using the following configuration directive. +# +maxmemory-samples 5 + + +################################ THREADED I/O ################################# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +threads, if you have a 8 cores, try to use 6 threads. In order to +enable I/O threads use the following configuration directive: +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. +# +worker-threads 2 +slave-threads 2 + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +still in execution after the maximum allowed time and will start to +reply to queries with an error. 
+# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +#lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. 
+slowlog-max-len 128 + +############################### ADVANCED CONFIG ############################### + +# Redis calls an internal function to perform many background tasks, like +closing connections of clients in timeout, purging expired keys that are +never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +tasks to perform accordingly to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +Redis is idle, but at the same time will make Redis more responsive when +there are many keys expiring at the same time, and timeouts may be +handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +a good idea. Most users should use the default of 10 and raise this up to +100 only in environments where very low latency is required. +hz 10 +############################### BACKENDS CONFIG ############################### +# PikiwiDB is an in memory database, though it has aof and rdb for dump data to disk, it +is very limited. Try use leveldb for real storage, pikiwidb as cache. 
The cache algorithm +# is like linux page cache, please google or read your favorite linux book +# 0 is default, no backend +# 1 is RocksDB, currently only support RocksDB +backend 1 +backendpath dump +# the frequency of dump to backend per second +backendhz 10 +# the rocksdb number per db +db-instance-num 5 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200 diff --git a/tests/assets/encodings.rdb b/tests/assets/encodings.rdb new file mode 100644 index 000000000..9fd9b705d Binary files /dev/null and b/tests/assets/encodings.rdb differ diff --git a/tests/assets/hash-zipmap.rdb b/tests/assets/hash-zipmap.rdb new file mode 100644 index 000000000..27a42ed4b Binary files /dev/null and b/tests/assets/hash-zipmap.rdb differ diff --git a/tests/hash_test.go b/tests/hash_test.go index caf90762f..7dffb6306 100644 --- a/tests/hash_test.go +++ b/tests/hash_test.go @@ -229,7 +229,7 @@ var _ = Describe("Hash", Ordered, func() { hGet := client.HGet(ctx, "hash", "key") Expect(hGet.Err()).NotTo(HaveOccurred()) Expect(hGet.Val()).To(Equal("hello")) - }) + }) It("should HIncrBy", func() { hSet := client.HSet(ctx, "hash", "key", "5") @@ -331,4 +331,34 @@ var _ = Describe("Hash", Ordered, func() { res1 := client.HRandField(ctx, "not_exist_key", 1).Val() Expect(len(res1)).To(Equal(0)) }) + + It("should HExists", func() { + hSet := client.HSet(ctx, "hash", "key", "hello") + Expect(hSet.Err()).NotTo(HaveOccurred()) + + hExists := client.HExists(ctx, "hash", "key") + Expect(hExists.Err()).NotTo(HaveOccurred()) + Expect(hExists.Val()).To(Equal(true)) + + hExists = client.HExists(ctx, "hash", "key1") + Expect(hExists.Err()).NotTo(HaveOccurred()) + Expect(hExists.Val()).To(Equal(false)) + }) + + It("should HScan", func() { + hSet := client.HSet(ctx, "hScanTest", "key1", "value1") + Expect(hSet.Err()).NotTo(HaveOccurred()) + + hSet = client.HSet(ctx, "hScanTest", "key2", "value2") + Expect(hSet.Err()).NotTo(HaveOccurred()) + + hSet = 
client.HSet(ctx, "hScanTest", "key3", "value3") + Expect(hSet.Err()).NotTo(HaveOccurred()) + + hScan := client.HScan(ctx, "hScanTest", 0, "key*", 3) + Expect(hScan.Err()).NotTo(HaveOccurred()) + keys, cursor := hScan.Val() + Expect(cursor).To(Equal(uint64(0))) + Expect(keys).To(ConsistOf([]string{"key1", "value1", "key2", "value2", "key3", "value3"})) + }) }) diff --git a/tests/helpers/bg_complex_data.tcl b/tests/helpers/bg_complex_data.tcl new file mode 100644 index 000000000..dffd7c668 --- /dev/null +++ b/tests/helpers/bg_complex_data.tcl @@ -0,0 +1,10 @@ +source tests/support/redis.tcl +source tests/support/util.tcl + +proc bg_complex_data {host port db ops} { + set r [redis $host $port] + $r select $db + createComplexDataset $r $ops +} + +bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3] diff --git a/tests/helpers/gen_write_load.tcl b/tests/helpers/gen_write_load.tcl new file mode 100644 index 000000000..6d1a34516 --- /dev/null +++ b/tests/helpers/gen_write_load.tcl @@ -0,0 +1,15 @@ +source tests/support/redis.tcl + +proc gen_write_load {host port seconds} { + set start_time [clock seconds] + set r [redis $host $port 1] + $r select 9 + while 1 { + $r set [expr rand()] [expr rand()] + if {[clock seconds]-$start_time > $seconds} { + exit 0 + } + } +} + +gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] diff --git a/tests/helpers/redis_queue.py b/tests/helpers/redis_queue.py new file mode 100644 index 000000000..9203c2d0d --- /dev/null +++ b/tests/helpers/redis_queue.py @@ -0,0 +1,111 @@ +import redis +import sys +import time +import threading +import signal + +START_FLAG = True + + +def enqueue(client: redis.Redis, queue_name: str): + while START_FLAG: + n = client.zcard(queue_name) + if n >= 1000: + time.sleep(0.1) + continue + now_ms = int(time.time() * 1000) + pipeline = client.pipeline(transaction=False) + for i in range(10): + score = now_ms << 5 | i + pipeline.zadd(queue_name, {str(score): score}) + 
pipeline.execute() + print("enqueue exit") + + +def dequeue(client: redis.Redis, queue_name: str): + loop = 0 + while START_FLAG: + start_time = time.time() + n = client.zcard(queue_name) + if n <= 10: + time.sleep(0.1) + continue + res = client.zremrangebyrank(queue_name, 0, 9) + latency = time.time() - start_time + loop += 1 + if loop % 20 == 0: + print("latency: {}ms".format(int(latency * 1000000)/1000)) + loop = 0 + print("dequeue exit") + + +def compact(client: redis.Redis, queue_name: str): + loop = 0 + while START_FLAG: + time.sleep(1) + loop += 1 + if loop % 60 == 0: + client.execute_command("compactrange", "db0", "zset", queue_name, queue_name) + print("compact queue {}".format(queue_name)) + loop = 0 + print("compact exit") + + +def auto_compact(client: redis.Redis): + client.config_set("max-cache-statistic-keys", 10000) + client.config_set("small-compaction-threshold", 10000) + client.config_set("small-compaction-duration-threshold", 10000) + + +def main(): + if len(sys.argv) != 5: + print("Usage: python redis_queue.py $redis_host $port $passwd [compact | auto_compact]") + sys.exit(1) + host = sys.argv[1] + port = int(sys.argv[2]) + passwd = sys.argv[3] + mode = sys.argv[4] + + thread_list = [] + queue_name = "test_queue" + + client_enqueue = redis.Redis(host=host, port=port, password=passwd) + t1 = threading.Thread(target=enqueue, args=(client_enqueue, queue_name)) + thread_list.append(t1) + + client_dequeue = redis.Redis(host=host, port=port, password=passwd) + t2 = threading.Thread(target=dequeue, args=(client_dequeue, queue_name)) + thread_list.append(t2) + + client_compact = redis.Redis(host=host, port=port, password=passwd) + if mode == "compact": + t3 = threading.Thread(target=compact, args=(client_compact, queue_name)) + thread_list.append(t3) + elif mode == "auto_compact": + auto_compact(client_compact) + else: + print("invalid compact mode: {}".format(mode)) + sys.exit(1) + + for t in thread_list: + t.start() + + def signal_handler(signal, 
frame): + print("revc signal: {}".format(signal)) + global START_FLAG + START_FLAG = False + for t in thread_list: + t.join() + print("exit") + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGQUIT, signal_handler) + + while True: + time.sleep(60) + + +if __name__ == "__main__": + main() diff --git a/tests/key_test.go b/tests/key_test.go index a0e5d874c..91a4a5db5 100644 --- a/tests/key_test.go +++ b/tests/key_test.go @@ -109,6 +109,69 @@ var _ = Describe("Keyspace", Ordered, func() { Expect(n).To(Equal(int64(0))) }) + It("Type", func() { + set := client.Set(ctx, "key", "value", 0) + Expect(set.Err()).NotTo(HaveOccurred()) + Expect(set.Val()).To(Equal("OK")) + + lPush := client.LPush(ctx, "mlist", "hello") + Expect(lPush.Err()).NotTo(HaveOccurred()) + Expect(lPush.Val()).To(Equal(int64(1))) + + sAdd := client.SAdd(ctx, "mset", "world") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + Expect(sAdd.Val()).To(Equal(int64(1))) + + Expect(client.Type(ctx, "key").Val()).To(Equal("string")) + Expect(client.Type(ctx, "mlist").Val()).To(Equal("list")) + Expect(client.Type(ctx, "mset").Val()).To(Equal("set")) + + Expect(client.Del(ctx, "key", "mlist", "mset").Err()).NotTo(HaveOccurred()) + }) + + It("Expire", func() { + Expect(client.Set(ctx, "key_3s", "value", 0).Val()).To(Equal("OK")) + Expect(client.Expire(ctx, "key_3s", 3*time.Second).Val()).To(Equal(true)) + Expect(client.TTL(ctx, "key_3s").Val()).NotTo(Equal(int64(-2))) + + time.Sleep(4 * time.Second) + Expect(client.TTL(ctx, "key_3s").Val()).To(Equal(time.Duration(-2))) + Expect(client.Get(ctx, "key_3s").Err()).To(MatchError(redis.Nil)) + Expect(client.Exists(ctx, "key_3s").Val()).To(Equal(int64(0))) + + Expect(client.Do(ctx, "expire", "foo", "bar").Err()).To(MatchError("ERR value is not an integer or out of range")) + + }) + + It("TTL", func() { + set := client.Set(ctx, "key1", "bcd", 10*time.Minute) + 
Expect(set.Err()).NotTo(HaveOccurred()) + Expect(set.Val()).To(Equal("OK")) + Expect(client.TTL(ctx, "key1").Val()).NotTo(Equal(int64(-2))) + + get := client.Get(ctx, "key1") + Expect(get.Err()).NotTo(HaveOccurred()) + Expect(get.Val()).To(Equal("bcd")) + Expect(client.TTL(ctx, "key1").Val()).NotTo(Equal(int64(-2))) + + _, err := client.Del(ctx, "key1").Result() + Expect(err).NotTo(HaveOccurred()) + + set1 := client.Set(ctx, "key1", "bcd", 10*time.Minute) + Expect(set1.Err()).NotTo(HaveOccurred()) + Expect(set1.Val()).To(Equal("OK")) + Expect(client.TTL(ctx, "key1").Val()).NotTo(Equal(int64(-2))) + + mGet := client.MGet(ctx, "key1") + Expect(mGet.Err()).NotTo(HaveOccurred()) + Expect(mGet.Val()).To(Equal([]interface{}{"bcd"})) + + Expect(client.TTL(ctx, "key1").Val()).NotTo(Equal(int64(-2))) + + Expect(client.Expire(ctx, "key1", 1*time.Second).Val()).To(Equal(true)) + time.Sleep(2 * time.Second) + }) + // pikiwidb should treat numbers other than base-10 as strings It("base", func() { set := client.Set(ctx, "key", "0b1", 0) @@ -240,4 +303,18 @@ var _ = Describe("Keyspace", Ordered, func() { // del keys Expect(client.Del(ctx, "a1", "k1", "k2", "k3", "k4", "k5").Err()).NotTo(HaveOccurred()) }) + + It("should pexpire", func() { + Expect(client.Set(ctx, DefaultKey, DefaultValue, 0).Val()).To(Equal(OK)) + Expect(client.PExpire(ctx, DefaultKey, 3000*time.Millisecond).Val()).To(Equal(true)) + Expect(client.PTTL(ctx, DefaultKey).Val()).NotTo(Equal(time.Duration(-2))) + + time.Sleep(4 * time.Second) + Expect(client.PTTL(ctx, DefaultKey).Val()).To(Equal(time.Duration(-2))) + Expect(client.Get(ctx, DefaultKey).Err()).To(MatchError(redis.Nil)) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(0))) + + Expect(client.Do(ctx, "pexpire", DefaultKey, "err").Err()).To(MatchError("ERR value is not an integer or out of range")) + }) + }) diff --git a/tests/sentinel/run.tcl b/tests/sentinel/run.tcl new file mode 100644 index 000000000..f33029959 --- /dev/null +++ 
b/tests/sentinel/run.tcl @@ -0,0 +1,22 @@ +# Sentinel test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com +# This software is released under the BSD License. See the COPYING file for +# more information. + +cd tests/sentinel +source ../instances.tcl + +set ::instances_count 5 ; # How many instances we use at max. + +proc main {} { + parse_options + spawn_instance sentinel $::sentinel_base_port $::instances_count + spawn_instance redis $::redis_base_port $::instances_count + run_tests + cleanup +} + +if {[catch main e]} { + puts $::errorInfo + cleanup + exit 1 +} diff --git a/tests/sentinel/tests/00-base.tcl b/tests/sentinel/tests/00-base.tcl new file mode 100644 index 000000000..a79d0c371 --- /dev/null +++ b/tests/sentinel/tests/00-base.tcl @@ -0,0 +1,126 @@ +# Check the basic monitoring and failover capabilities. + +source "../tests/includes/init-tests.tcl" + +if {$::simulate_error} { + test "This test will fail" { + fail "Simulated error" + } +} + +test "Basic failover works if the master is down" { + set old_port [RI $master_id tcp_port] + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + assert {[lindex $addr 1] == $old_port} + kill_instance redis $master_id + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + } else { + fail "At least one Sentinel did not received failover info" + } + } + restart_instance redis $master_id + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set master_id [get_instance_id_by_port redis [lindex $addr 1]] +} + +test "New master [join $addr {:}] role matches" { + assert {[RI $master_id role] eq {master}} +} + +test "All the other slaves now point to the new master" { + foreach_redis_id id { + if {$id != $master_id && $id != 0} { + wait_for_condition 1000 50 { + [RI $id master_port] == [lindex $addr 1] + } else { + fail "Redis ID $id not configured to replicate with new master" + } + } + } +} + +test "The old master 
eventually gets reconfigured as a slave" { + wait_for_condition 1000 50 { + [RI 0 master_port] == [lindex $addr 1] + } else { + fail "Old master not reconfigured as slave of new master" + } +} + +test "ODOWN is not possible without N (quorum) Sentinels reports" { + foreach_sentinel_id id { + S $id SENTINEL SET mymaster quorum [expr $sentinels+1] + } + set old_port [RI $master_id tcp_port] + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + assert {[lindex $addr 1] == $old_port} + kill_instance redis $master_id + + # Make sure failover did not happened. + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + assert {[lindex $addr 1] == $old_port} + restart_instance redis $master_id +} + +test "Failover is not possible without majority agreement" { + foreach_sentinel_id id { + S $id SENTINEL SET mymaster quorum $quorum + } + + # Crash majority of sentinels + for {set id 0} {$id < $quorum} {incr id} { + kill_instance sentinel $id + } + + # Kill the current master + kill_instance redis $master_id + + # Make sure failover did not happened. + set addr [S $quorum SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + assert {[lindex $addr 1] == $old_port} + restart_instance redis $master_id + + # Cleanup: restart Sentinels to monitor the master. 
+ for {set id 0} {$id < $quorum} {incr id} { + restart_instance sentinel $id + } +} + +test "Failover works if we configure for absolute agreement" { + foreach_sentinel_id id { + S $id SENTINEL SET mymaster quorum $sentinels + } + + # Wait for Sentinels to monitor the master again + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [dict get [S $id SENTINEL MASTER mymaster] info-refresh] < 100000 + } else { + fail "At least one Sentinel is not monitoring the master" + } + } + + kill_instance redis $master_id + + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + } else { + fail "At least one Sentinel did not received failover info" + } + } + restart_instance redis $master_id + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set master_id [get_instance_id_by_port redis [lindex $addr 1]] + + # Set the min ODOWN agreement back to strict majority. + foreach_sentinel_id id { + S $id SENTINEL SET mymaster quorum $quorum + } +} + +test "New master [join $addr {:}] role matches" { + assert {[RI $master_id role] eq {master}} +} diff --git a/tests/sentinel/tests/01-conf-update.tcl b/tests/sentinel/tests/01-conf-update.tcl new file mode 100644 index 000000000..4998104d2 --- /dev/null +++ b/tests/sentinel/tests/01-conf-update.tcl @@ -0,0 +1,39 @@ +# Test Sentinel configuration consistency after partitions heal. 
+ +source "../tests/includes/init-tests.tcl" + +test "We can failover with Sentinel 1 crashed" { + set old_port [RI $master_id tcp_port] + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + assert {[lindex $addr 1] == $old_port} + + # Crash Sentinel 1 + kill_instance sentinel 1 + + kill_instance redis $master_id + foreach_sentinel_id id { + if {$id != 1} { + wait_for_condition 1000 50 { + [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + } else { + fail "Sentinel $id did not received failover info" + } + } + } + restart_instance redis $master_id + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set master_id [get_instance_id_by_port redis [lindex $addr 1]] +} + +test "After Sentinel 1 is restarted, its config gets updated" { + restart_instance sentinel 1 + wait_for_condition 1000 50 { + [lindex [S 1 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + } else { + fail "Restarted Sentinel did not received failover info" + } +} + +test "New master [join $addr {:}] role matches" { + assert {[RI $master_id role] eq {master}} +} diff --git a/tests/sentinel/tests/02-slaves-reconf.tcl b/tests/sentinel/tests/02-slaves-reconf.tcl new file mode 100644 index 000000000..fa15d2efd --- /dev/null +++ b/tests/sentinel/tests/02-slaves-reconf.tcl @@ -0,0 +1,84 @@ +# Check that slaves are reconfigured at a latter time if they are partitioned. +# +# Here we should test: +# 1) That slaves point to the new master after failover. +# 2) That partitioned slaves point to new master when they are partitioned +# away during failover and return at a latter time. 
+ +source "../tests/includes/init-tests.tcl" + +proc 02_test_slaves_replication {} { + uplevel 1 { + test "Check that slaves replicate from current master" { + set master_port [RI $master_id tcp_port] + foreach_redis_id id { + if {$id == $master_id} continue + if {[instance_is_killed redis $id]} continue + wait_for_condition 1000 50 { + ([RI $id master_port] == $master_port) && + ([RI $id master_link_status] eq {up}) + } else { + fail "Redis slave $id is replicating from wrong master" + } + } + } + } +} + +proc 02_crash_and_failover {} { + uplevel 1 { + test "Crash the master and force a failover" { + set old_port [RI $master_id tcp_port] + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + assert {[lindex $addr 1] == $old_port} + kill_instance redis $master_id + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + } else { + fail "At least one Sentinel did not received failover info" + } + } + restart_instance redis $master_id + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set master_id [get_instance_id_by_port redis [lindex $addr 1]] + } + } +} + +02_test_slaves_replication +02_crash_and_failover +02_test_slaves_replication + +test "Kill a slave instance" { + foreach_redis_id id { + if {$id == $master_id} continue + set killed_slave_id $id + kill_instance redis $id + break + } +} + +02_crash_and_failover +02_test_slaves_replication + +test "Wait for failover to end" { + set inprogress 1 + while {$inprogress} { + set inprogress 0 + foreach_sentinel_id id { + if {[dict exists [S $id SENTINEL MASTER mymaster] failover-state]} { + incr inprogress + } + } + if {$inprogress} {after 100} + } +} + +test "Restart killed slave and test replication of slaves again..." { + restart_instance redis $killed_slave_id +} + +# Now we check if the slave rejoining the partition is reconfigured even +# if the failover finished. 
+02_test_slaves_replication diff --git a/tests/sentinel/tests/03-runtime-reconf.tcl b/tests/sentinel/tests/03-runtime-reconf.tcl new file mode 100644 index 000000000..426596c37 --- /dev/null +++ b/tests/sentinel/tests/03-runtime-reconf.tcl @@ -0,0 +1 @@ +# Test runtime reconfiguration command SENTINEL SET. diff --git a/tests/sentinel/tests/04-slave-selection.tcl b/tests/sentinel/tests/04-slave-selection.tcl new file mode 100644 index 000000000..3d2ca6484 --- /dev/null +++ b/tests/sentinel/tests/04-slave-selection.tcl @@ -0,0 +1,5 @@ +# Test slave selection algorithm. +# +# This unit should test: +# 1) That when there are no suitable slaves no failover is performed. +# 2) That among the available slaves, the one with better offset is picked. diff --git a/tests/sentinel/tests/05-manual.tcl b/tests/sentinel/tests/05-manual.tcl new file mode 100644 index 000000000..1a60d814b --- /dev/null +++ b/tests/sentinel/tests/05-manual.tcl @@ -0,0 +1,44 @@ +# Test manual failover + +source "../tests/includes/init-tests.tcl" + +test "Manual failover works" { + set old_port [RI $master_id tcp_port] + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + assert {[lindex $addr 1] == $old_port} + S 0 SENTINEL FAILOVER mymaster + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + } else { + fail "At least one Sentinel did not received failover info" + } + } + set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set master_id [get_instance_id_by_port redis [lindex $addr 1]] +} + +test "New master [join $addr {:}] role matches" { + assert {[RI $master_id role] eq {master}} +} + +test "All the other slaves now point to the new master" { + foreach_redis_id id { + if {$id != $master_id && $id != 0} { + wait_for_condition 1000 50 { + [RI $id master_port] == [lindex $addr 1] + } else { + fail "Redis ID $id not configured to replicate with new master" + } + } + } +} + +test "The old master eventually 
gets reconfigured as a slave" { + wait_for_condition 1000 50 { + [RI 0 master_port] == [lindex $addr 1] + } else { + fail "Old master not reconfigured as slave of new master" + } +} + diff --git a/tests/sentinel/tests/includes/init-tests.tcl b/tests/sentinel/tests/includes/init-tests.tcl new file mode 100644 index 000000000..c8165dcfa --- /dev/null +++ b/tests/sentinel/tests/includes/init-tests.tcl @@ -0,0 +1,72 @@ +# Initialization tests -- most units will start including this. + +test "(init) Restart killed instances" { + foreach type {redis sentinel} { + foreach_${type}_id id { + if {[get_instance_attrib $type $id pid] == -1} { + puts -nonewline "$type/$id " + flush stdout + restart_instance $type $id + } + } + } +} + +test "(init) Remove old master entry from sentinels" { + foreach_sentinel_id id { + catch {S $id SENTINEL REMOVE mymaster} + } +} + +set redis_slaves 4 +test "(init) Create a master-slaves cluster of [expr $redis_slaves+1] instances" { + create_redis_master_slave_cluster [expr {$redis_slaves+1}] +} +set master_id 0 + +test "(init) Sentinels can start monitoring a master" { + set sentinels [llength $::sentinel_instances] + set quorum [expr {$sentinels/2+1}] + foreach_sentinel_id id { + S $id SENTINEL MONITOR mymaster \ + [get_instance_attrib redis $master_id host] \ + [get_instance_attrib redis $master_id port] $quorum + } + foreach_sentinel_id id { + assert {[S $id sentinel master mymaster] ne {}} + S $id SENTINEL SET mymaster down-after-milliseconds 2000 + S $id SENTINEL SET mymaster failover-timeout 20000 + S $id SENTINEL SET mymaster parallel-syncs 10 + } +} + +test "(init) Sentinels can talk with the master" { + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [catch {S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster}] == 0 + } else { + fail "Sentinel $id can't talk with the master." 
+ } + } +} + +test "(init) Sentinels are able to auto-discover other sentinels" { + set sentinels [llength $::sentinel_instances] + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1) + } else { + fail "At least some sentinel can't detect some other sentinel" + } + } +} + +test "(init) Sentinels are able to auto-discover slaves" { + foreach_sentinel_id id { + wait_for_condition 1000 50 { + [dict get [S $id SENTINEL MASTER mymaster] num-slaves] == $redis_slaves + } else { + fail "At least some sentinel can't detect some slave" + } + } +} diff --git a/tests/sentinel/tmp/.gitignore b/tests/sentinel/tmp/.gitignore new file mode 100644 index 000000000..f581f73e2 --- /dev/null +++ b/tests/sentinel/tmp/.gitignore @@ -0,0 +1,2 @@ +redis_* +sentinel_* diff --git a/tests/set_test.go b/tests/set_test.go index 0944eaf20..d9191c3e3 100644 --- a/tests/set_test.go +++ b/tests/set_test.go @@ -227,18 +227,17 @@ var _ = Describe("Set", Ordered, func() { sCard := client.SCard(ctx, "setScard") Expect(sCard.Err()).NotTo(HaveOccurred()) Expect(sCard.Val()).To(Equal(int64(2))) - }) - - - It("should SPop", func() { - sAdd := client.SAdd(ctx, "setSpop", "one") - Expect(sAdd.Err()).NotTo(HaveOccurred()) - sAdd = client.SAdd(ctx, "setSpop", "two") - Expect(sAdd.Err()).NotTo(HaveOccurred()) - sAdd = client.SAdd(ctx, "setSpop", "three") - Expect(sAdd.Err()).NotTo(HaveOccurred()) - sAdd = client.SAdd(ctx, "setSpop", "four") - Expect(sAdd.Err()).NotTo(HaveOccurred()) + }) + + It("should SPop", func() { + sAdd := client.SAdd(ctx, "setSpop", "one") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSpop", "two") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSpop", "three") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSpop", "four") + Expect(sAdd.Err()).NotTo(HaveOccurred()) sAdd = client.SAdd(ctx, "setSpop", "five") 
Expect(sAdd.Err()).NotTo(HaveOccurred()) @@ -246,12 +245,11 @@ var _ = Describe("Set", Ordered, func() { Expect(sPopN.Err()).NotTo(HaveOccurred()) Expect(sPopN.Val()).To(HaveLen(3)) /* - sMembers := client.SMembers(ctx, "setSpop") - Expect(sMembers.Err()).NotTo(HaveOccurred()) - Expect(sMembers.Val()).To(HaveLen(2)) + sMembers := client.SMembers(ctx, "setSpop") + Expect(sMembers.Err()).NotTo(HaveOccurred()) + Expect(sMembers.Val()).To(HaveLen(2)) */ - }) It("should SMove", func() { @@ -292,27 +290,27 @@ var _ = Describe("Set", Ordered, func() { Expect(sRem.Err()).NotTo(HaveOccurred()) Expect(sRem.Val()).To(Equal(int64(0))) - // sMembers := client.SMembers(ctx, "set") - // Expect(sMembers.Err()).NotTo(HaveOccurred()) - // Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"})) + // sMembers := client.SMembers(ctx, "set") + // Expect(sMembers.Err()).NotTo(HaveOccurred()) + // Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"})) }) It("should SRandmember", func() { - sAdd := client.SAdd(ctx, "set", "one") - Expect(sAdd.Err()).NotTo(HaveOccurred()) - sAdd = client.SAdd(ctx, "set", "two") - Expect(sAdd.Err()).NotTo(HaveOccurred()) - sAdd = client.SAdd(ctx, "set", "three") - Expect(sAdd.Err()).NotTo(HaveOccurred()) - - member, err := client.SRandMember(ctx, "set").Result() - Expect(err).NotTo(HaveOccurred()) - Expect(member).NotTo(Equal("")) - - members, err := client.SRandMemberN(ctx, "set", 2).Result() - Expect(err).NotTo(HaveOccurred()) + sAdd := client.SAdd(ctx, "set", "one") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "set", "two") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "set", "three") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + + member, err := client.SRandMember(ctx, "set").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(member).NotTo(Equal("")) + + members, err := client.SRandMemberN(ctx, "set", 2).Result() + Expect(err).NotTo(HaveOccurred()) Expect(members).To(HaveLen(2)) - }) + }) 
It("should SMembers", func() { sAdd := client.SAdd(ctx, "setSMembers", "Hello") @@ -373,4 +371,44 @@ var _ = Describe("Set", Ordered, func() { Expect(sMembers.Val()).To(ConsistOf([]string{"a", "b"})) }) + It("should SScan", func() { + // add elements first + sAdd := client.SAdd(ctx, "setSScan1", "user1") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + Expect(sAdd.Val()).To(Equal(int64(1))) + + sAdd = client.SAdd(ctx, "setSScan1", "user2") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + Expect(sAdd.Val()).To(Equal(int64(1))) + + sAdd = client.SAdd(ctx, "setSScan1", "user3") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + Expect(sAdd.Val()).To(Equal(int64(1))) + + set := []string{"Hello", "World", "World"} + sAdd = client.SAdd(ctx, "setSScan1", set) + Expect(sAdd.Err()).NotTo(HaveOccurred()) + Expect(sAdd.Val()).To(Equal(int64(2))) + + // func (c Client) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + sScan := client.SScan(ctx, "setSScan1", 0, "*", 5) + Expect(sScan.Err()).NotTo(HaveOccurred()) + Expect(sScan.Val()).To(ConsistOf([]string{"user1", "user2", "user3", "Hello", "World"})) + + sScan = client.SScan(ctx, "setSScan1", 0, "user*", 5) + Expect(sScan.Err()).NotTo(HaveOccurred()) + Expect(sScan.Val()).To(ConsistOf([]string{"user1", "user2", "user3"})) + + sScan = client.SScan(ctx, "setSScan1", 0, "He*", 5) + Expect(sScan.Err()).NotTo(HaveOccurred()) + Expect(sScan.Val()).To(ConsistOf([]string{"Hello"})) + + // sScan=client.SScan(ctx,"setSScan1",0,"*",-1) + // Expect(sScan.Err()).To(HaveOccurred()) + + //del + del := client.Del(ctx, "setSScan1") + Expect(del.Err()).NotTo(HaveOccurred()) + }) + }) diff --git a/tests/string_test.go b/tests/string_test.go index 51a25117b..32748e05f 100644 --- a/tests/string_test.go +++ b/tests/string_test.go @@ -244,4 +244,27 @@ var _ = Describe("String", Ordered, func() { "", })) }) + + It("MSetnx & MGet", func() { + mSetnx := client.MSetNX(ctx, "keynx1", "hello1", "keynx2", "hello2") + 
Expect(mSetnx.Err()).NotTo(HaveOccurred()) + Expect(mSetnx.Val()).To(Equal(true)) + + mGet := client.MGet(ctx, "keynx1", "keynx2", "_") + Expect(mGet.Err()).NotTo(HaveOccurred()) + Expect(mGet.Val()).To(Equal([]interface{}{"hello1", "hello2", nil})) + + mSetnx = client.MSetNX(ctx, "keynx3", "hello3", "keynx2", "hello22") + Expect(mSetnx.Err()).NotTo(HaveOccurred()) + Expect(mSetnx.Val()).To(Equal(false)) + + mGet = client.MGet(ctx, "keynx2", "keynx3") + Expect(mGet.Err()).NotTo(HaveOccurred()) + Expect(mGet.Val()).To(Equal([]interface{}{"hello2", nil})) + + mSetnx = client.MSetNX(ctx, "keynx1") + Expect(mSetnx.Err()).To(HaveOccurred()) + Expect(mSetnx.Val()).To(Equal(false)) + }) + }) diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl new file mode 100644 index 000000000..7c7836081 --- /dev/null +++ b/tests/support/redis.tcl @@ -0,0 +1,294 @@ +# Tcl clinet library - used by test-redis.tcl script for now +# Copyright (C) 2009 Salvatore Sanfilippo +# Released under the BSD license like Redis itself +# +# Example usage: +# +# set r [redis 127.0.0.1 6379] +# $r lpush mylist foo +# $r lpush mylist bar +# $r lrange mylist 0 -1 +# $r close +# +# Non blocking usage example: +# +# proc handlePong {r type reply} { +# puts "PONG $type '$reply'" +# if {$reply ne "PONG"} { +# $r ping [list handlePong] +# } +# } +# +# set r [redis] +# $r blocking 0 +# $r get fo [list handlePong] +# +# vwait forever + +package require Tcl 8.5 +package provide redis 0.1 + +namespace eval redis {} +set ::redis::id 0 +array set ::redis::fd {} +array set ::redis::addr {} +array set ::redis::blocking {} +array set ::redis::deferred {} +array set ::redis::reconnect {} +array set ::redis::callback {} +array set ::redis::state {} ;# State in non-blocking reply reading +array set ::redis::statestack {} ;# Stack of states, for nested mbulks + +proc redis {{server 127.0.0.1} {port 6379} {defer 0}} { + set fd [socket $server $port] + fconfigure $fd -translation binary + set id [incr ::redis::id] 
+ set ::redis::fd($id) $fd + set ::redis::addr($id) [list $server $port] + set ::redis::blocking($id) 1 + set ::redis::deferred($id) $defer + set ::redis::reconnect($id) 0 + ::redis::redis_reset_state $id + interp alias {} ::redis::redisHandle$id {} ::redis::__dispatch__ $id +} + +# This is a wrapper to the actual dispatching procedure that handles +# reconnection if needed. +proc ::redis::__dispatch__ {id method args} { + set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] + if {$errorcode && $::redis::reconnect($id) && $::redis::fd($id) eq {}} { + # Try again if the connection was lost. + # FIXME: we don't re-select the previously selected DB, nor we check + # if we are inside a transaction that needs to be re-issued from + # scratch. + set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] + } + return -code $errorcode $retval +} + +proc ::redis::__dispatch__raw__ {id method argv} { + set fd $::redis::fd($id) + + # Reconnect the link if needed. 
+ if {$fd eq {}} { + lassign $::redis::addr($id) host port + set ::redis::fd($id) [socket $host $port] + fconfigure $::redis::fd($id) -translation binary + set fd $::redis::fd($id) + } + + set blocking $::redis::blocking($id) + set deferred $::redis::deferred($id) + if {$blocking == 0} { + if {[llength $argv] == 0} { + error "Please provide a callback in non-blocking mode" + } + set callback [lindex $argv end] + set argv [lrange $argv 0 end-1] + } + if {[info command ::redis::__method__$method] eq {}} { + set cmd "*[expr {[llength $argv]+1}]\r\n" + append cmd "$[string length $method]\r\n$method\r\n" + foreach a $argv { + append cmd "$[string length $a]\r\n$a\r\n" + } + ::redis::redis_write $fd $cmd + if {[catch {flush $fd}]} { + set ::redis::fd($id) {} + return -code error "I/O error reading reply" + } + + if {!$deferred} { + if {$blocking} { + ::redis::redis_read_reply $id $fd + } else { + # Every well formed reply read will pop an element from this + # list and use it as a callback. So pipelining is supported + # in non blocking mode. 
+ lappend ::redis::callback($id) $callback + fileevent $fd readable [list ::redis::redis_readable $fd $id] + } + } + } else { + uplevel 1 [list ::redis::__method__$method $id $fd] $argv + } +} + +proc ::redis::__method__blocking {id fd val} { + set ::redis::blocking($id) $val + fconfigure $fd -blocking $val +} + +proc ::redis::__method__reconnect {id fd val} { + set ::redis::reconnect($id) $val +} + +proc ::redis::__method__read {id fd} { + ::redis::redis_read_reply $id $fd +} + +proc ::redis::__method__write {id fd buf} { + ::redis::redis_write $fd $buf +} + +proc ::redis::__method__flush {id fd} { + flush $fd +} + +proc ::redis::__method__close {id fd} { + catch {close $fd} + catch {unset ::redis::fd($id)} + catch {unset ::redis::addr($id)} + catch {unset ::redis::blocking($id)} + catch {unset ::redis::deferred($id)} + catch {unset ::redis::reconnect($id)} + catch {unset ::redis::state($id)} + catch {unset ::redis::statestack($id)} + catch {unset ::redis::callback($id)} + catch {interp alias {} ::redis::redisHandle$id {}} +} + +proc ::redis::__method__channel {id fd} { + return $fd +} + +proc ::redis::__method__deferred {id fd val} { + set ::redis::deferred($id) $val +} + +proc ::redis::redis_write {fd buf} { + puts -nonewline $fd $buf +} + +proc ::redis::redis_writenl {fd buf} { + redis_write $fd $buf + redis_write $fd "\r\n" + flush $fd +} + +proc ::redis::redis_readnl {fd len} { + set buf [read $fd $len] + read $fd 2 ; # discard CR LF + return $buf +} + +proc ::redis::redis_bulk_read {fd} { + set count [redis_read_line $fd] + if {$count == -1} return {} + set buf [redis_readnl $fd $count] + return $buf +} + +proc ::redis::redis_multi_bulk_read {id fd} { + set count [redis_read_line $fd] + if {$count == -1} return {} + set l {} + set err {} + for {set i 0} {$i < $count} {incr i} { + if {[catch { + lappend l [redis_read_reply $id $fd] + } e] && $err eq {}} { + set err $e + } + } + if {$err ne {}} {return -code error $err} + return $l +} + +proc 
::redis::redis_read_line fd { + string trim [gets $fd] +} + +proc ::redis::redis_read_reply {id fd} { + set type [read $fd 1] + switch -exact -- $type { + : - + + {redis_read_line $fd} + - {return -code error [redis_read_line $fd]} + $ {redis_bulk_read $fd} + * {redis_multi_bulk_read $id $fd} + default { + if {$type eq {}} { + set ::redis::fd($id) {} + return -code error "I/O error reading reply" + } + return -code error "Bad protocol, '$type' as reply type byte" + } + } +} + +proc ::redis::redis_reset_state id { + set ::redis::state($id) [dict create buf {} mbulk -1 bulk -1 reply {}] + set ::redis::statestack($id) {} +} + +proc ::redis::redis_call_callback {id type reply} { + set cb [lindex $::redis::callback($id) 0] + set ::redis::callback($id) [lrange $::redis::callback($id) 1 end] + uplevel #0 $cb [list ::redis::redisHandle$id $type $reply] + ::redis::redis_reset_state $id +} + +# Read a reply in non-blocking mode. +proc ::redis::redis_readable {fd id} { + if {[eof $fd]} { + redis_call_callback $id eof {} + ::redis::__method__close $id $fd + return + } + if {[dict get $::redis::state($id) bulk] == -1} { + set line [gets $fd] + if {$line eq {}} return ;# No complete line available, return + switch -exact -- [string index $line 0] { + : - + + {redis_call_callback $id reply [string range $line 1 end-1]} + - {redis_call_callback $id err [string range $line 1 end-1]} + $ { + dict set ::redis::state($id) bulk \ + [expr [string range $line 1 end-1]+2] + if {[dict get $::redis::state($id) bulk] == 1} { + # We got a $-1, hack the state to play well with this. 
+ dict set ::redis::state($id) bulk 2 + dict set ::redis::state($id) buf "\r\n" + ::redis::redis_readable $fd $id + } + } + * { + dict set ::redis::state($id) mbulk [string range $line 1 end-1] + # Handle *-1 + if {[dict get $::redis::state($id) mbulk] == -1} { + redis_call_callback $id reply {} + } + } + default { + redis_call_callback $id err \ + "Bad protocol, $type as reply type byte" + } + } + } else { + set totlen [dict get $::redis::state($id) bulk] + set buflen [string length [dict get $::redis::state($id) buf]] + set toread [expr {$totlen-$buflen}] + set data [read $fd $toread] + set nread [string length $data] + dict append ::redis::state($id) buf $data + # Check if we read a complete bulk reply + if {[string length [dict get $::redis::state($id) buf]] == + [dict get $::redis::state($id) bulk]} { + if {[dict get $::redis::state($id) mbulk] == -1} { + redis_call_callback $id reply \ + [string range [dict get $::redis::state($id) buf] 0 end-2] + } else { + dict with ::redis::state($id) { + lappend reply [string range $buf 0 end-2] + incr mbulk -1 + set bulk -1 + } + if {[dict get $::redis::state($id) mbulk] == 0} { + redis_call_callback $id reply \ + [dict get $::redis::state($id) reply] + } + } + } + } +} diff --git a/tests/support/server.tcl b/tests/support/server.tcl new file mode 100644 index 000000000..d6ced093a --- /dev/null +++ b/tests/support/server.tcl @@ -0,0 +1,349 @@ +set ::global_overrides {} +set ::tags {} +set ::valgrind_errors {} + +proc start_server_error {config_file error} { + set err {} + append err "Cant' start the Redis server\n" + append err "CONFIGURATION:" + append err [exec cat $config_file] + append err "\nERROR:" + append err [string trim $error] + send_data_packet $::test_server_fd err $err +} + +proc check_valgrind_errors stderr { + set fd [open $stderr] + set buf [read $fd] + close $fd + + if {[regexp -- { at 0x} $buf] || + (![regexp -- {definitely lost: 0 bytes} $buf] && + ![regexp -- {no leaks are possible} $buf])} { + 
send_data_packet $::test_server_fd err "Valgrind error: $buf\n" + } +} + +proc kill_server config { + # nothing to kill when running against external server + if {$::external} return + + # nevermind if its already dead + if {![is_alive $config]} { return } + set pid [dict get $config pid] + + # check for leaks + if {![dict exists $config "skipleaks"]} { + catch { + if {[string match {*Darwin*} [exec uname -a]]} { + tags {"leaks"} { + test "Check for memory leaks (pid $pid)" { + set output {0 leaks} + catch {exec leaks $pid} output + if {[string match {*process does not exist*} $output] || + [string match {*cannot examine*} $output]} { + # In a few tests we kill the server process. + set output "0 leaks" + } + set output + } {*0 leaks*} + } + } + } + } + + # kill server and wait for the process to be totally exited + catch {exec kill $pid} + while {[is_alive $config]} { + incr wait 10 + + if {$wait >= 5000} { + puts "Forcing process $pid to exit..." + catch {exec kill -KILL $pid} + } elseif {$wait % 1000 == 0} { + puts "Waiting for process $pid to exit..." + } + after 10 + } + + # Check valgrind errors if needed + if {$::valgrind} { + check_valgrind_errors [dict get $config stderr] + } + + # Remove this pid from the set of active pids in the test server. + send_data_packet $::test_server_fd server-killed $pid +} + +proc is_alive config { + set pid [dict get $config pid] + if {[catch {exec ps -p $pid} err]} { + return 0 + } else { + return 1 + } +} + +proc ping_server {host port} { + set retval 0 + if {[catch { + set fd [socket $host $port] + fconfigure $fd -translation binary + puts $fd "PING\r\n" + flush $fd + set reply [gets $fd] + if {[string range $reply 0 0] eq {+} || + [string range $reply 0 0] eq {-}} { + set retval 1 + } + close $fd + } e]} { + if {$::verbose} { + puts -nonewline "." 
+ } + } else { + if {$::verbose} { + puts -nonewline "ok" + } + } + return $retval +} + +# Return 1 if the server at the specified addr is reachable by PING, otherwise +# returns 0. Performs a try every 50 milliseconds for the specified number +# of retries. +proc server_is_up {host port retrynum} { + after 10 ;# Use a small delay to make likely a first-try success. + set retval 0 + while {[incr retrynum -1]} { + if {[catch {ping_server $host $port} ping]} { + set ping 0 + } + if {$ping} {return 1} + after 50 + } + return 0 +} + +# doesn't really belong here, but highly coupled to code in start_server +proc tags {tags code} { + set ::tags [concat $::tags $tags] + uplevel 1 $code + set ::tags [lrange $::tags 0 end-[llength $tags]] +} + +proc start_server {options {code undefined}} { + # If we are running against an external server, we just push the + # host/port pair in the stack the first time + if {$::external} { + if {[llength $::servers] == 0} { + set srv {} + dict set srv "host" $::host + dict set srv "port" $::port + set client [redis $::host $::port] + dict set srv "client" $client + # $client select 9 + + # append the server to the stack + lappend ::servers $srv + } + uplevel 1 $code + return + } + + # setup defaults + set baseconfig "default.conf" + set overrides {} + set tags {} + + # parse options + foreach {option value} $options { + switch $option { + "config" { + set baseconfig $value } + "overrides" { + set overrides $value } + "tags" { + set tags $value + set ::tags [concat $::tags $value] } + default { + error "Unknown option $option" } + } + } + + set data [split [exec cat "tests/assets/$baseconfig"] "\n"] + set config {} + foreach line $data { + if {[string length $line] == 0 || [string index $line 0] eq "#"} { + continue + } + + set parts [regexp -all -inline {\S+} $line] + set directive [lindex $parts 0] + + if {[llength $parts] > 1} { + set arguments [lrange $parts 1 end] + + set formatted_args {} + foreach arg $arguments { + lappend 
formatted_args [string trim $arg "{}"] + } + + dict set config $directive $formatted_args + } else { + dict set config $directive "" + } + } + + # use a different directory every time a server is started + dict set config dir [tmpdir server] + + # start every server on a different port + set ::port [find_available_port [expr {$::port+1}]] + dict set config port $::port + + # start every server on a different path + dict set config log-path ./logs$::port/ + dict set config db-path ./db$::port/ + # dict set config dump-path ./dump$::port/ + + # apply overrides from global space and arguments + foreach {directive arguments} [concat $::global_overrides $overrides] { + dict set config $directive $arguments + } + + # write new configuration to temporary file + set config_file [tmpfile redis.conf] + set fp [open $config_file w+] + foreach directive [dict keys $config] { + set value [dict get $config $directive] + if {[string is integer -strict $value]} { + puts $fp "$directive $value" + } else { + puts $fp "$directive $value" + } + } + close $fp + set stdout [format "%s/%s" [dict get $config "dir"] "stdout"] + set stderr [format "%s/%s" [dict get $config "dir"] "stderr"] + + if {$::valgrind} { + set pid [exec valgrind --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &] + } else { + #set pid [exec src/redis-server -c $config_file > $stdout 2> $stderr &] + set pid [exec src/redis-server $config_file > $stdout 2> $stderr &] + } + + puts "Starting ---- " + + # Tell the test server about this new instance. + send_data_packet $::test_server_fd server-spawned $pid + + # check that the server actually started + # ugly but tries to be as fast as possible... 
+ if {$::valgrind} {set retrynum 1000} else {set retrynum 100} + + if {$::verbose} { + puts -nonewline "=== ($tags) Starting server ${::host}:${::port} " + } + + if {$code ne "undefined"} { + set serverisup [server_is_up $::host $::port $retrynum] + } else { + set serverisup 1 + } + + if {$::verbose} { + puts "" + } + + if {!$serverisup} { + set err {} + append err [exec cat $stdout] "\n" [exec cat $stderr] + start_server_error $config_file $err + return + } + + puts "Before Wait" + # Wait for actual startup + #while {![info exists _pid]} { + # regexp {PID:\s(\d+)} [exec cat $stdout] _ _pid + # after 100 + #} + puts "After Wait" + + # setup properties to be able to initialize a client object + set host $::host + set port $::port + if {[dict exists $config bind]} { set host [dict get $config bind] } + if {[dict exists $config port]} { set port [dict get $config port] } + + # setup config dict + dict set srv "config_file" $config_file + dict set srv "config" $config + dict set srv "pid" $pid + dict set srv "host" $host + dict set srv "port" $port + dict set srv "stdout" $stdout + dict set srv "stderr" $stderr + + # if a block of code is supplied, we wait for the server to become + # available, create a client object and kill the server afterwards + if {$code ne "undefined"} { + set line [exec head -n1 $stdout] + if {[string match {*already in use*} $line]} { + error_and_quit $config_file $line + } + + # while 1 { + # # check that the server actually started and is ready for connections + # if {[exec grep "going to start" | wc -l < $stderr] > 0} { + # break + # } + # puts "Fuck YYB" + # after 10 + #} + + # append the server to the stack + lappend ::servers $srv + + # connect client (after server dict is put on the stack) + reconnect + + # execute provided block + set num_tests $::num_tests + if {[catch { uplevel 1 $code } error]} { + set backtrace $::errorInfo + + # Kill the server without checking for leaks + dict set srv "skipleaks" 1 + kill_server $srv + + # Print 
warnings from log + puts [format "\nLogged warnings (pid %d):" [dict get $srv "pid"]] + set warnings [warnings_from_file [dict get $srv "stdout"]] + if {[string length $warnings] > 0} { + puts "$warnings" + } else { + puts "(none)" + } + puts "" + + error $error $backtrace + } + + # Don't do the leak check when no tests were run + if {$num_tests == $::num_tests} { + dict set srv "skipleaks" 1 + } + + # pop the server object + set ::servers [lrange $::servers 0 end-1] + + set ::tags [lrange $::tags 0 end-[llength $tags]] + kill_server $srv + } else { + set ::tags [lrange $::tags 0 end-[llength $tags]] + set _ $srv + } +} diff --git a/tests/support/test.tcl b/tests/support/test.tcl new file mode 100644 index 000000000..d9b81f2d9 --- /dev/null +++ b/tests/support/test.tcl @@ -0,0 +1,153 @@ +set ::num_tests 0 +set ::num_passed 0 +set ::num_failed 0 +set ::tests_failed {} + +proc fail {msg} { + error "assertion:$msg" +} + +proc assert {condition} { + if {![uplevel 1 [list expr $condition]]} { + error "assertion:Expected condition '$condition' to be true ([uplevel 1 [list subst -nocommands $condition]])" + } +} + +proc assert_no_match {pattern value} { + if {[string match $pattern $value]} { + set context "(context: [info frame -1])" + error "assertion:Expected '$value' to not match '$pattern' $context" + } +} + +proc assert_failed {expected_err detail} { + if {$detail ne ""} { + set detail "(detail: $detail)" + } else { + set detail "(context: [info frame -2])" + } + error "assertion:$expected_err $detail" +} + +proc assert_not_equal {value expected {detail ""}} { + if {!($expected ne $value)} { + assert_failed "Expected '$value' not equal to '$expected'" $detail + } +} + +proc assert_match {pattern value} { + if {![string match $pattern $value]} { + error "assertion:Expected '$value' to match '$pattern'" + } +} + +proc assert_equal {expected value} { + if {$expected ne $value} { + error "assertion:Expected '$value' to be equal to '$expected'" + } +} + +proc 
assert_error {pattern code} { + if {[catch {uplevel 1 $code} error]} { + assert_match $pattern $error + } else { + error "assertion:Expected an error but nothing was caught" + } +} + +proc assert_encoding {enc key} { + # Swapped out values don't have an encoding, so make sure that + # the value is swapped in before checking the encoding. + set dbg [r debug object $key] + while {[string match "* swapped at:*" $dbg]} { + r debug swapin $key + set dbg [r debug object $key] + } + assert_match "* encoding:$enc *" $dbg +} + +proc assert_type {type key} { + assert_equal $type [r type $key] +} + +# Wait for the specified condition to be true, with the specified number of +# max retries and delay between retries. Otherwise the 'elsescript' is +# executed. +proc wait_for_condition {maxtries delay e _else_ elsescript} { + while {[incr maxtries -1] >= 0} { + set errcode [catch {uplevel 1 [list expr $e]} result] + if {$errcode == 0} { + if {$result} break + } else { + return -code $errcode $result + } + after $delay + } + if {$maxtries == -1} { + set errcode [catch [uplevel 1 $elsescript] result] + return -code $errcode $result + } +} + +proc test {name code {okpattern undefined}} { + # abort if tagged with a tag to deny + foreach tag $::denytags { + if {[lsearch $::tags $tag] >= 0} { + return + } + } + + # check if tagged with at least 1 tag to allow when there *is* a list + # of tags to allow, because default policy is to run everything + if {[llength $::allowtags] > 0} { + set matched 0 + foreach tag $::allowtags { + if {[lsearch $::tags $tag] >= 0} { + incr matched + } + } + if {$matched < 1} { + return + } + } + + incr ::num_tests + set details {} + lappend details "$name in $::curfile" + + send_data_packet $::test_server_fd testing $name + + if {[catch {set retval [uplevel 1 $code]} error]} { + if {[string match "assertion:*" $error]} { + set msg [string range $error 10 end] + lappend details $msg + lappend ::tests_failed $details + + incr ::num_failed + send_data_packet 
$::test_server_fd err [join $details "\n"] + } else { + # Re-raise, let handler up the stack take care of this. + error $error $::errorInfo + } + } else { + if {$okpattern eq "undefined" || $okpattern eq $retval || [string match $okpattern $retval]} { + incr ::num_passed + send_data_packet $::test_server_fd ok $name + } else { + set msg "Expected '$okpattern' to equal or match '$retval'" + lappend details $msg + lappend ::tests_failed $details + + incr ::num_failed + send_data_packet $::test_server_fd err [join $details "\n"] + } + } + + if {$::traceleaks} { + set output [exec leaks redis-server] + if {![string match {*0 leaks*} $output]} { + send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" + } + } +} + diff --git a/tests/support/tmpfile.tcl b/tests/support/tmpfile.tcl new file mode 100644 index 000000000..809f58730 --- /dev/null +++ b/tests/support/tmpfile.tcl @@ -0,0 +1,15 @@ +set ::tmpcounter 0 +set ::tmproot "./tests/tmp" +file mkdir $::tmproot + +# returns a dirname unique to this process to write to +proc tmpdir {basename} { + set dir [file join $::tmproot $basename.[pid].[incr ::tmpcounter]] + file mkdir $dir + set _ $dir +} + +# return a filename unique to this process to write to +proc tmpfile {basename} { + file join $::tmproot $basename.[pid].[incr ::tmpcounter] +} diff --git a/tests/support/util.tcl b/tests/support/util.tcl new file mode 100644 index 000000000..cc628784a --- /dev/null +++ b/tests/support/util.tcl @@ -0,0 +1,400 @@ +proc randstring {min max {type binary}} { + set len [expr {$min+int(rand()*($max-$min+1))}] + set output {} + if {$type eq {binary}} { + set minval 0 + set maxval 255 + } elseif {$type eq {alpha}} { + set minval 48 + set maxval 122 + } elseif {$type eq {compr}} { + set minval 48 + set maxval 52 + } + while {$len} { + append output [format "%c" [expr {$minval+int(rand()*($maxval-$minval+1))}]] + incr len -1 + } + return $output +} + +# Useful for some test +proc zlistAlikeSort {a b} { + 
if {[lindex $a 0] > [lindex $b 0]} {return 1}
+    if {[lindex $a 0] < [lindex $b 0]} {return -1}
+    string compare [lindex $a 1] [lindex $b 1]
+}
+
+# Return all log lines starting with the first line that contains a warning.
+# Generally, this will be an assertion error with a stack trace.
+proc warnings_from_file {filename} {
+    set lines [split [exec cat $filename] "\n"]
+    set matched 0
+    set logall 0
+    set result {}
+    foreach line $lines {
+        if {[string match {*REDIS BUG REPORT START*} $line]} {
+            set logall 1
+        }
+        if {[regexp {^\[\d+\]\s+\d+\s+\w+\s+\d{2}:\d{2}:\d{2} \#} $line]} {
+            set matched 1
+        }
+        if {$logall || $matched} {
+            lappend result $line
+        }
+    }
+    join $result "\n"
+}
+
+# Return value for INFO property
+proc status {r property} {
+    if {[regexp "\r\n$property:(.*?)\r\n" [{*}$r info] _ value]} {
+        set _ $value
+    }
+}
+
+proc waitForBgsave r {
+    while 1 {
+        if {[status r rdb_bgsave_in_progress] eq 1} {
+            if {$::verbose} {
+                puts -nonewline "\nWaiting for background save to finish... "
+                flush stdout
+            }
+            after 1000
+        } else {
+            break
+        }
+    }
+}
+
+proc waitForBgrewriteaof r {
+    while 1 {
+        if {[status r aof_rewrite_in_progress] eq 1} {
+            if {$::verbose} {
+                puts -nonewline "\nWaiting for background AOF rewrite to finish... "
+                flush stdout
+            }
+            after 1000
+        } else {
+            break
+        }
+    }
+}
+
+proc wait_for_sync r {
+    while 1 {
+        if {[status $r master_link_status] eq "down"} {
+            after 10
+        } else {
+            break
+        }
+    }
+}
+
+# Random integer between 0 and max (excluded).
+proc randomInt {max} {
+    expr {int(rand()*$max)}
+}
+
+# Random signed integer between -max and max (both extremes excluded).
+proc randomSignedInt {max} { + set i [randomInt $max] + if {rand() > 0.5} { + set i -$i + } + return $i +} + +proc randpath args { + set path [expr {int(rand()*[llength $args])}] + uplevel 1 [lindex $args $path] +} + +proc randomValue {} { + randpath { + # Small enough to likely collide + randomSignedInt 1000 + } { + # 32 bit compressible signed/unsigned + randpath {randomSignedInt 2000000000} {randomSignedInt 4000000000} + } { + # 64 bit + randpath {randomSignedInt 1000000000000} + } { + # Random string + randpath {randstring 0 256 alpha} \ + {randstring 0 256 compr} \ + {randstring 0 256 binary} + } +} + +proc randomKey {} { + randpath { + # Small enough to likely collide + randomInt 1000 + } { + # 32 bit compressible signed/unsigned + randpath {randomInt 2000000000} {randomInt 4000000000} + } { + # 64 bit + randpath {randomInt 1000000000000} + } { + # Random string + randpath {randstring 1 256 alpha} \ + {randstring 1 256 compr} + } +} + +proc findKeyWithType {r type} { + for {set j 0} {$j < 20} {incr j} { + set k [{*}$r randomkey] + if {$k eq {}} { + return {} + } + if {[{*}$r type $k] eq $type} { + return $k + } + } + return {} +} + +proc createComplexDataset {r ops {opt {}}} { + for {set j 0} {$j < $ops} {incr j} { + set k [randomKey] + set k2 [randomKey] + set f [randomValue] + set v [randomValue] + + if {[lsearch -exact $opt useexpire] != -1} { + if {rand() < 0.1} { + {*}$r expire [randomKey] [randomInt 2] + } + } + + randpath { + set d [expr {rand()}] + } { + set d [expr {rand()}] + } { + set d [expr {rand()}] + } { + set d [expr {rand()}] + } { + set d [expr {rand()}] + } { + randpath {set d +inf} {set d -inf} + } + set t [{*}$r type $k] + + if {$t eq {none}} { + randpath { + {*}$r set $k $v + } { + {*}$r lpush $k $v + } { + {*}$r sadd $k $v + } { + {*}$r zadd $k $d $v + } { + {*}$r hset $k $f $v + } { + {*}$r del $k + } + set t [{*}$r type $k] + } + + switch $t { + {string} { + # Nothing to do + } + {list} { + randpath {{*}$r lpush $k $v} \ + {{*}$r 
rpush $k $v} \ + {{*}$r lrem $k 0 $v} \ + {{*}$r rpop $k} \ + {{*}$r lpop $k} + } + {set} { + randpath {{*}$r sadd $k $v} \ + {{*}$r srem $k $v} \ + { + set otherset [findKeyWithType {*}$r set] + if {$otherset ne {}} { + randpath { + {*}$r sunionstore $k2 $k $otherset + } { + {*}$r sinterstore $k2 $k $otherset + } { + {*}$r sdiffstore $k2 $k $otherset + } + } + } + } + {zset} { + randpath {{*}$r zadd $k $d $v} \ + {{*}$r zrem $k $v} \ + { + set otherzset [findKeyWithType {*}$r zset] + if {$otherzset ne {}} { + randpath { + {*}$r zunionstore $k2 2 $k $otherzset + } { + {*}$r zinterstore $k2 2 $k $otherzset + } + } + } + } + {hash} { + randpath {{*}$r hset $k $f $v} \ + {{*}$r hdel $k $f} + } + } + } +} + +proc formatCommand {args} { + set cmd "*[llength $args]\r\n" + foreach a $args { + append cmd "$[string length $a]\r\n$a\r\n" + } + set _ $cmd +} + +proc csvdump r { + set o {} + foreach k [lsort [{*}$r keys *]] { + set type [{*}$r type $k] + append o [csvstring $k] , [csvstring $type] , + switch $type { + string { + append o [csvstring [{*}$r get $k]] "\n" + } + list { + foreach e [{*}$r lrange $k 0 -1] { + append o [csvstring $e] , + } + append o "\n" + } + set { + foreach e [lsort [{*}$r smembers $k]] { + append o [csvstring $e] , + } + append o "\n" + } + zset { + foreach e [{*}$r zrange $k 0 -1 withscores] { + append o [csvstring $e] , + } + append o "\n" + } + hash { + set fields [{*}$r hgetall $k] + set newfields {} + foreach {k v} $fields { + lappend newfields [list $k $v] + } + set fields [lsort -index 0 $newfields] + foreach kv $fields { + append o [csvstring [lindex $kv 0]] , + append o [csvstring [lindex $kv 1]] , + } + append o "\n" + } + } + } + return $o +} + +proc csvstring s { + return "\"$s\"" +} + +proc roundFloat f { + format "%.10g" $f +} + +proc find_available_port start { + for {set j $start} {$j < $start+1024} {incr j} { + if {[catch { + set fd [socket 127.0.0.1 $j] + }]} { + return $j + } else { + close $fd + } + } + if {$j == $start+1024} 
{ + error "Can't find a non busy port in the $start-[expr {$start+1023}] range." + } +} + +# Test if TERM looks like to support colors +proc color_term {} { + expr {[info exists ::env(TERM)] && [string match *xterm* $::env(TERM)]} +} + +proc colorstr {color str} { + if {[color_term]} { + set b 0 + if {[string range $color 0 4] eq {bold-}} { + set b 1 + set color [string range $color 5 end] + } + switch $color { + red {set colorcode {31}} + green {set colorcode {32}} + yellow {set colorcode {33}} + blue {set colorcode {34}} + magenta {set colorcode {35}} + cyan {set colorcode {36}} + white {set colorcode {37}} + default {set colorcode {37}} + } + if {$colorcode ne {}} { + return "\033\[$b;${colorcode};49m$str\033\[0m" + } + } else { + return $str + } +} + +# Execute a background process writing random data for the specified number +# of seconds to the specified Redis instance. +proc start_write_load {host port seconds} { + set tclsh [info nameofexecutable] + exec $tclsh tests/helpers/gen_write_load.tcl $host $port $seconds & +} + +# Stop a process generating write load executed with start_write_load. +proc stop_write_load {handle} { + catch {exec /bin/kill -9 $handle} +} + +# Mock debug populate +proc populate {size} { + for {set counter 0} {$counter < $size} {incr counter} { + r set "key:$counter" "key:$counter" + } +} + +proc wait_for_blocked_client {{idx 0}} { + wait_for_condition 50 100 { + [s $idx blocked_clients] ne 0 + } else { + fail "no blocked clients" + } +} + +# Shuffle a list with Fisher-Yates algorithm. +proc lshuffle {list} { + set n [llength $list] + while {$n>1} { + set j [expr {int(rand()*$n)}] + incr n -1 + if {$n==$j} continue + set v [lindex $list $j] + lset list $j [lindex $list $n] + lset list $n $v + } + return $list +} \ No newline at end of file diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl new file mode 100644 index 000000000..0665583b0 --- /dev/null +++ b/tests/test_helper.tcl @@ -0,0 +1,554 @@ +# Redis test suite. 
Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com +# This software is released under the BSD License. See the COPYING file for +# more information. + +package require Tcl 8.5 + +set tcl_precision 17 +source tests/support/redis.tcl +source tests/support/server.tcl +source tests/support/tmpfile.tcl +source tests/support/test.tcl +source tests/support/util.tcl + +set ::all_tests { + # unit/printver + # unit/basic + # unit/scan + # unit/multi + # unit/quit + # unit/type/list + # unit/pubsub + # unit/slowlog + # unit/maxmemory + # unit/bitops + # unit/hyperloglog + # unit/type + # unit/acl + # unit/type/list-2 + # unit/type/list-3 + # unit/type/set + # unit/type/zset + # unit/type/string + unit/type/hash + # unit/expire + # unit/protocol + # unit/other + # unit/auth + # unit/sort + # unit/aofrw + # unit/scripting + # unit/introspection + # unit/limits + # unit/obuf-limits + # unit/dump + # unit/memefficiency + # unit/command + # unit/tcl/replication + # unit/tcl/replication-2 + # unit/tcl/replication-3 + # unit/tcl/replication-4 + # unit/tcl/replication-psync + # unit/tcl/aof + # unit/tcl/rdb + # unit/tcl/convert-zipmap-hash-on-load +} + +# because the comment not works in tcl list, use regsub to ignore the item starting with '#' +regsub -all {#.*?\n} $::all_tests {} ::all_tests + + +# Index to the next test to run in the ::all_tests list. +set ::next_test 0 + +set ::host 127.0.0.1 +set ::port 21111 +set ::traceleaks 0 +set ::valgrind 0 +set ::verbose 0 +set ::quiet 0 +set ::denytags {} +set ::allowtags {} +set ::external 0; # If "1" this means, we are running against external instance +set ::file ""; # If set, runs only the tests in this comma separated list +set ::curfile ""; # Hold the filename of the current suite +set ::accurate 0; # If true runs fuzz tests with more iterations +set ::force_failure 0 +set ::timeout 600; # 10 minutes without progresses will quit the test. 
+set ::last_progress [clock seconds] +set ::active_servers {} ; # Pids of active Redis instances. + +# Set to 1 when we are running in client mode. The Redis test uses a +# server-client model to run tests simultaneously. The server instance +# runs the specified number of client instances that will actually run tests. +# The server is responsible of showing the result to the user, and exit with +# the appropriate exit code depending on the test outcome. +set ::client 0 +set ::numclients 16 + +proc execute_tests name { + set path "tests/$name.tcl" + set ::curfile $path + source $path + send_data_packet $::test_server_fd done "$name" +} + +# Setup a list to hold a stack of server configs. When calls to start_server +# are nested, use "srv 0 pid" to get the pid of the inner server. To access +# outer servers, use "srv -1 pid" etcetera. +set ::servers {} +proc srv {args} { + set level 0 + if {[string is integer [lindex $args 0]]} { + set level [lindex $args 0] + set property [lindex $args 1] + } else { + set property [lindex $args 0] + } + set srv [lindex $::servers end+$level] + dict get $srv $property +} + +# Provide easy access to the client for the inner server. It's possible to +# prepend the argument list with a negative level to access clients for +# servers running in outer blocks. 
+proc r {args} { + set level 0 + if {[string is integer [lindex $args 0]]} { + set level [lindex $args 0] + set args [lrange $args 1 end] + } + [srv $level "client"] {*}$args +} + +proc reconnect {args} { + set level [lindex $args 0] + if {[string length $level] == 0 || ![string is integer $level]} { + set level 0 + } + + set srv [lindex $::servers end+$level] + set host [dict get $srv "host"] + set port [dict get $srv "port"] + set config [dict get $srv "config"] + set client [redis $host $port] + dict set srv "client" $client + + # select the right db when we don't have to authenticate + if {![dict exists $config "requirepass"]} { + # $client select 9 + } + + # re-set $srv in the servers list + lset ::servers end+$level $srv +} + +proc redis_deferring_client {args} { + set level 0 + if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { + set level [lindex $args 0] + set args [lrange $args 1 end] + } + + # create client that defers reading reply + set client [redis [srv $level "host"] [srv $level "port"] 1] + + # select the right db and read the response (OK) + $client select 0 + $client read + return $client +} + +# Provide easy access to INFO properties. Same semantic as "proc r". +proc s {args} { + set level 0 + if {[string is integer [lindex $args 0]]} { + set level [lindex $args 0] + set args [lrange $args 1 end] + } + status [srv $level "client"] [lindex $args 0] +} + +proc cleanup {} { + if {!$::quiet} {puts -nonewline "Cleanup: may take some time... "} + flush stdout + catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]} + catch {exec rm -rf {*}[glob tests/tmp/server.*]} + if {!$::quiet} {puts "OK"} +} + +proc test_server_main {} { + cleanup + set tclsh [info nameofexecutable] + # Open a listening socket, trying different ports in order to find a + # non busy one. 
+ set port [find_available_port 11111] + if {!$::quiet} { + puts "Starting test server at port $port" + } + socket -server accept_test_clients -myaddr 127.0.0.1 $port + + # Start the client instances + set ::clients_pids {} + set start_port [expr {$::port+100}] + for {set j 0} {$j < $::numclients} {incr j} { + set start_port [find_available_port $start_port] + set p [exec $tclsh [info script] {*}$::argv \ + --client $port --port $start_port &] + lappend ::clients_pids $p + incr start_port 10 + } + + # Setup global state for the test server + set ::idle_clients {} + set ::active_clients {} + array set ::active_clients_task {} + array set ::clients_start_time {} + set ::clients_time_history {} + set ::failed_tests {} + + # Enter the event loop to handle clients I/O + after 100 test_server_cron + vwait forever +} + +# This function gets called 10 times per second. +proc test_server_cron {} { + set elapsed [expr {[clock seconds]-$::last_progress}] + + if {$elapsed > $::timeout} { + set err "\[[colorstr red TIMEOUT]\]: clients state report follows." + puts $err + show_clients_state + kill_clients + force_kill_all_servers + the_end + } + + after 100 test_server_cron +} + +proc accept_test_clients {fd addr port} { + fconfigure $fd -encoding binary + fileevent $fd readable [list read_from_test_client $fd] +} + +# This is the readable handler of our test server. Clients send us messages +# in the form of a status code such and additional data. Supported +# status types are: +# +# ready: the client is ready to execute the command. Only sent at client +# startup. The server will queue the client FD in the list of idle +# clients. +# testing: just used to signal that a given test started. +# ok: a test was executed with success. +# err: a test was executed with an error. +# exception: there was a runtime exception while executing the test. +# done: all the specified test file was processed, this test client is +# ready to accept a new task. 
+proc read_from_test_client fd { + set bytes [gets $fd] + set payload [read $fd $bytes] + foreach {status data} $payload break + set ::last_progress [clock seconds] + + if {$status eq {ready}} { + if {!$::quiet} { + puts "\[$status\]: $data" + } + signal_idle_client $fd + } elseif {$status eq {done}} { + set elapsed [expr {[clock seconds]-$::clients_start_time($fd)}] + set all_tests_count [llength $::all_tests] + set running_tests_count [expr {[llength $::active_clients]-1}] + set completed_tests_count [expr {$::next_test-$running_tests_count}] + puts "\[$completed_tests_count/$all_tests_count [colorstr yellow $status]\]: $data ($elapsed seconds)" + lappend ::clients_time_history $elapsed $data + signal_idle_client $fd + set ::active_clients_task($fd) DONE + } elseif {$status eq {ok}} { + if {!$::quiet} { + puts "\[[colorstr green $status]\]: $data" + } + set ::active_clients_task($fd) "(OK) $data" + } elseif {$status eq {err}} { + set err "\[[colorstr red $status]\]: $data" + puts $err + lappend ::failed_tests $err + set ::active_clients_task($fd) "(ERR) $data" + } elseif {$status eq {exception}} { + puts "\[[colorstr red $status]\]: $data" + kill_clients + force_kill_all_servers + exit 1 + } elseif {$status eq {testing}} { + set ::active_clients_task($fd) "(IN PROGRESS) $data" + } elseif {$status eq {server-spawned}} { + lappend ::active_servers $data + } elseif {$status eq {server-killed}} { + set ::active_servers [lsearch -all -inline -not -exact $::active_servers $data] + } else { + if {!$::quiet} { + puts "\[$status\]: $data" + } + } +} + +proc show_clients_state {} { + # The following loop is only useful for debugging tests that may + # enter an infinite loop. Commented out normally. + foreach x $::active_clients { + if {[info exist ::active_clients_task($x)]} { + puts "$x => $::active_clients_task($x)" + } else { + puts "$x => ???" 
+ } + } +} + +proc kill_clients {} { + foreach p $::clients_pids { + catch {exec kill $p} + } +} + +proc force_kill_all_servers {} { + foreach p $::active_servers { + puts "Killing still running Redis server $p" + catch {exec kill -9 $p} + } +} + +# A new client is idle. Remove it from the list of active clients and +# if there are still test units to run, launch them. +proc signal_idle_client fd { + # Remove this fd from the list of active clients. + set ::active_clients \ + [lsearch -all -inline -not -exact $::active_clients $fd] + + if 0 {show_clients_state} + + # New unit to process? + if {$::next_test != [llength $::all_tests]} { + if {!$::quiet} { + puts [colorstr bold-white "Testing [lindex $::all_tests $::next_test]"] + set ::active_clients_task($fd) "ASSIGNED: $fd ([lindex $::all_tests $::next_test])" + } + set ::clients_start_time($fd) [clock seconds] + send_data_packet $fd run [lindex $::all_tests $::next_test] + lappend ::active_clients $fd + incr ::next_test + } else { + lappend ::idle_clients $fd + if {[llength $::active_clients] == 0} { + the_end + } + } +} + +# The the_end function gets called when all the test units were already +# executed, so the test finished. +proc the_end {} { + # TODO: print the status, exit with the rigth exit code. + puts "\n The End\n" + puts "Execution time of different units:" + foreach {time name} $::clients_time_history { + puts " $time seconds - $name" + } + if {[llength $::failed_tests]} { + puts "\n[colorstr bold-red {!!! WARNING}] The following tests failed:\n" + foreach failed $::failed_tests { + puts "*** $failed" + } + cleanup + exit 1 + } else { + puts "\n[colorstr bold-white {\o/}] [colorstr bold-green {All tests passed without errors!}]\n" + cleanup + exit 0 + } +} + +# The client is not even driven (the test server is instead) as we just need +# to read the command, execute, reply... all this in a loop. 
+proc test_client_main server_port { + set ::test_server_fd [socket localhost $server_port] + fconfigure $::test_server_fd -encoding binary + send_data_packet $::test_server_fd ready [pid] + while 1 { + set bytes [gets $::test_server_fd] + set payload [read $::test_server_fd $bytes] + foreach {cmd data} $payload break + if {$cmd eq {run}} { + execute_tests $data + } else { + error "Unknown test client command: $cmd" + } + } +} + +proc send_data_packet {fd status data} { + set payload [list $status $data] + puts $fd [string length $payload] + puts -nonewline $fd $payload + flush $fd +} + +proc print_help_screen {} { + puts [join { + "--valgrind Run the test over valgrind." + "--accurate Run slow randomized tests for more iterations." + "--quiet Don't show individual tests." + "--single Just execute the specified unit (see next option)." + "--list-tests List all the available test units." + "--clients Number of test clients (default 16)." + "--timeout Test timeout in seconds (default 10 min)." + "--force-failure Force the execution of a test that always fails." + "--help Print this help screen." 
+ } "\n"] +} + +# parse arguments +for {set j 0} {$j < [llength $argv]} {incr j} { + set opt [lindex $argv $j] + set arg [lindex $argv [expr $j+1]] + if {$opt eq {--tags}} { + foreach tag $arg { + if {[string index $tag 0] eq "-"} { + lappend ::denytags [string range $tag 1 end] + } else { + lappend ::allowtags $tag + } + } + incr j + } elseif {$opt eq {--valgrind}} { + set ::valgrind 1 + } elseif {$opt eq {--quiet}} { + set ::quiet 1 + } elseif {$opt eq {--host}} { + set ::external 1 + set ::host $arg + incr j + } elseif {$opt eq {--port}} { + set ::port $arg + incr j + } elseif {$opt eq {--accurate}} { + set ::accurate 1 + } elseif {$opt eq {--force-failure}} { + set ::force_failure 1 + } elseif {$opt eq {--single}} { + set ::all_tests $arg + incr j + } elseif {$opt eq {--list-tests}} { + foreach t $::all_tests { + puts $t + } + exit 0 + } elseif {$opt eq {--client}} { + set ::client 1 + set ::test_server_port $arg + incr j + } elseif {$opt eq {--clients}} { + set ::numclients $arg + incr j + } elseif {$opt eq {--timeout}} { + set ::timeout $arg + incr j + } elseif {$opt eq {--help}} { + print_help_screen + exit 0 + } else { + puts "Wrong argument: $opt" + exit 1 + } +} + +proc attach_to_replication_stream {} { + set s [socket [srv 0 "host"] [srv 0 "port"]] + fconfigure $s -translation binary + puts -nonewline $s "SYNC\r\n" + flush $s + + # Get the count + set count [gets $s] + set prefix [string range $count 0 0] + if {$prefix ne {$}} { + error "attach_to_replication_stream error. Received '$count' as count." 
+ } + set count [string range $count 1 end] + + # Consume the bulk payload + while {$count} { + set buf [read $s $count] + set count [expr {$count-[string length $buf]}] + } + return $s +} + +proc read_from_replication_stream {s} { + fconfigure $s -blocking 0 + set attempt 0 + while {[gets $s count] == -1} { + if {[incr attempt] == 10} return "" + after 100 + } + fconfigure $s -blocking 1 + set count [string range $count 1 end] + + # Return a list of arguments for the command. + set res {} + for {set j 0} {$j < $count} {incr j} { + read $s 1 + set arg [::redis::redis_bulk_read $s] + if {$j == 0} {set arg [string tolower $arg]} + lappend res $arg + } + return $res +} + +proc assert_replication_stream {s patterns} { + for {set j 0} {$j < [llength $patterns]} {incr j} { + assert_match [lindex $patterns $j] [read_from_replication_stream $s] + } +} + +proc close_replication_stream {s} { + close $s +} + +# With the parallel test running multiple Redis instances at the same time +# we need a fast enough computer, otherwise a lot of tests may generate +# false positives. +# If the computer is too slow we revert the sequential test without any +# parallelism, that is, clients == 1. +proc is_a_slow_computer {} { + set start [clock milliseconds] + for {set j 0} {$j < 1000000} {incr j} {} + set elapsed [expr [clock milliseconds]-$start] + expr {$elapsed > 200} +} + +if {$::client} { + if {[catch { test_client_main $::test_server_port } err]} { + set estr "Executing test client: $err.\n$::errorInfo" + if {[catch {send_data_packet $::test_server_fd exception $estr}]} { + puts $estr + } + exit 1 + } +} else { + if {[is_a_slow_computer]} { + puts "** SLOW COMPUTER ** Using a single client to avoid false positives." 
+ set ::numclients 1 + } + + if {[catch { test_server_main } err]} { + if {[string length $err] > 0} { + # only display error when not generated by the test suite + if {$err ne "exception"} { + puts $::errorInfo + } + exit 1 + } + } +} diff --git a/tests/unit/Blpop_Brpop_test.py b/tests/unit/Blpop_Brpop_test.py new file mode 100644 index 000000000..664673276 --- /dev/null +++ b/tests/unit/Blpop_Brpop_test.py @@ -0,0 +1,896 @@ +import threading +import time +import redis +import random +import string + + + + +# 单个list不阻塞时的出列顺序测试(行为应当和lpop/rpop一样) +def test_single_existing_list(db_): + print("start test_single_existing_list, db:db%d" % (db_)) + # 创建Redis客户端 + pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + + # 清空测试环境 + pika.delete('blist') + + # 向列表a中插入元素 + pika.lpush('blist', 'a', 'b', 'large', 'c', 'd') + # 此时blist1顺序为: d c large b a + + result = pika.blpop('blist', timeout=0) + assert result[0] == b'blist' and result[1] == b'd', f"Expected (b'blist1', b'd'), but got {result}" + result = pika.brpop('blist', timeout=0) + assert result[0] == b'blist' and result[1] == b'a', f"Expected (b'blist1', b'a'), but got {result}" + + result = pika.blpop("blist", timeout=0) + assert result[0] == b'blist' and result[1] == b'c', f"Expected (b'blist1', b'c'), but got {result}" + result = pika.brpop('blist', timeout=0) + assert result[0] == b'blist' and result[1] == b'b', f"Expected (b'blist1', b'b'), but got {result}" + + pika.close() + print("test_single_existing_list Passed [Passed], db:db%d" % (db_)) + + +# 解阻塞测试(超时自动解阻塞,lpush解阻塞,rpush解阻塞,rpoplpush解阻塞) +def test_blpop_brpop_unblock_lrpush_rpoplpush(db_): + print("start test_blpop_brpop_unblock_lrpush_rpoplpush, db:db%d" % (db_)) + pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + + # 超时自动解阻塞测试(blpop) + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + + def blpop_thread1(timeout_): + nonlocal blocked + client = 
redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop('blist', timeout=timeout_) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=blpop_thread1, args=(1,)) + thread.start() + time.sleep(2) + with blocked_lock: + assert blocked == False, f"Expected False but got {blocked}" + thread.join() + + # 超时自动解阻塞测试(brpop) + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + + def brpop_thread2(timeout_): + nonlocal blocked + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop('blist', timeout=timeout_) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=brpop_thread2, args=(2,)) + thread.start() + time.sleep(3) + with blocked_lock: + assert blocked == False, f"Expected False but got {blocked}" + thread.join() + + # lpush解brpop阻塞 + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + + def brpop_thread3(): + nonlocal blocked + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop('blist', timeout=0) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=brpop_thread3) + thread.start() + time.sleep(1) + pika.lpush('blist', 'foo') + time.sleep(1) + with blocked_lock: + assert blocked == False, f"Expected False but got {blocked}" + thread.join() + + # lpush解blpop阻塞 + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + + def blpop_thread31(): + nonlocal blocked + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop('blist', timeout=0) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=blpop_thread31) + thread.start() + time.sleep(1) + pika.lpush('blist', 'foo') + time.sleep(1) + with blocked_lock: + assert blocked == False, f"Expected False but got 
{blocked}" + thread.join() + + # rpush解blpop阻塞 + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + + def blpop_thread4(): + nonlocal blocked + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop('blist', timeout=0) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=blpop_thread4) + thread.start() + time.sleep(1) + pika.rpush('blist', 'foo') + time.sleep(1) + with blocked_lock: + assert blocked == False, f"Expected False but got {blocked}" + thread.join() + + # rpush解brpop阻塞 + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + + def brpop_thread41(): + nonlocal blocked + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop('blist', timeout=0) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=brpop_thread41) + thread.start() + time.sleep(1) + pika.rpush('blist', 'foo') + time.sleep(1) + with blocked_lock: + assert blocked == False, f"Expected False but got {blocked}" + thread.join() + + # rpoplpush解blpop阻塞 + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + pika.lpush('blist0', 'v1') + + def blpop_thread5(): + nonlocal blocked + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop('blist', timeout=0) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=blpop_thread5) + thread.start() + time.sleep(1) + pika.rpoplpush('blist0', 'blist') + time.sleep(1) + with blocked_lock: + assert blocked == False, f"Expected False but got {blocked}" + thread.join() + + # rpoplpush解brpop阻塞 + blocked = True + blocked_lock = threading.Lock() + pika.delete('blist') + pika.lpush('blist0', 'v1') + + def brpop_thread51(): + nonlocal blocked + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = 
client.brpop('blist', timeout=0) + with blocked_lock: + blocked = False + client.close() + + thread = threading.Thread(target=brpop_thread51) + thread.start() + time.sleep(1) + pika.rpoplpush('blist0', 'blist') + time.sleep(1) + with blocked_lock: + assert blocked == False, f"Expected False but got {blocked}" + thread.join() + pika.close() + print("test_blpop_brpop_unblock_lrpush_rpoplpush Passed [Passed], db:db%d" % (db_)) + + +def test_concurrency_block_unblock(db_): + print("start test_concurrency_block_unblock, it will cost some time, pls wait, db:db%d" % (db_)) + pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + pika.delete('blist0', 'blist1', 'blist2', 'blist3') + + def blpop_thread(list, timeout_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop(list, timeout=timeout_) + client.close() + + def brpop_thread(list, timeout_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop(list, timeout=timeout_) + client.close() + + def lpush_thread(list_, value_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + client.lpush(list_, value_) + client.close() + + def rpush_thread(list_, value_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + client.rpush(list_, value_) + client.close() + + pika.delete('blist0', 'blist1', 'blist2', 'blist3') + pika.delete('blist100', 'blist101', 'blist102', 'blist103') + + lists = ['blist0', 'blist1', 'blist2', 'blist3'] + # 先增加一些阻塞连接作为干扰 + t_threads = [] + for i in range(0, 25): + t1 = threading.Thread(target=blpop_thread, args=(['blist100', 'blist101', 'blist102', 'blist103'], 30)) + t2 = threading.Thread(target=brpop_thread, args=(['blist100', 'blist101', 'blist102', 'blist103'], 30)) + t1.start() + t2.start() + t_threads.append(t1) + t_threads.append(t2) + + # 并发超时测试 + threads = [] + # 
添加100个线程执行blpop/brpop,同时被阻塞,并且应当2s后超时自动解阻塞 + for i in range(0, 50): + t1 = threading.Thread(target=blpop_thread, args=(lists, 2)) + t2 = threading.Thread(target=brpop_thread, args=(lists, 2)) + t1.start() + t2.start() + threads.append(t1) + threads.append(t2) + # 线程结束需要一些时间 + time.sleep(6) + for t in threads: + if t.is_alive(): + assert False, "Error: this thread is still running, means conn didn't got unblocked in time" + else: + pass + # print("conn unblocked, OK") + + # 并发push解阻塞测试 + threads = [] + # 添加100个线程执行blpop/brpop,同时被阻塞 + for i in range(0, 50): + t1 = threading.Thread(target=blpop_thread, args=(lists, 0)) + t2 = threading.Thread(target=brpop_thread, args=(lists, 0)) + t1.start() + t2.start() + threads.append(t1) + threads.append(t2) + # 确保线程都执行了blpop/brpop + time.sleep(5) + + # push 200条数据,确保能解除前面100个conn的阻塞 + for i in range(0, 50): + t1 = threading.Thread(target=lpush_thread, args=('blist2', 'v')) + t2 = threading.Thread(target=rpush_thread, args=('blist0', 'v')) + t3 = threading.Thread(target=lpush_thread, args=('blist1', 'v')) + t4 = threading.Thread(target=rpush_thread, args=('blist3', 'v')) + t1.start() + t2.start() + t3.start() + t4.start() + # 100个线程结束需要时间 + time.sleep(5) + for t in threads: + if t.is_alive(): + assert False, "Error: this thread is still running, means conn didn't got unblocked in time" + else: + pass + # print("conn unblocked, OK") + + pika.delete('blist0', 'blist1', 'blist2', 'blist3') + + # 混合并发(一半自动解阻塞,一半push解阻塞) + threads = [] + # 添加100个线程执行blpop/brpop,同时被阻塞 + for i in range(0, 25): + t1 = threading.Thread(target=blpop_thread, args=(['blist0', 'blist1'], 3)) + t2 = threading.Thread(target=brpop_thread, args=(['blist0', 'blist1'], 3)) + t3 = threading.Thread(target=blpop_thread, args=(['blist2', 'blist3'], 0)) + t4 = threading.Thread(target=brpop_thread, args=(['blist2', 'blist3'], 0)) + t1.start() + t2.start() + t3.start() + t4.start() + threads.append(t1) + threads.append(t2) + + # 确保blpop/brpop都执行完了,并且其中50个conn马上要开始超时解除阻塞 + 
time.sleep(3) + + # 并发push 100条数据,确保能解除前面50个conn的阻塞 + for i in range(0, 25): + t1 = threading.Thread(target=lpush_thread, args=('blist2', 'v')) + t2 = threading.Thread(target=rpush_thread, args=('blist3', 'v')) + t3 = threading.Thread(target=lpush_thread, args=('blist2', 'v')) + t4 = threading.Thread(target=rpush_thread, args=('blist3', 'v')) + t1.start() + t2.start() + t3.start() + t4.start() + + # 100个线程结束需要时间 + time.sleep(5) + for t in threads: + if t.is_alive(): + assert False, "Error: this thread is still running, means conn didn't got unblocked in time" + else: + pass + # print("conn unblocked, OK") + + for t in t_threads: + t.join() + pika.delete('blist0', 'blist1', 'blist2', 'blist3') + + print("test_concurrency_block_unblock Passed [Passed], db:db%d" % (db_)) + pika.close() + + +# blpop/brpop多个list不阻塞时,从左到右选择第一个有元素的list进行pop +def test_multiple_existing_lists(db_): + print("start test_multiple_existing_lists, db:db%d" % (db_)) + # 创建Redis客户端 + pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + + # 清空测试环境 + pika.delete('blist1', 'large', 'large', 'blist2') + + # 向blist1和blist2列表中插入元素 + pika.rpush('blist1', 'a', "large", 'c') + pika.rpush('blist2', 'd', "large", 'f') + + result = pika.blpop(['blist1', 'blist2'], timeout=1) + assert result[0] == b'blist1' and result[1] == b'a', f"Expected (b'blist1', b'a'), but got {result}" + result = pika.brpop(['blist1', 'blist2'], timeout=1) + assert result[0] == b'blist1' and result[1] == b'c', f"Expected (b'blist1', b'c'), but got {result}" + + result = pika.llen('blist1') + assert result == 1, f"Expected 1, but got {result}" + result = pika.llen('blist2') + assert result == 3, f"Expected 3, but got {result}" + + result = pika.blpop(['blist2', 'blist1'], timeout=1) + assert result[0] == b'blist2' and result[1] == b'd', f"Expected (b'blist2', b'd'), but got {result}" + result = pika.brpop(['blist2', 'blist1'], timeout=1) + assert result[0] == b'blist2' and result[1] == b'f', f"Expected 
(b'blist2', b'f'), but got {result}" + + result = pika.llen('blist1') + assert result == 1, f"Expected 1, but got {result}" + result = pika.llen('blist2') + assert result == 1, f"Expected 1, but got {result}" + + pika.delete("blist3") + # blist3没有元素,应该从blist1/blist2中弹出元素 + result = pika.blpop(['blist3', 'blist2'], timeout=0) + assert result[0] == b'blist2' and result[1] == b'large', f"Expected (b'blist2', b'large'), but got {result}" + + result = pika.brpop(['blist3', 'blist1'], timeout=0) + assert result[0] == b'blist1' and result[1] == b'large', f"Expected (b'blist1', b'large'), but got {result}" + + pika.close() + print("test_multiple_existing_lists Passed [Passed], db:db%d" % (db_)) + + +def test_blpop_brpop_same_key_multiple_times(db_): + print("start test_blpop_brpop_same_key_multiple_times, db:db%d" % (db_)) + # 创建Redis客户端 + pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + + # 清空测试环境 + pika.delete('list1', 'list2') + + def blpop_thread1(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list1' and result[1] == b'a', f"Expected (b'list1', b'a'), but got {result}" + client.close() + + thread = threading.Thread(target=blpop_thread1) + thread.start() + # 确保BLPOP已经执行 + time.sleep(0.5) + # 向list1插入元素 + pika.lpush('list1', 'a') + # 等待线程结束 + thread.join() + + def blpop_thread2(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list2' and result[1] == b'b', f"Expected (b'list2', b'b'), but got {result}" + client.close() + + thread = threading.Thread(target=blpop_thread2) + thread.start() + # 确保BLPOP已经执行 + time.sleep(0.5) + # 向list2插入元素 + pika.lpush('list2', 'b') + # 等待线程结束 + thread.join() + + # 提前插入元素 + pika.lpush('list1', 'c') + pika.lpush('list2', 'd') + result = 
pika.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list1' and result[1] == b'c', f"Expected (b'list1', b'c'), but got {result}" + result = pika.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list2' and result[1] == b'd', f"Expected (b'list2', b'd'), but got {result}" + + # 下面是brpop + # 清空测试环境 + pika.delete('list1', 'list2') + + def brpop_thread1(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list1' and result[1] == b'a', f"Expected (b'list1', b'a'), but got {result}" + client.close() + + thread = threading.Thread(target=brpop_thread1) + thread.start() + # 确保BRPOP已经执行 + time.sleep(0.5) + # 向list1插入元素 + pika.rpush('list1', 'a') + # 等待线程结束 + thread.join() + + def brpop_thread2(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list2' and result[1] == b'b', f"Expected (b'list2', b'b'), but got {result}" + client.close() + + thread = threading.Thread(target=brpop_thread2) + thread.start() + # 确保BRPOP已经执行 + time.sleep(0.5) + # 向list2插入元素 + pika.rpush('list2', 'b') + # 等待线程结束 + thread.join() + + # 提前插入元素 + pika.rpush('list1', 'c') + pika.rpush('list2', 'd') + result = pika.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list1' and result[1] == b'c', f"Expected (b'list1', b'c'), but got {result}" + result = pika.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) + assert result[0] == b'list2' and result[1] == b'd', f"Expected (b'list2', b'd'), but got {result}" + + pika.close() + print("test_blpop_brpop_same_key_multiple_times Passed [Passed], db:db%d" % (db_)) + + +# 目标list被一条push增加了多个value,先完成多个value的入列再pop +def test_blpop_brpop_variadic_lpush(db_): + print("start test_blpop_brpop_variadic_lpush, db:db%d" % 
(db_)) + + # 创建Redis客户端 + pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + + # 清空测试环境 + pika.delete('blist') + + def blpop_thread(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop('blist', timeout=0) + assert result[0] == b'blist' and result[1] == b'bar', f"Expected (b'blist', b'bar'), but got {result}" + client.close() + + # 启动一个线程,执行BLPOP操作 + thread = threading.Thread(target=blpop_thread) + thread.start() + time.sleep(0.5) + + # 使用LPUSH命令向blist插入多个元素 + pika.lpush('blist', 'foo', 'bar') + # lpush完毕后,blist内部顺序:bar foo + # 等待线程结束 + thread.join() + # 检查blist的第一个元素 + assert pika.lindex('blist', 0) == b'foo', "Expected 'foo'" + + # 下面是brpop的测试 + # 清空测试环境 + pika.delete('blist') + + def brpop_thread(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop('blist', timeout=0) + assert result[0] == b'blist' and result[1] == b'bar', f"Expected (b'blist', b'bar'), but got {result}" + client.close() + + # 启动一个线程,执行BLPOP操作 + thread = threading.Thread(target=brpop_thread) + thread.start() + time.sleep(0.5) + + # 使用LPUSH命令向blist插入多个元素 + pika.rpush('blist', 'foo', 'bar') + # rpush完毕后,blist内部顺序:foo bar + # 等待线程结束 + thread.join() + # 检查blist的第一个元素 + assert pika.lindex('blist', 0) == b'foo', "Expected 'foo'" + print("test_blpop_brpop_variadic_lpush Passed [Passed], db:db%d" % (db_)) + + +# 先被阻塞的先服务/阻塞最久的优先级最高 +def test_serve_priority(db_): + print("start test_serve_priority, db:db%d" % (db_)) + + pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + + pika.delete('blist') + + def blpop_thread(expect): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop('blist', timeout=0) + assert result[0] == b'blist' and result[1] == expect, f"Expected (b'blist', {expect}), but got {result}" + + def brpop_thread(expect): + client = redis.Redis(host=pika_instance_ip, 
port=int(pika_instance_port), db=db_) + result = client.brpop('blist', timeout=0) + assert result[0] == b'blist' and result[1] == expect, f"Expected (b'blist', {expect}), but got {result}" + + # blpop测试 + t1 = threading.Thread(target=blpop_thread, args=(b'v1',)) + t1.start() + time.sleep(0.5) + t2 = threading.Thread(target=blpop_thread, args=(b'v2',)) + t2.start() + time.sleep(0.5) + pika.rpush('blist', 'v1', 'v2') + t1.join() + t2.join() + + # brpop测试 + t3 = threading.Thread(target=brpop_thread, args=(b'v4',)) + t3.start() + time.sleep(0.5) + t4 = threading.Thread(target=brpop_thread, args=(b'v3',)) + t4.start() + time.sleep(0.5) + pika.rpush('blist', 'v3', 'v4') + + t3.join() + t4.join() + + pika.close() + print("test_serve_priority Passed [Passed], db:db%d" % (db_)) + + +# 主从复制测试 +def test_master_slave_replication(db_): + print("start test_master_slave_replication, it will cost some time, pls wait, db:db%d" % (db_)) + + master = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + slave = redis.Redis(host=pika_slave_ip, port=int(pika_slave_port), db=db_) + slave.slaveof(pika_instance_ip, pika_instance_port) + time.sleep(25) + master.delete('blist0', 'blist1', 'blist') + + time.sleep(3) + m_keys = master.keys() + s_keys = slave.keys() + assert s_keys == m_keys, f'Expected: s_keys == m_keys, but got {s_keys == m_keys}' + + # 非阻塞的主从复制测试 + def thread1(): + nonlocal master + for i in range(0, 25): + letters = string.ascii_letters + random_str1 = ''.join(random.choice(letters) for _ in range(5)) + random_str2 = ''.join(random.choice(letters) for _ in range(5)) + random_str3 = ''.join(random.choice(letters) for _ in range(5)) + master.lpush('blist0', random_str1) + master.rpoplpush('blist0', 'blist') + master.lpush('blist', random_str1, random_str2, random_str3) + master.lpush('blist0', random_str2) + master.rpoplpush('blist0', 'blist') + master.blpop('blist') + master.brpop('blist') + master.rpush('blist', random_str3, random_str2, random_str1) + 
master.blpop('blist') + master.brpop('blist') + + t1 = threading.Thread(target=thread1) + t2 = threading.Thread(target=thread1) + t3 = threading.Thread(target=thread1) + t4 = threading.Thread(target=thread1) + t5 = threading.Thread(target=thread1) + t6 = threading.Thread(target=thread1) + t1.start() + t2.start() + t3.start() + t4.start() + t5.start() + t6.start() + + t1.join() + t2.join() + t3.join() + t4.join() + t5.join() + t6.join() + time.sleep(3) + m_keys = master.keys() + s_keys = slave.keys() + assert s_keys == m_keys, f'Expected: s_keys == m_keys, but got {s_keys == m_keys}' + + for i in range(0, master.llen('blist')): + assert master.lindex('blist', i) == slave.lindex('blist', i), \ + f"Expected:master.lindex('blist', i) == slave.linex('blist', i), but got False when i = {i}" + + master.delete('blist0', 'blist1') + + # 阻塞的主从复制测试 + def blpop_thread(list_, value_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop(['blist0', 'blist1'], timeout=0) + assert result[0] == list_.encode() and result[ + 1] == value_.encode(), f"Expected: ({list_}, {value_}), but got = {result}" + client.close() + + def blpop_thread1(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.blpop(['blist0', 'blist1'], timeout=0) + client.close() + + def brpop_thread(list_, value_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop(['blist0', 'blist1'], timeout=0) + assert result[0] == list_.encode() and result[ + 1] == value_.encode(), f"Expected: ({list_}, {value_}), but got = {result}" + client.close() + + def brpop_thread1(): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.brpop(['blist0', 'blist1'], timeout=0) + client.close() + + for i in range(0, 5): + letters = string.ascii_letters + random_str1 = ''.join(random.choice(letters) for _ in range(5)) + random_str2 = 
''.join(random.choice(letters) for _ in range(5)) + random_str3 = ''.join(random.choice(letters) for _ in range(5)) + random_str4 = ''.join(random.choice(letters) for _ in range(5)) + random_str5 = ''.join(random.choice(letters) for _ in range(5)) + + t1 = threading.Thread(target=blpop_thread, args=('blist1', random_str1,)) + t2 = threading.Thread(target=brpop_thread, args=('blist0', random_str2,)) + t3 = threading.Thread(target=blpop_thread, args=('blist1', random_str3,)) + t4 = threading.Thread(target=brpop_thread, args=('blist0', random_str4,)) + t5 = threading.Thread(target=blpop_thread, args=('blist1', random_str5,)) + + t1.start() + time.sleep(0.5) # 确保阻塞顺序 + t2.start() + time.sleep(0.5) + t3.start() + time.sleep(0.5) + t4.start() + time.sleep(0.5) + t5.start() + time.sleep(0.5) + master.lpush('blist1', random_str1) + time.sleep(0.1) + master.rpush('blist0', random_str2) + time.sleep(0.1) + master.lpush('blist1', random_str3) + time.sleep(0.1) + master.rpush('blist0', random_str4) + time.sleep(0.1) + master.lpush('blist1', random_str5) + + t1.join() + t2.join() + t3.join() + t4.join() + t5.join() + time.sleep(1) + m_keys = master.keys() + s_keys = slave.keys() + assert s_keys == m_keys, f'Expected: s_keys == m_keys, but got {s_keys == m_keys}' + for i in range(0, master.llen('blist0')): + assert master.lindex('blist0', i) == slave.lindex('blist0', i), \ + f"Expected:master.lindex('blist0', i) == slave.linex('blist0', i), but got False when i = {i}" + + # 解阻塞过程中高频pop/push, 看binlog是否会乱 + threads1 = [] + for i in range(0, 30): + t1 = threading.Thread(target=blpop_thread1) + t2 = threading.Thread(target=brpop_thread1) + t1.start() + t2.start() + threads1.append(t1) + threads1.append(t2) + + # 此时针对blist0,blist1有60个阻塞,接下来对blist0连续push多次元素(解除阻塞),同时高频pop同被阻塞的client竞争 + def lpop_thread(list): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.lpop(list) + client.close() + + def rpop_thread(list): + client = 
redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + result = client.lpop(list) + client.close() + + def lpush_thread(list_, value1_, value2_, value3_, value4_, value5_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + client.lpush(list_, value1_, value2_, value3_, value4_, value5_) + client.close() + + def rpush_thread(list_, value_, value2_, value3_, value4_, value5_): + client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) + client.rpush(list_, value_, value2_, value3_, value4_, value5_) + client.close() + + threads2 = [] + for i in range(0, 30): # 每轮push进15个元素,最多pop了9个元素,最少剩下6个元素,所以循环至少要有10次,否则前面的线程不能全部被解阻塞 + letters = string.ascii_letters + random_str1 = ''.join(random.choice(letters) for _ in range(5)) + random_str2 = ''.join(random.choice(letters) for _ in range(5)) + random_str3 = ''.join(random.choice(letters) for _ in range(5)) + random_str4 = ''.join(random.choice(letters) for _ in range(5)) + random_str5 = ''.join(random.choice(letters) for _ in range(5)) + random_str6 = ''.join(random.choice(letters) for _ in range(5)) + random_str7 = ''.join(random.choice(letters) for _ in range(5)) + random_str8 = ''.join(random.choice(letters) for _ in range(5)) + random_str9 = ''.join(random.choice(letters) for _ in range(5)) + t1 = threading.Thread(target=lpush_thread, + args=('blist0', random_str1, random_str2, random_str3, random_str4, random_str5)) + t2 = threading.Thread(target=lpop_thread, args=('blist0',)) + t3 = threading.Thread(target=lpop_thread, args=('blist0',)) + t4 = threading.Thread(target=lpop_thread, args=('blist0',)) + t5 = threading.Thread(target=rpush_thread, + args=('blist0', random_str9, random_str8, random_str7, random_str6, random_str5)) + t6 = threading.Thread(target=rpop_thread, args=('blist0',)) + t7 = threading.Thread(target=rpop_thread, args=('blist0',)) + t8 = threading.Thread(target=rpop_thread, args=('blist0',)) + t9 = 
threading.Thread(target=rpush_thread, + args=('blist0', random_str7, random_str8, random_str9, random_str1, random_str2)) + t10 = threading.Thread(target=lpop_thread, args=('blist0',)) + t11 = threading.Thread(target=lpop_thread, args=('blist0',)) + t12 = threading.Thread(target=lpop_thread, args=('blist0',)) + + threads2.append(t1) + threads2.append(t2) + threads2.append(t3) + threads2.append(t4) + threads2.append(t5) + threads2.append(t6) + threads2.append(t7) + threads2.append(t8) + threads2.append(t9) + threads2.append(t10) + threads2.append(t11) + threads2.append(t12) + + for t in threads2: + t.start() + + for t in threads1: + t.join() + time.sleep(5) + m_keys = master.keys() + s_keys = slave.keys() + assert s_keys == m_keys, f'Expected: s_keys == m_keys, but got {s_keys == m_keys}' + for i in range(0, master.llen('blist0')): + assert master.lindex('blist0', i) == slave.lindex('blist0', i), \ + f"Expected:master.lindex('blist0', i) == slave.linex('blist0', i), but got False when i = {i}" + + master.close() + slave.close() + print("test_master_slave_replication Passed [Passed], db:db%d" % (db_)) + +def test_with_db(db_id): + test_master_slave_replication(db_id) + test_single_existing_list(db_id) + test_blpop_brpop_unblock_lrpush_rpoplpush(db_id) + test_concurrency_block_unblock(db_id) + test_multiple_existing_lists(db_id) + test_blpop_brpop_same_key_multiple_times(db_id) + test_blpop_brpop_variadic_lpush(db_id) + test_serve_priority(db_id) + + +pika_instance_ip = '127.0.0.1' +pika_instance_port = '9221' +pika_slave_ip = '127.0.0.1' +pika_slave_port = '9231' + +# for i in range(0, 100): +#请给主从节点都开启2个db,否则注释掉db1_t相关的行,只做单db测试 +#如果不做主从复制测试,把test_master_slave_replication(db_id)注释掉 +db0_t = threading.Thread(target=test_with_db, args=(0,)) +db1_t = threading.Thread(target=test_with_db, args=(1,)) + +db0_t.start() +db1_t.start() + +db0_t.join() +db1_t.join() + + + +# 待添加的测试: +# 事务与blpop/brpop +# 1 事务内执行blpop/brpop如果没有获取到元素不阻塞,直接返回 +# 2 "BLPOP, LPUSH + DEL should not 
awake blocked client": 在事务内对一个空list进行了push后又del,当事务结束时list如果依旧是空的,则不应该去服务被阻塞的客户端(事务内的lpush不触发解阻塞动作,而是事务结束才做这个行为 +# redis单测逻辑如下 +# test "BLPOP, LPUSH + DEL should not awake blocked client" { +# set rd [redis_deferring_client] +# r del list +# +# $rd blpop list 0 +# r multi +# r lpush list a +# r del list +# r exec +# r del list +# r lpush list b +# $rd read +# } {list b} + +# 3 "BLPOP, LPUSH + DEL + SET should not awake blocked client": 这个测试用例与上一个类似,但在删除列表后,还使用SET命令将这个列表设置为一个字符串。 +# redis单测逻辑如下 +# test "BLPOP, LPUSH + DEL + SET should not awake blocked client" { +# set rd [redis_deferring_client] +# r del list +# +# $rd blpop list 0 +# r multi +# r lpush list a +# r del list +# r set list foo +# r exec +# r del list +# r lpush list b +# $rd read +# } {list b} + +# 4 "MULTI/EXEC is isolated from the point of view of BLPOP": 这个测试用例检查了在使用BLPOP命令阻塞等待一个列表的元素时,如果在此期间在一个Redis事务中向这个列表推入多个元素,阻塞的客户端应该只会接收到事务执行前的列表状态。 +# redis单测逻辑如下 +# test "MULTI/EXEC is isolated from the point of view of BLPOP" { +# set rd [redis_deferring_client] +# r del list +# $rd blpop list 0 +# r multi +# r lpush list a +# r lpush list b +# r lpush list c +# r exec +# $rd read +# } {list c} diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl new file mode 100644 index 000000000..b7e3c51ca --- /dev/null +++ b/tests/unit/acl.tcl @@ -0,0 +1,1135 @@ +start_server {tags {"acl external:skip"}} { + test {Connections start with the default user} { + r ACL WHOAMI + } {default} + + test {It is possible to create new users} { + r ACL setuser newuser + } + + test {Coverage: ACL USERS} { + r ACL USERS + } {default limit newuser} + + test {Usernames can not contain spaces or null characters} { + catch {r ACL setuser "a a"} err + set err + } {*Usernames can't contain spaces or null characters*} + + test {New users start disabled} { + r ACL setuser newuser >passwd1 + catch {r AUTH newuser passwd1} err + set err + } {*WRONGPASS*} + + test {Enabling the user allows the login} { + r ACL setuser newuser on +acl + r AUTH 
newuser passwd1 + r ACL WHOAMI + } {newuser} + + test {Only the set of correct passwords work} { + r ACL setuser newuser >passwd2 + catch {r AUTH newuser passwd1} e + assert {$e eq "OK"} + catch {r AUTH newuser passwd2} e + assert {$e eq "OK"} + catch {r AUTH newuser passwd3} e + set e + } {*WRONGPASS*} + + test {It is possible to remove passwords from the set of valid ones} { + r ACL setuser newuser pspass +acl +client +@pubsub + r AUTH psuser pspass + catch {r PUBLISH foo bar} e + set e + } {*NOPERM*channel*} + + test {By default, only default user is able to subscribe to any channel} { + set rd [redis_deferring_client] + $rd AUTH default pwd + $rd read + $rd SUBSCRIBE foo + assert_match {subscribe foo 1} [$rd read] + $rd UNSUBSCRIBE + $rd read + $rd AUTH psuser pspass + $rd read + $rd SUBSCRIBE foo + catch {$rd read} e + $rd close + set e + } {*NOPERM*channel*} + + test {By default, only default user is able to subscribe to any pattern} { + set rd [redis_deferring_client] + $rd AUTH default pwd + $rd read + $rd PSUBSCRIBE bar* + assert_match {psubscribe bar\* 1} [$rd read] + $rd PUNSUBSCRIBE + $rd read + $rd AUTH psuser pspass + $rd read + $rd PSUBSCRIBE bar* + catch {$rd read} e + $rd close + set e + } {*NOPERM*channel*} + + test {It's possible to allow publishing to a subset of channels} { + r ACL setuser psuser resetchannels &foo:1 &bar:* + assert_equal {0} [r PUBLISH foo:1 somemessage] + assert_equal {0} [r PUBLISH bar:2 anothermessage] + catch {r PUBLISH zap:3 nosuchmessage} e + set e + } {*NOPERM*channel*} + + test {Validate subset of channels is prefixed with resetchannels flag} { + r ACL setuser hpuser on nopass resetchannels &foo +@all + + # Verify resetchannels flag is prefixed before the channel name(s) + set users [r ACL LIST] + set curruser "hpuser" + + # authenticate as hpuser + r AUTH hpuser pass + + assert_equal {0} [r PUBLISH foo bar] + catch {r PUBLISH bar game} e + + # Falling back to psuser for the below tests + r AUTH psuser pspass + r ACL 
deluser hpuser + set e + } {*NOPERM*channel*} + + test {In transaction queue publish/subscribe/psubscribe to unauthorized channel will fail} { + r ACL setuser psuser +multi +discard + r MULTI + assert_error {*NOPERM*channel*} {r PUBLISH notexits helloworld} + r DISCARD + r MULTI + assert_error {*NOPERM*channel*} {r SUBSCRIBE notexits foo:1} + r DISCARD + r MULTI + assert_error {*NOPERM*channel*} {r PSUBSCRIBE notexits:* bar:*} + r DISCARD + } + + test {It's possible to allow subscribing to a subset of channels} { + set rd [redis_deferring_client] + $rd AUTH psuser pspass + $rd read + $rd SUBSCRIBE foo:1 + assert_match {subscribe foo:1 1} [$rd read] + $rd SUBSCRIBE bar:2 + assert_match {subscribe bar:2 2} [$rd read] + $rd SUBSCRIBE zap:3 + catch {$rd read} e + set e + } {*NOPERM*channel*} + +# test {It's possible to allow subscribing to a subset of shard channels} { +# set rd [redis_deferring_client] +# $rd AUTH psuser pspass +# $rd read +# $rd SSUBSCRIBE foo:1 +# assert_match {ssubscribe foo:1 1} [$rd read] +# $rd SSUBSCRIBE bar:2 +# assert_match {ssubscribe bar:2 2} [$rd read] +# $rd SSUBSCRIBE zap:3 +# catch {$rd read} e +# set e +# } {*NOPERM*channel*} + + test {It's possible to allow subscribing to a subset of channel patterns} { + set rd [redis_deferring_client] + $rd AUTH psuser pspass + $rd read + $rd PSUBSCRIBE foo:1 + assert_match {psubscribe foo:1 1} [$rd read] + $rd PSUBSCRIBE bar:* + assert_match {psubscribe bar:\* 2} [$rd read] + $rd PSUBSCRIBE bar:baz + catch {$rd read} e + set e + } {*NOPERM*channel*} + + test {Subscribers are killed when revoked of channel permission} { + set rd [redis_deferring_client] + r ACL setuser psuser resetchannels &foo:1 + $rd AUTH psuser pspass + $rd read + $rd CLIENT SETNAME deathrow + $rd read + $rd SUBSCRIBE foo:1 + $rd read + r ACL setuser psuser resetchannels + assert_no_match {*deathrow*} [r CLIENT LIST] + $rd close + } {0} + +# test {Subscribers are killed when revoked of channel permission} { +# set rd 
[redis_deferring_client] +# r ACL setuser psuser resetchannels &foo:1 +# $rd AUTH psuser pspass +# $rd read +# $rd CLIENT SETNAME deathrow +# $rd read +# $rd SSUBSCRIBE foo:1 +# $rd read +# r ACL setuser psuser resetchannels +# assert_no_match {*deathrow*} [r CLIENT LIST] +# $rd close +# } {0} + + test {Subscribers are killed when revoked of pattern permission} { + set rd [redis_deferring_client] + r ACL setuser psuser resetchannels &bar:* + $rd AUTH psuser pspass + $rd read + $rd CLIENT SETNAME deathrow + $rd read + $rd PSUBSCRIBE bar:* + $rd read + r ACL setuser psuser resetchannels + assert_no_match {*deathrow*} [r CLIENT LIST] + $rd close + } {0} + + test {Subscribers are killed when revoked of allchannels permission} { + set rd [redis_deferring_client] + r ACL setuser psuser allchannels + $rd AUTH psuser pspass + $rd read + $rd CLIENT SETNAME deathrow + $rd read + $rd PSUBSCRIBE foo + $rd read + r ACL setuser psuser resetchannels + assert_no_match {*deathrow*} [r CLIENT LIST] + $rd close + } {0} + +# test {Subscribers are pardoned if literal permissions are retained and/or gaining allchannels} { +# set rd [redis_deferring_client] +# r ACL setuser psuser resetchannels &foo:1 &bar:* &orders +# $rd AUTH psuser pspass +# $rd read +# $rd CLIENT SETNAME pardoned +# $rd read +# $rd SUBSCRIBE foo:1 +# $rd read +# $rd SSUBSCRIBE orders +# $rd read +# $rd PSUBSCRIBE bar:* +# $rd read +# r ACL setuser psuser resetchannels &foo:1 &bar:* &orders &baz:qaz &zoo:* +# assert_match {*pardoned*} [r CLIENT LIST] +# r ACL setuser psuser allchannels +# assert_match {*pardoned*} [r CLIENT LIST] +# $rd close +# } {0} + +### +# test {blocked command gets rejected when reprocessed after permission change} { +# r auth default "" +# r config resetstat +# set rd [redis_deferring_client] +# r ACL setuser psuser reset on nopass +@all allkeys +# $rd AUTH psuser pspass +# $rd read +# $rd BLPOP list1 0 +# wait_for_blocked_client +# r ACL setuser psuser resetkeys +# r LPUSH list1 foo +# 
assert_error {*NOPERM No permissions to access a key*} {$rd read} +# $rd ping +# $rd close +# assert_match {*calls=0,usec=0,*,rejected_calls=1,failed_calls=0} [cmdrstat blpop r] +# } + + test {Users can be configured to authenticate with any password} { + r ACL setuser newuser nopass + r AUTH newuser zipzapblabla + } {OK} + + test {ACLs can exclude single commands} { + r ACL setuser newuser -ping + r INCR mycounter ; # Should not raise an error + catch {r PING} e + set e + } {*NOPERM*ping*} + + test {ACLs can include or exclude whole classes of commands} { + r ACL setuser newuser -@all +@set +acl + r SADD myset a b c; # Should not raise an error + r ACL setuser newuser +@all -@string + r SADD myset a b c; # Again should not raise an error + # String commands instead should raise an error + catch {r SET foo bar} e + r ACL setuser newuser allcommands; # Undo commands ACL + set e + } {*NOPERM*set*} + + test {ACLs can include single subcommands} { + r ACL setuser newuser +@all -client + r ACL setuser newuser +client|setname + set cmdstr [dict get [r ACL getuser newuser] commands] + #assert_match {+@all*-client*+client|id*} $cmdstr + assert_match {+@all*-client*+client|setname*} $cmdstr + #r CLIENT ID; # Should not fail + r CLIENT SETNAME foo ; # Should not fail + catch {r CLIENT KILL ALL} e + set e + } {*NOPERM*client|kill*} + + test {ACLs can exclude single subcommands, case 1} { + r ACL setuser newuser +@all -client|kill + set cmdstr [dict get [r ACL getuser newuser] commands] + assert_equal {+@all -client|kill} $cmdstr + #r CLIENT ID; # Should not fail + r CLIENT SETNAME foo ; # Should not fail + catch {r CLIENT KILL all} e + set e + } {*NOPERM*client|kill*} + + test {ACLs can exclude single subcommands, case 2} { + r ACL setuser newuser -@all +acl +config -config|set + set cmdstr [dict get [r ACL getuser newuser] commands] + assert_match {*+config*} $cmdstr + assert_match {*-config|set*} $cmdstr + r CONFIG GET loglevel; # Should not fail + catch {r CONFIG SET 
loglevel debug} e + set e + } {*NOPERM*config|set*} + + test {ACLs cannot include a subcommand with a specific arg} { + r ACL setuser newuser +@all -config|get + catch { r ACL setuser newuser +config|get|appendonly} e + set e + } {*Allowing first-arg of a subcommand is not supported*} + + test {ACLs cannot exclude or include a container commands with a specific arg} { + r ACL setuser newuser +@all +config|get + catch { r ACL setuser newuser +@all +config|asdf} e + assert_match "*Unknown command or category name in ACL*" $e + catch { r ACL setuser newuser +@all -config|asdf} e + assert_match "*Unknown command or category name in ACL*" $e + } {} + +# test {ACLs cannot exclude or include a container command with two args} { +# r ACL setuser newuser +@all +config|get +# catch { r ACL setuser newuser +@all +get|key1|key2} e +# assert_match "*Unknown command or category name in ACL*" $e +# catch { r ACL setuser newuser +@all -get|key1|key2} e +# assert_match "*Unknown command or category name in ACL*" $e +# } {} + +# now pika not supported the command +# test {ACLs including of a type includes also subcommands} { +# r ACL setuser newuser -@all +del +acl +@stream +# r DEL key +# r XADD key * field value +# r XINFO STREAM key +# } + +# test {ACLs can block all DEBUG subcommands except one} { +# r ACL setuser newuser -@all +acl +del +incr +debug|object +# r DEL key +# set cmdstr [dict get [r ACL getuser newuser] commands] +# assert_match {*+debug|object*} $cmdstr +# r INCR key +# r DEBUG OBJECT key +# catch {r DEBUG SEGFAULT} e +# set e +# } {*NOPERM*debug*} + +# test {ACLs set can include subcommands, if already full command exists} { +# r ACL setuser bob +memory|doctor +# set cmdstr [dict get [r ACL getuser bob] commands] +# assert_equal {-@all +memory|doctor} $cmdstr +# +# # Validate the commands have got engulfed to +memory. 
+# r ACL setuser bob +memory +# set cmdstr [dict get [r ACL getuser bob] commands] +# assert_equal {-@all +memory} $cmdstr +# +# # Appending to the existing access string of bob. +# r ACL setuser bob +@all +client|id +# # Although this does nothing, we retain it anyways so we can reproduce +# # the original ACL. +# set cmdstr [dict get [r ACL getuser bob] commands] +# assert_equal {+@all +client|id} $cmdstr +# +# r ACL setuser bob >passwd1 on +# r AUTH bob passwd1 +# r CLIENT ID; # Should not fail +# r MEMORY DOCTOR; # Should not fail +# } + +# now pika not supported the command +# test {ACLs set can exclude subcommands, if already full command exists} { +# r ACL setuser alice +@all -memory|doctor +# set cmdstr [dict get [r ACL getuser alice] commands] +# assert_equal {+@all -memory|doctor} $cmdstr +# +# r ACL setuser alice >passwd1 on +# r AUTH alice passwd1 +# +# assert_error {*NOPERM*memory|doctor*} {r MEMORY DOCTOR} +# r MEMORY STATS ;# should work +# +# # Validate the commands have got engulfed to -memory. +# r ACL setuser alice +@all -memory +# set cmdstr [dict get [r ACL getuser alice] commands] +# assert_equal {+@all -memory} $cmdstr +# +# assert_error {*NOPERM*memory|doctor*} {r MEMORY DOCTOR} +# assert_error {*NOPERM*memory|stats*} {r MEMORY STATS} +# +# # Appending to the existing access string of alice. +# r ACL setuser alice -@all +# +# # Now, alice can't do anything, we need to auth newuser to execute ACL GETUSER +# r AUTH newuser passwd1 +# +# # Validate the new commands has got engulfed to -@all. 
+# set cmdstr [dict get [r ACL getuser alice] commands] +# assert_equal {-@all} $cmdstr +# +# r AUTH alice passwd1 +# +# assert_error {*NOPERM*get*} {r GET key} +# assert_error {*NOPERM*memory|stats*} {r MEMORY STATS} +# +# # Auth newuser before the next test +# r AUTH newuser passwd1 +# } + + test {ACL SETUSER RESET reverting to default newly created user} { + set current_user "example" + r ACL DELUSER $current_user + r ACL SETUSER $current_user + + set users [r ACL LIST] + foreach user [lshuffle $users] { + if {[string first $current_user $user] != -1} { + set current_user_output $user + } + } + + r ACL SETUSER $current_user reset + set users [r ACL LIST] + foreach user [lshuffle $users] { + if {[string first $current_user $user] != -1} { + assert_equal $current_user_output $user + } + } + } + + # Note that the order of the generated ACL rules is not stable in Redis + # so we need to match the different parts and not as a whole string. + test {ACL GETUSER is able to translate back command permissions} { + # Subtractive + # r ACL setuser newuser reset +@all ~* -@string +incr -debug +debug|digest + r ACL setuser newuser reset +@all ~* -@string +incr + set cmdstr [dict get [r ACL getuser newuser] commands] + assert_match {*+@all*} $cmdstr + assert_match {*-@string*} $cmdstr + assert_match {*+incr*} $cmdstr + #assert_match {*-debug +debug|digest**} $cmdstr + + # Additive + #r ACL setuser newuser reset +@string -incr +acl +debug|digest +debug|segfault + r ACL setuser newuser reset +@string -incr +acl + set cmdstr [dict get [r ACL getuser newuser] commands] + assert_match {*-@all*} $cmdstr + assert_match {*+@string*} $cmdstr + assert_match {*-incr*} $cmdstr + # {*+debug|digest*} $cmdstr + #assert_match {*+debug|segfault*} $cmdstr + assert_match {*+acl*} $cmdstr + } + + # A regression test make sure that as long as there is a simple + # category defining the commands, that it will be used as is. 
+ test {ACL GETUSER provides reasonable results} { + set categories [r ACL CAT] + + # Test that adding each single category will + # result in just that category with both +@all and -@all + foreach category $categories { + # Test for future commands where allowed + r ACL setuser additive reset +@all "-@$category" + set cmdstr [dict get [r ACL getuser additive] commands] + assert_equal "+@all -@$category" $cmdstr + + # Test for future commands where disallowed + r ACL setuser restrictive reset -@all "+@$category" + set cmdstr [dict get [r ACL getuser restrictive] commands] + assert_equal "-@all +@$category" $cmdstr + } + } + + # Test that only lossless compaction of ACLs occur. + test {ACL GETUSER provides correct results} { + r ACL SETUSER adv-test + r ACL SETUSER adv-test +@all -@hash -@slow +hget + assert_equal "+@all -@hash -@slow +hget" [dict get [r ACL getuser adv-test] commands] + + # Categories are re-ordered if re-added + r ACL SETUSER adv-test -@hash + assert_equal "+@all -@slow +hget -@hash" [dict get [r ACL getuser adv-test] commands] + + # Inverting categories removes existing categories + r ACL SETUSER adv-test +@hash + assert_equal "+@all -@slow +hget +@hash" [dict get [r ACL getuser adv-test] commands] + + # Inverting the all category compacts everything + r ACL SETUSER adv-test -@all + assert_equal "-@all" [dict get [r ACL getuser adv-test] commands] + r ACL SETUSER adv-test -@string -@slow +@all + assert_equal "+@all" [dict get [r ACL getuser adv-test] commands] + + # Make sure categories are case insensitive + r ACL SETUSER adv-test -@all +@HASH +@hash +@HaSh + assert_equal "-@all +@hash" [dict get [r ACL getuser adv-test] commands] + + # Make sure commands are case insensitive + r ACL SETUSER adv-test -@all +HGET +hget +hGeT + assert_equal "-@all +hget" [dict get [r ACL getuser adv-test] commands] + + # Arbitrary category additions and removals are handled + r ACL SETUSER adv-test -@all +@hash +@slow +@set +@set +@slow +@hash + assert_equal 
"-@all +@set +@slow +@hash" [dict get [r ACL getuser adv-test] commands] + + # Arbitrary command additions and removals are handled + r ACL SETUSER adv-test -@all +hget -hset +hset -hget + assert_equal "-@all +hset -hget" [dict get [r ACL getuser adv-test] commands] + + # Arbitrary subcommands are compacted + r ACL SETUSER adv-test -@all +client|list +client|list +config|get +config +acl|list -acl + assert_equal "-@all +client|list +config -acl" [dict get [r ACL getuser adv-test] commands] + + # Unnecessary categories are retained for potentional future compatibility (pika not supported `dangerous`) + #r ACL SETUSER adv-test -@all -@dangerous + #assert_equal "-@all -@dangerous" [dict get [r ACL getuser adv-test] commands] + + # Duplicate categories are compressed, regression test for #12470 + r ACL SETUSER adv-test -@all +config +config|get -config|set +config + assert_equal "-@all +config" [dict get [r ACL getuser adv-test] commands] + } + + test "ACL CAT with illegal arguments" { + assert_error {*Unknown category 'NON_EXISTS'} {r ACL CAT NON_EXISTS} + assert_error {*unknown subcommand or wrong number of arguments for 'CAT'*} {r ACL CAT NON_EXISTS NON_EXISTS2} + } + + test "ACL CAT without category - list all categories" { + set categories [r acl cat] + assert_not_equal [lsearch $categories "keyspace"] -1 + assert_not_equal [lsearch $categories "connection"] -1 + } + + test "ACL CAT category - list all commands/subcommands that belong to category" { + # now pika not supported the command + #assert_not_equal [lsearch [r acl cat transaction] "multi"] -1 + #assert_not_equal [lsearch [r acl cat scripting] "function|list"] -1 + + # Negative check to make sure it doesn't actually return all commands. 
+ assert_equal [lsearch [r acl cat keyspace] "set"] -1 + #assert_equal [lsearch [r acl cat stream] "get"] -1 + } + +# now pika not supported the command +# test "ACL requires explicit permission for scripting for EVAL_RO, EVALSHA_RO and FCALL_RO" { +# r ACL SETUSER scripter on nopass +readonly +# assert_match {*has no permissions to run the 'eval_ro' command*} [r ACL DRYRUN scripter EVAL_RO "" 0] +# assert_match {*has no permissions to run the 'evalsha_ro' command*} [r ACL DRYRUN scripter EVALSHA_RO "" 0] +# assert_match {*has no permissions to run the 'fcall_ro' command*} [r ACL DRYRUN scripter FCALL_RO "" 0] +# } + +# now pika not supported the command +# test {ACL #5998 regression: memory leaks adding / removing subcommands} { +# r AUTH default "" +# r ACL setuser newuser reset -debug +debug|a +debug|b +debug|c +# r ACL setuser newuser -debug +# # The test framework will detect a leak if any. +# } + +# now pika not supported the command +# test {ACL LOG aggregates similar errors together and assigns unique entry-id to new errors} { +# r ACL LOG RESET +# r ACL setuser user1 >foo +# assert_error "*WRONGPASS*" {r AUTH user1 doo} +# set entry_id_initial_error [dict get [lindex [r ACL LOG] 0] entry-id] +# set timestamp_created_original [dict get [lindex [r ACL LOG] 0] timestamp-created] +# set timestamp_last_update_original [dict get [lindex [r ACL LOG] 0] timestamp-last-updated] +# after 1 +# for {set j 0} {$j < 10} {incr j} { +# assert_error "*WRONGPASS*" {r AUTH user1 doo} +# } +# set entry_id_lastest_error [dict get [lindex [r ACL LOG] 0] entry-id] +# set timestamp_created_updated [dict get [lindex [r ACL LOG] 0] timestamp-created] +# set timestamp_last_updated_after_update [dict get [lindex [r ACL LOG] 0] timestamp-last-updated] +# assert {$entry_id_lastest_error eq $entry_id_initial_error} +# assert {$timestamp_last_update_original < $timestamp_last_updated_after_update} +# assert {$timestamp_created_original eq $timestamp_created_updated} +# r ACL setuser 
user2 >doo +# assert_error "*WRONGPASS*" {r AUTH user2 foo} +# set new_error_entry_id [dict get [lindex [r ACL LOG] 0] entry-id] +# assert {$new_error_entry_id eq $entry_id_lastest_error + 1 } +# } +# + test {ACL LOG shows failed command executions at toplevel} { + r ACL LOG RESET + r ACL setuser antirez >foo on +set ~object:1234 + r ACL setuser antirez +multi +exec + r ACL setuser antirez resetchannels +publish + r AUTH antirez foo + assert_error "*NOPERM*get*" {r GET foo} + r AUTH default "" + set entry [lindex [r ACL LOG] 0] + assert {[dict get $entry username] eq {antirez}} + assert {[dict get $entry context] eq {toplevel}} + assert {[dict get $entry reason] eq {command}} + assert {[dict get $entry object] eq {get}} + assert_match {*cmd=get*} [dict get $entry client-info] + } + +# test "ACL LOG shows failed subcommand executions at toplevel" { +# r ACL LOG RESET +# r ACL DELUSER demo +# r ACL SETUSER demo on nopass +# r AUTH demo "" +# assert_error "*NOPERM*script|help*" {r SCRIPT HELP} +# r AUTH default "" +# set entry [lindex [r ACL LOG] 0] +# assert_equal [dict get $entry username] {demo} +# assert_equal [dict get $entry context] {toplevel} +# assert_equal [dict get $entry reason] {command} +# assert_equal [dict get $entry object] {script|help} +# } + + test {ACL LOG is able to test similar events} { + r ACL LOG RESET + r AUTH antirez foo + catch {r GET foo} + catch {r GET foo} + catch {r GET foo} + r AUTH default "" + set entry [lindex [r ACL LOG] 0] + assert {[dict get $entry count] == 3} + } + + test {ACL LOG is able to log keys access violations and key name} { + r AUTH antirez foo + catch {r SET somekeynotallowed 1234} + r AUTH default "" + set entry [lindex [r ACL LOG] 0] + assert {[dict get $entry reason] eq {key}} + assert {[dict get $entry object] eq {somekeynotallowed}} + } + + test {ACL LOG is able to log channel access violations and channel name} { + r AUTH antirez foo + catch {r PUBLISH somechannelnotallowed nullmsg} + r AUTH default "" + set 
entry [lindex [r ACL LOG] 0] + assert {[dict get $entry reason] eq {channel}} + assert {[dict get $entry object] eq {somechannelnotallowed}} + } + + test {ACL LOG RESET is able to flush the entries in the log} { + r ACL LOG RESET + assert {[llength [r ACL LOG]] == 0} + } + + test {ACL LOG can distinguish the transaction context (1)} { + r AUTH antirez foo + r MULTI + catch {r INCR foo} + catch {r EXEC} + r AUTH default "" + set entry [lindex [r ACL LOG] 0] + assert {[dict get $entry context] eq {multi}} + assert {[dict get $entry object] eq {incr}} + } + + test {ACL LOG can distinguish the transaction context (2)} { + set rd1 [redis_deferring_client] + r ACL SETUSER antirez +incr + + r AUTH antirez foo + r MULTI + r INCR object:1234 + $rd1 ACL SETUSER antirez -incr + $rd1 read + catch {r EXEC} + $rd1 close + r AUTH default "" + set entry [lindex [r ACL LOG] 0] + assert {[dict get $entry context] eq {multi}} + assert {[dict get $entry object] eq {incr}} + r ACL SETUSER antirez -incr + } + +# now pika not supported lua command +# test {ACL can log errors in the context of Lua scripting} { +# r AUTH antirez foo +# catch {r EVAL {redis.call('incr','foo')} 0} +# r AUTH default "" +# set entry [lindex [r ACL LOG] 0] +# assert {[dict get $entry context] eq {lua}} +# assert {[dict get $entry object] eq {incr}} +# assert_match {*cmd=eval*} [dict get $entry client-info] +# } + + test {ACL LOG can accept a numerical argument to show less entries} { + r AUTH antirez foo + catch {r INCR foo} + catch {r INCR foo} + catch {r INCR foo} + catch {r INCR foo} + r AUTH default "" + assert {[llength [r ACL LOG]] > 1} + assert {[llength [r ACL LOG 2]] == 2} + } + + test {ACL LOG can log failed auth attempts} { + catch {r AUTH antirez wrong-password} + set entry [lindex [r ACL LOG] 0] + assert {[dict get $entry context] eq {toplevel}} + assert {[dict get $entry reason] eq {auth}} + assert {[dict get $entry object] eq {AUTH}} + assert {[dict get $entry username] eq {antirez}} + } + + test 
{ACL LOG entries are limited to a maximum amount} { + r ACL LOG RESET + r CONFIG SET acllog-max-len 5 + r AUTH antirez foo + for {set j 0} {$j < 10} {incr j} { + catch {r SET obj:$j 123} + } + r AUTH default "" + assert {[llength [r ACL LOG]] == 5} + } + +# test {When default user is off, new connections are not authenticated} { +# r ACL setuser default off +# catch {set rd1 [redis_deferring_client]} e +# r ACL setuser default on +# set e +# } {*NOAUTH*} + + test {When default user has no command permission, hello command still works for other users} { + r ACL setuser secure-user >supass on +@all + r ACL setuser default -@all + r HELLO 2 AUTH secure-user supass + r ACL setuser default nopass +@all + r AUTH default "" + } + + test {When an authentication chain is used in the HELLO cmd, the last auth cmd has precedence} { + r ACL setuser secure-user1 >supass on +@all + r ACL setuser secure-user2 >supass on +@all + r HELLO 2 AUTH secure-user supass AUTH secure-user2 supass AUTH secure-user1 supass + assert_equal [r ACL whoami] {secure-user1} + catch {r HELLO 2 AUTH secure-user supass AUTH secure-user2 supass AUTH secure-user pass} e + assert_match "WRONGPASS invalid username-password pair or user is disabled." $e + assert_equal [r ACL whoami] {secure-user2} + } + + test {When a setname chain is used in the HELLO cmd, the last setname cmd has precedence} { + r HELLO 2 setname client1 setname client2 setname client3 setname client4 + assert_equal [r client getname] {client4} + catch {r HELLO 2 setname client5 setname client6 setname "client name"} e + assert_match "ERR Client names cannot contain spaces, newlines or special characters." $e + assert_equal [r client getname] {client6} + } + + test {When authentication fails in the HELLO cmd, the client setname should not be applied} { + r client setname client0 + catch {r HELLO 2 AUTH user pass setname client1} e + assert_match "WRONGPASS invalid username-password pair or user is disabled." 
$e + assert {[r client getname] eq {client0}} + } + + test {ACL HELP should not have unexpected options} { + catch {r ACL help xxx} e + assert_match "*wrong number of arguments for 'acl|help' command" $e + } + + test {Delete a user that the client doesn't use} { + r ACL setuser not_used on >passwd + assert {[r ACL deluser not_used] == 1} + # The client is not closed + assert {[r ping] eq {PONG}} + } + + test {Delete a user that the client is using} { + r ACL setuser using on +acl >passwd + r AUTH using passwd + # The client will receive reply normally + assert {[r ACL deluser using] == 1} + # The client is closed + catch {[r ping]} e + assert_match "*I/O error*" $e + } + + test {ACL GENPASS command failed test} { + catch {r ACL genpass -236} err1 + catch {r ACL genpass 5000} err2 + assert_match "*ACL GENPASS argument must be the number*" $err1 + assert_match "*ACL GENPASS argument must be the number*" $err2 + } + + test {Default user can not be removed} { + catch {r ACL deluser default} err + set err + } {ERR The 'default' user cannot be removed} + + test {ACL load non-existing configured ACL file} { + catch {r ACL load} err + set err + } {*not configured to use an ACL file*} + + # If there is an AUTH failure the metric increases +# test {ACL-Metrics user AUTH failure} { +# set current_auth_failures [s acl_access_denied_auth] +# set current_invalid_cmd_accesses [s acl_access_denied_cmd] +# set current_invalid_key_accesses [s acl_access_denied_key] +# set current_invalid_channel_accesses [s acl_access_denied_channel] +# assert_error "*WRONGPASS*" {r AUTH notrealuser 1233456} +# assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 1]} +# assert_error "*WRONGPASS*" {r HELLO 3 AUTH notrealuser 1233456} +# assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 2]} +# assert_error "*WRONGPASS*" {r HELLO 2 AUTH notrealuser 1233456} +# assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 3]} +# assert {[s acl_access_denied_cmd] 
eq $current_invalid_cmd_accesses} +# assert {[s acl_access_denied_key] eq $current_invalid_key_accesses} +# assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses} +# } +# +# # If a user try to access an unauthorized command the metric increases +# test {ACL-Metrics invalid command accesses} { +# set current_auth_failures [s acl_access_denied_auth] +# set current_invalid_cmd_accesses [s acl_access_denied_cmd] +# set current_invalid_key_accesses [s acl_access_denied_key] +# set current_invalid_channel_accesses [s acl_access_denied_channel] +# r ACL setuser invalidcmduser on >passwd nocommands +# r AUTH invalidcmduser passwd +# assert_error "*no permissions to run the * command*" {r acl list} +# r AUTH default "" +# assert {[s acl_access_denied_auth] eq $current_auth_failures} +# assert {[s acl_access_denied_cmd] eq [expr $current_invalid_cmd_accesses + 1]} +# assert {[s acl_access_denied_key] eq $current_invalid_key_accesses} +# assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses} +# } +# +# # If a user try to access an unauthorized key the metric increases +# test {ACL-Metrics invalid key accesses} { +# set current_auth_failures [s acl_access_denied_auth] +# set current_invalid_cmd_accesses [s acl_access_denied_cmd] +# set current_invalid_key_accesses [s acl_access_denied_key] +# set current_invalid_channel_accesses [s acl_access_denied_channel] +# r ACL setuser invalidkeyuser on >passwd resetkeys allcommands +# r AUTH invalidkeyuser passwd +# assert_error "*NOPERM*key*" {r get x} +# r AUTH default "" +# assert {[s acl_access_denied_auth] eq $current_auth_failures} +# assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses} +# assert {[s acl_access_denied_key] eq [expr $current_invalid_key_accesses + 1]} +# assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses} +# } +# +# # If a user try to access an unauthorized channel the metric increases +# test {ACL-Metrics invalid channels accesses} { 
+# set current_auth_failures [s acl_access_denied_auth] +# set current_invalid_cmd_accesses [s acl_access_denied_cmd] +# set current_invalid_key_accesses [s acl_access_denied_key] +# set current_invalid_channel_accesses [s acl_access_denied_channel] +# r ACL setuser invalidchanneluser on >passwd resetchannels allcommands +# r AUTH invalidkeyuser passwd +# assert_error "*NOPERM*channel*" {r subscribe x} +# r AUTH default "" +# assert {[s acl_access_denied_auth] eq $current_auth_failures} +# assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses} +# assert {[s acl_access_denied_key] eq $current_invalid_key_accesses} +# assert {[s acl_access_denied_channel] eq [expr $current_invalid_channel_accesses + 1]} +# } +} + +set server_path [tmpdir "server.acl"] +set base_path ${server_path}/ +exec cp -f tests/assets/user.acl $base_path +set acl_file ${base_path}user.acl +start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "allchannels" "aclfile" $acl_file ] tags [list "external:skip"]] { + # user alice on allcommands allkeys &* >alice + # user bob on -@all +@set +acl ~set* &* >bob + # user default on nopass ~* &* +@all + + test {default: load from include file, can access any channels} { + r SUBSCRIBE foo + r PSUBSCRIBE bar* + r UNSUBSCRIBE + r PUNSUBSCRIBE + r PUBLISH hello world + } + + test {default: with config acl-pubsub-default allchannels after reset, can access any channels} { + r ACL setuser default reset on nopass ~* +@all + r SUBSCRIBE foo + r PSUBSCRIBE bar* + r UNSUBSCRIBE + r PUNSUBSCRIBE + r PUBLISH hello world + } + + test {default: with config acl-pubsub-default resetchannels after reset, can not access any channels} { + r CONFIG SET acl-pubsub-default resetchannels + r ACL setuser default reset on nopass ~* +@all + assert_error {*NOPERM*channel*} {r SUBSCRIBE foo} + assert_error {*NOPERM*channel*} {r PSUBSCRIBE bar*} + assert_error {*NOPERM*channel*} {r PUBLISH hello world} + r CONFIG SET acl-pubsub-default resetchannels + 
} + + test {Alice: can execute all command} { + r AUTH alice alice + assert_equal "alice" [r acl whoami] + r SET key value + } + + test {Bob: just execute @set and acl command} { + r AUTH bob bob + assert_equal "bob" [r acl whoami] + # The test was passed on local machine, Restarting the pika data will still exist, + # which may cause the test to fail, so remove it + #assert_equal "3" [r sadd set 1 2 3] + catch {r SET key value} e + set e + } {*NOPERM*set*} + + test {ACL load and save} { + r ACL setuser eve +get allkeys >eve on + r ACL save + + # ACL load will free user and kill clients + r ACL load + catch {r ACL LIST} e + assert_match {*I/O error*} $e + + reconnect + r AUTH alice alice + r SET key value + r AUTH eve eve + r GET key + catch {r SET key value} e + set e + } {*NOPERM*set*} + + test {ACL load and save with restricted channels} { + r AUTH alice alice + r ACL setuser harry on nopass resetchannels &test +@all ~* + r ACL save + + # ACL load will free user and kill clients + r ACL load + catch {r ACL LIST} e + assert_match {*I/O error*} $e + + reconnect + r AUTH harry anything + r publish test bar + catch {r publish test1 bar} e + r ACL deluser harry + set e + } {*NOPERM*channel*} +} + +set server_path [tmpdir "resetchannels.acl"] +set base_path ${server_path}/ +exec cp -f tests/assets/nodefaultuser.acl $base_path +exec cp -f tests/assets/default.conf $server_path +set acl_file ${base_path}nodefaultuser.acl +start_server [list overrides [list "dir" $server_path "aclfile" $acl_file] tags [list "external:skip"]] { + + test {Default user has access to all channels irrespective of flag} { + set channelinfo [dict get [r ACL getuser default] channels] + assert_equal "&*" $channelinfo + set channelinfo [dict get [r ACL getuser alice] channels] + assert_equal "" $channelinfo + } + + test {Update acl-pubsub-default, existing users shouldn't get affected} { + set channelinfo [dict get [r ACL getuser default] channels] + assert_equal "&*" $channelinfo + r CONFIG set 
acl-pubsub-default allchannels + r ACL setuser mydefault + set channelinfo [dict get [r ACL getuser mydefault] channels] + assert_equal "&*" $channelinfo + r CONFIG set acl-pubsub-default resetchannels + set channelinfo [dict get [r ACL getuser mydefault] channels] + assert_equal "&*" $channelinfo + } + + test {Single channel is valid} { + r ACL setuser onechannel &test + set channelinfo [dict get [r ACL getuser onechannel] channels] + assert_equal "&test" $channelinfo + r ACL deluser onechannel + } + + test {Single channel is not valid with allchannels} { + r CONFIG set acl-pubsub-default allchannels + catch {r ACL setuser onechannel &test} err + r CONFIG set acl-pubsub-default resetchannels + set err + } {*start with an empty list of channels*} +} + +set server_path [tmpdir "resetchannels.acl"] +set base_path ${server_path}/ +exec cp -f tests/assets/nodefaultuser.acl $base_path +exec cp -f tests/assets/default.conf $server_path +set acl_file ${base_path}nodefaultuser.acl +start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "resetchannels" "aclfile" $acl_file] tags [list "external:skip"]] { + + test {Only default user has access to all channels irrespective of flag} { + set channelinfo [dict get [r ACL getuser default] channels] + assert_equal "&*" $channelinfo + set channelinfo [dict get [r ACL getuser alice] channels] + assert_equal "" $channelinfo + } +} + + +start_server {overrides {user "default on nopass ~* +@all"} tags {"external:skip"}} { + test {default: load from config file, without channel permission default user can't access any channels} { + catch {r SUBSCRIBE foo} e + set e + } {*NOPERM*channel*} +} + +start_server {overrides {user "default on nopass ~* &* +@all"} tags {"external:skip"}} { + test {default: load from config file with all channels permissions} { + r SUBSCRIBE foo + r PSUBSCRIBE bar* + r UNSUBSCRIBE + r PUNSUBSCRIBE + r PUBLISH hello world + } +} + +set server_path [tmpdir "duplicate.acl"] +set base_path 
${server_path}/ +exec cp -f tests/assets/user.acl $base_path +exec cp -f tests/assets/default.conf $server_path +set acl_file ${base_path}user.acl +start_server [list overrides [list "dir" $server_path "aclfile" $acl_file] tags [list "external:skip"]] { + + test {Test loading an ACL file with duplicate users} { + exec cp -f tests/assets/user.acl $base_path + + # Corrupt the ACL file + set corruption "\nuser alice on nopass ~* -@all" + exec echo $corruption >> ${base_path}user.acl + catch {r ACL LOAD} err + assert_match {*Duplicate user 'alice' found*} $err + + # Verify the previous users still exist + # NOTE: A missing user evaluates to an empty + # string. + assert {[r ACL GETUSER alice] != ""} + assert_equal [dict get [r ACL GETUSER alice] commands] "+@all" + assert {[r ACL GETUSER bob] != ""} + assert {[r ACL GETUSER default] != ""} + } + + test {Test loading an ACL file with duplicate default user} { + exec cp -f tests/assets/user.acl $base_path + + # Corrupt the ACL file + set corruption "\nuser default on nopass ~* -@all" + exec echo $corruption >> ${base_path}user.acl + catch {r ACL LOAD} err + assert_match {*Duplicate user 'default' found*} $err + + # Verify the previous users still exist + # NOTE: A missing user evaluates to an empty + # string. 
+ assert {[r ACL GETUSER alice] != ""} + assert_equal [dict get [r ACL GETUSER alice] commands] "+@all" + assert {[r ACL GETUSER bob] != ""} + assert {[r ACL GETUSER default] != ""} + } +} + +# test on local machine is passed +#Because the tcl test was slow and there was a problem with restarting the service, everything was removed +#start_server {overrides {user "default on nopass ~* +@all -flushdb"} tags {acl external:skip}} { +# test {ACL from config file and config rewrite} { +# assert_error {NOPERM *} {r flushdb} +# r config rewrite +# restart_server 0 true false +# assert_error {NOPERM *} {r flushdb} +# } +#} diff --git a/tests/unit/aofrw.tcl b/tests/unit/aofrw.tcl new file mode 100644 index 000000000..a2d74168f --- /dev/null +++ b/tests/unit/aofrw.tcl @@ -0,0 +1,210 @@ +start_server {tags {"aofrw"}} { + # Enable the AOF + r config set appendonly yes + r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite. + waitForBgrewriteaof r + + test {AOF rewrite during write load} { + # Start a write load for 10 seconds + set master [srv 0 client] + set master_host [srv 0 host] + set master_port [srv 0 port] + set load_handle0 [start_write_load $master_host $master_port 10] + set load_handle1 [start_write_load $master_host $master_port 10] + set load_handle2 [start_write_load $master_host $master_port 10] + set load_handle3 [start_write_load $master_host $master_port 10] + set load_handle4 [start_write_load $master_host $master_port 10] + + # Make sure the instance is really receiving data + wait_for_condition 50 100 { + [r dbsize] > 0 + } else { + fail "No write load detected." + } + + # After 3 seconds, start a rewrite, while the write load is still + # active. + after 3000 + r bgrewriteaof + waitForBgrewriteaof r + + # Let it run a bit more so that we'll append some data to the new + # AOF. 
+ after 1000 + + # Stop the processes generating the load if they are still active + stop_write_load $load_handle0 + stop_write_load $load_handle1 + stop_write_load $load_handle2 + stop_write_load $load_handle3 + stop_write_load $load_handle4 + + # Make sure that we remain the only connected client. + # This step is needed to make sure there are no pending writes + # that will be processed between the two "debug digest" calls. + wait_for_condition 50 100 { + [llength [split [string trim [r client list]] "\n"]] == 1 + } else { + puts [r client list] + fail "Clients generating loads are not disconnecting" + } + + # Get the data set digest + set d1 [r debug digest] + + # Load the AOF + r debug loadaof + set d2 [r debug digest] + + # Make sure they are the same + assert {$d1 eq $d2} + } +} + +start_server {tags {"aofrw"}} { + test {Turning off AOF kills the background writing child if any} { + r config set appendonly yes + waitForBgrewriteaof r + r multi + r bgrewriteaof + r config set appendonly no + r exec + wait_for_condition 50 100 { + [string match {*Killing*AOF*child*} [exec tail -n5 < [srv 0 stdout]]] + } else { + fail "Can't find 'Killing AOF child' into recent logs" + } + } + + foreach d {string int} { + foreach e {ziplist linkedlist} { + test "AOF rewrite of list with $e encoding, $d data" { + r flushall + if {$e eq {ziplist}} {set len 10} else {set len 1000} + for {set j 0} {$j < $len} {incr j} { + if {$d eq {string}} { + set data [randstring 0 16 alpha] + } else { + set data [randomInt 4000000000] + } + r lpush key $data + } + assert_equal [r object encoding key] $e + set d1 [r debug digest] + r bgrewriteaof + waitForBgrewriteaof r + r debug loadaof + set d2 [r debug digest] + if {$d1 ne $d2} { + error "assertion:$d1 is not equal to $d2" + } + } + } + } + + foreach d {string int} { + foreach e {intset hashtable} { + test "AOF rewrite of set with $e encoding, $d data" { + r flushall + if {$e eq {intset}} {set len 10} else {set len 1000} + for {set j 0} {$j < 
$len} {incr j} { + if {$d eq {string}} { + set data [randstring 0 16 alpha] + } else { + set data [randomInt 4000000000] + } + r sadd key $data + } + if {$d ne {string}} { + assert_equal [r object encoding key] $e + } + set d1 [r debug digest] + r bgrewriteaof + waitForBgrewriteaof r + r debug loadaof + set d2 [r debug digest] + if {$d1 ne $d2} { + error "assertion:$d1 is not equal to $d2" + } + } + } + } + + foreach d {string int} { + foreach e {ziplist hashtable} { + test "AOF rewrite of hash with $e encoding, $d data" { + r flushall + if {$e eq {ziplist}} {set len 10} else {set len 1000} + for {set j 0} {$j < $len} {incr j} { + if {$d eq {string}} { + set data [randstring 0 16 alpha] + } else { + set data [randomInt 4000000000] + } + r hset key $data $data + } + assert_equal [r object encoding key] $e + set d1 [r debug digest] + r bgrewriteaof + waitForBgrewriteaof r + r debug loadaof + set d2 [r debug digest] + if {$d1 ne $d2} { + error "assertion:$d1 is not equal to $d2" + } + } + } + } + + foreach d {string int} { + foreach e {ziplist skiplist} { + test "AOF rewrite of zset with $e encoding, $d data" { + r flushall + if {$e eq {ziplist}} {set len 10} else {set len 1000} + for {set j 0} {$j < $len} {incr j} { + if {$d eq {string}} { + set data [randstring 0 16 alpha] + } else { + set data [randomInt 4000000000] + } + r zadd key [expr rand()] $data + } + assert_equal [r object encoding key] $e + set d1 [r debug digest] + r bgrewriteaof + waitForBgrewriteaof r + r debug loadaof + set d2 [r debug digest] + if {$d1 ne $d2} { + error "assertion:$d1 is not equal to $d2" + } + } + } + } + + test {BGREWRITEAOF is delayed if BGSAVE is in progress} { + r multi + r bgsave + r bgrewriteaof + r info persistence + set res [r exec] + assert_match {*scheduled*} [lindex $res 1] + assert_match {*aof_rewrite_scheduled:1*} [lindex $res 2] + while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { + after 100 + } + } + + test {BGREWRITEAOF is refused if already in 
progress} { + catch { + r multi + r bgrewriteaof + r bgrewriteaof + r exec + } e + assert_match {*ERR*already*} $e + while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { + after 100 + } + } +} diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl new file mode 100644 index 000000000..0ec35985a --- /dev/null +++ b/tests/unit/auth.tcl @@ -0,0 +1,43 @@ +start_server {tags {"auth"}} { + test {AUTH fails if there is no password configured server side} { + catch {r auth foo} err + set _ $err + } {ERR*no password*} +} + +start_server {tags {"auth"} overrides {requirepass foobar}} { +# test {AUTH fails when a wrong password is given} { +# catch {r auth wrong!} err +# set _ $err +# } {ERR*invalid password} + +# test {AUTH succeeds when the right password is given} { +# r auth foobar +# } {OK} +# +# test {Once AUTH succeeded we can actually send commands to the server} { +# r set foo 100 +# r incr foo +# } {101} +} + +start_server {tags {"auth"} overrides {userpass foobar}} { +# test {AUTH fails when a wrong password is given} { +# catch {r auth wrong!} err +# set _ $err +# } {ERR*invalid password} +# +# test {Arbitrary command gives an error when AUTH is required} { +# catch {r set foo bar} err +# set _ $err +# } {ERR*NOAUTH*} + +# test {AUTH succeeds when the right password is given} { +# r auth foobar +# } {OK} +# +# test {Once AUTH succeeded we can actually send commands to the server} { +# r set foo 100 +# r incr foo +# } {101} +} diff --git a/tests/unit/basic.tcl b/tests/unit/basic.tcl new file mode 100644 index 000000000..6988e46a2 --- /dev/null +++ b/tests/unit/basic.tcl @@ -0,0 +1,789 @@ +start_server {tags {"basic"}} { + test {DEL all keys to start with a clean DB} { + foreach key [r keys *] {r del $key} + r dbsize + } {0} + + test {SET and GET an item} { + r set x foobar + r get x + } {foobar} + + test {SET and GET an empty item} { + r set x {} + r get x + } {} + + test {DEL against a single item} { + r del x + r get x + } {} + + test {Vararg 
DEL} { + r set foo1 a + r set foo2 b + r set foo3 c + list [r del foo1 foo2 foo3 foo4] [r mget foo1 foo2 foo3] + } {3 {{} {} {}}} + + test {KEYS with pattern} { + foreach key {key_x key_y key_z foo_a foo_b foo_c} { + r set $key hello + } + lsort [r keys foo*] + } {foo_a foo_b foo_c} + + test {KEYS to get all keys} { + lsort [r keys *] + } {foo_a foo_b foo_c key_x key_y key_z} + + test {DBSIZE} { + r info keyspace 1 + after 1000 + r dbsize + } {6} + + test {DEL all keys} { + foreach key [r keys *] {r del $key} + r info keyspace 1 + after 1000 + r dbsize + } {0} + +# test {Very big payload in GET/SET} { +# set buf [string repeat "abcd" 1000000] +# r set foo $buf +# r get foo +# } [string repeat "abcd" 1000000] + +# tags {"slow"} { +# test {Very big payload random access} { +# set err {} +# array set payload {} +# for {set j 0} {$j < 100} {incr j} { +# set size [expr 1+[randomInt 100000]] +# set buf [string repeat "pl-$j" $size] +# set payload($j) $buf +# r set bigpayload_$j $buf +# } +# for {set j 0} {$j < 1000} {incr j} { +# set index [randomInt 100] +# set buf [r get bigpayload_$index] +# if {$buf != $payload($index)} { +# set err "Values differ: I set '$payload($index)' but I read back '$buf'" +# break +# } +# } +# unset payload +# set _ $err +# } {} +# +# test {SET 10000 numeric keys and access all them in reverse order} { +# set err {} +# for {set x 0} {$x < 10000} {incr x} { +# r set $x $x +# } +# set sum 0 +# for {set x 9999} {$x >= 0} {incr x -1} { +# set val [r get $x] +# if {$val ne $x} { +# set err "Element at position $x is $val instead of $x" +# break +# } +# } +# set _ $err +# } {} + +# test {DBSIZE should be 10101 now} { +# r info keyspace 1 +# after 1000 +# r dbsize +# } {10101} +# } + + test {INCR against non existing key} { + set res {} + append res [r incr novar] + append res [r get novar] + } {11} + + test {INCR against key created by incr itself} { + r incr novar + } {2} + + test {INCR against key originally set with SET} { + r set novar 100 + r 
incr novar + } {101} + + test {INCR over 32bit value} { + r set novar 17179869184 + r incr novar + } {17179869185} + + test {INCRBY over 32bit value with over 32bit increment} { + r set novar 17179869184 + r incrby novar 17179869184 + } {34359738368} + +# test {INCR fails against key with spaces (left)} { +# r set novar " 11" +# catch {r incr novar} err +# format $err +# } {ERR*} + + test {INCR fails against key with spaces (right)} { + r set novar "11 " + catch {r incr novar} err + format $err + } {ERR*} + + test {INCR fails against key with spaces (both)} { + r set novar " 11 " + catch {r incr novar} err + format $err + } {ERR*} + +# test {INCR fails against a key holding a list} { +# r rpush mylist 1 +# catch {r incr mylist} err +# r rpop mylist +# format $err +# } {WRONGTYPE*} + + test {DECRBY over 32bit value with over 32bit increment, negative res} { + r set novar 17179869184 + r decrby novar 17179869185 + } {-1} + + test {INCRBYFLOAT against non existing key} { + r del novar + list [roundFloat [r incrbyfloat novar 1]] \ + [roundFloat [r get novar]] \ + [roundFloat [r incrbyfloat novar 0.25]] \ + [roundFloat [r get novar]] + } {1 1 1.25 1.25} + + test {INCRBYFLOAT against key originally set with SET} { + r set novar 1.5 + roundFloat [r incrbyfloat novar 1.5] + } {3} + + test {INCRBYFLOAT over 32bit value} { + r set novar 17179869184 + r incrbyfloat novar 1.5 + } {17179869185.5} + + test {INCRBYFLOAT over 32bit value with over 32bit increment} { + r set novar 17179869184 + r incrbyfloat novar 17179869184 + } {34359738368} + + test {INCRBYFLOAT fails against key with spaces (left)} { + set err {} + r set novar " 11" + catch {r incrbyfloat novar 1.0} err + format $err + } {ERR*valid*} + + test {INCRBYFLOAT fails against key with spaces (right)} { + set err {} + r set novar "11 " + catch {r incrbyfloat novar 1.0} err + format $err + } {ERR*valid*} + + test {INCRBYFLOAT fails against key with spaces (both)} { + set err {} + r set novar " 11 " + catch {r 
incrbyfloat novar 1.0} err + format $err + } {ERR*valid*} + +# test {INCRBYFLOAT fails against a key holding a list} { +# r del mylist +# set err {} +# r rpush mylist 1 +# catch {r incrbyfloat mylist 1.0} err +# r del mylist +# format $err +# } {WRONGTYPE*} + + test {INCRBYFLOAT does not allow NaN or Infinity} { + r set foo 0 + set err {} + catch {r incrbyfloat foo +inf} err + set err + # p.s. no way I can force NaN to test it from the API because + # there is no way to increment / decrement by infinity nor to + # perform divisions. + } {ERR*would produce*} + + test {INCRBYFLOAT decrement} { + r set foo 1 + roundFloat [r incrbyfloat foo -1.1] + } {-0.1} + + test "SETNX target key missing" { + r del novar + assert_equal 1 [r setnx novar foobared] + assert_equal "foobared" [r get novar] + } + + test "SETNX target key exists" { + r set novar foobared + assert_equal 0 [r setnx novar blabla] + assert_equal "foobared" [r get novar] + } + + test "SETNX against not-expired volatile key" { + r set x 10 + r expire x 10000 + assert_equal 0 [r setnx x 20] + assert_equal 10 [r get x] + } + + test "SETNX against expired volatile key" { + # Make it very unlikely for the key this test uses to be expired by the + # active expiry cycle. This is tightly coupled to the implementation of + # active expiry and dbAdd() but currently the only way to test that + # SETNX expires a key when it should have been. + for {set x 0} {$x < 9999} {incr x} { + r setex key-$x 3600 value + } + + # This will be one of 10000 expiring keys. A cycle is executed every + # 100ms, sampling 10 keys for being expired or not. This key will be + # expired for at most 1s when we wait 2s, resulting in a total sample + # of 100 keys. The probability of the success of this test being a + # false positive is therefore approx. 1%. 
+ r set x 10 + r expire x 1 + + # Wait for the key to expire + after 2000 + + assert_equal 1 [r setnx x 20] + assert_equal 20 [r get x] + } + +# test "DEL against expired key" { +# r debug set-active-expire 0 +# r setex keyExpire 1 valExpire +# after 1100 +# assert_equal 0 [r del keyExpire] +# r debug set-active-expire 1 +# } + + test {EXISTS} { + set res {} + r set newkey test + append res [r exists newkey] + r del newkey + append res [r exists newkey] + } {10} + + test {Zero length value in key. SET/GET/EXISTS} { + r set emptykey {} + set res [r get emptykey] + append res [r exists emptykey] + r del emptykey + append res [r exists emptykey] + } {10} + + test {Commands pipelining} { + set fd [r channel] + puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n" + flush $fd + set res {} + append res [string match OK* [r read]] + append res [r read] + append res [string match PONG* [r read]] + format $res + } {1xyzk1} + + test {Non existing command} { + catch {r foobaredcommand} err + string match ERR* $err + } {1} + +# test {RENAME basic usage} { +# r set mykey hello +# r rename mykey mykey1 +# r rename mykey1 mykey2 +# r get mykey2 +# } {hello} + +# test {RENAME source key should no longer exist} { +# r exists mykey +# } {0} + +# test {RENAME against already existing key} { +# r set mykey a +# r set mykey2 b +# r rename mykey2 mykey +# set res [r get mykey] +# append res [r exists mykey2] +# } {b0} + +# test {RENAMENX basic usage} { +# r del mykey +# r del mykey2 +# r set mykey foobar +# r renamenx mykey mykey2 +# set res [r get mykey2] +# append res [r exists mykey] +# } {foobar0} +# +# test {RENAMENX against already existing key} { +# r set mykey foo +# r set mykey2 bar +# r renamenx mykey mykey2 +# } {0} +# +# test {RENAMENX against already existing key (2)} { +# set res [r get mykey] +# append res [r get mykey2] +# } {foobar} +# +# test {RENAME against non existing source key} { +# catch {r rename nokey foobar} err +# format $err +# } {ERR*} +# +# test {RENAME 
where source and dest key is the same} { +# catch {r rename mykey mykey} err +# format $err +# } {ERR*} +# +# test {RENAME with volatile key, should move the TTL as well} { +# r del mykey mykey2 +# r set mykey foo +# r expire mykey 100 +# assert {[r ttl mykey] > 95 && [r ttl mykey] <= 100} +# r rename mykey mykey2 +# assert {[r ttl mykey2] > 95 && [r ttl mykey2] <= 100} +# } +# +# test {RENAME with volatile key, should not inherit TTL of target key} { +# r del mykey mykey2 +# r set mykey foo +# r set mykey2 bar +# r expire mykey2 100 +# assert {[r ttl mykey] == -1 && [r ttl mykey2] > 0} +# r rename mykey mykey2 +# r ttl mykey2 +# } {-1} + +# test {DEL all keys again (DB 0)} { +# foreach key [r keys *] { +# r del $key +# } +# r dbsize +# } {0} + +# test {DEL all keys again (DB 1)} { +# r select 10 +# foreach key [r keys *] { +# r del $key +# } +# set res [r dbsize] +# r select 9 +# format $res +# } {0} + +# test {MOVE basic usage} { +# r set mykey foobar +# r move mykey 10 +# set res {} +# lappend res [r exists mykey] +# lappend res [r dbsize] +# r select 10 +# lappend res [r get mykey] +# lappend res [r dbsize] +# r select 9 +# format $res +# } [list 0 0 foobar 1] + +# test {MOVE against key existing in the target DB} { +# r set mykey hello +# r move mykey 10 +# } {0} + +# test {MOVE against non-integer DB (#1428)} { +# r set mykey hello +# catch {r move mykey notanumber} e +# set e +# } {*ERR*index out of range} + +# test {SET/GET keys in different DBs} { +# r set a hello +# r set b world +# r select 10 +# r set a foo +# r set b bared +# r select 9 +# set res {} +# lappend res [r get a] +# lappend res [r get b] +# r select 10 +# lappend res [r get a] +# lappend res [r get b] +# r select 9 +# format $res +# } {hello world foo bared} + +# test {MGET} { +# r flushdb +# r set foo BAR +# r set bar FOO +# r mget foo bar +# } {BAR FOO} + +# test {MGET against non existing key} { +# r mget foo baazz bar +# } {BAR {} FOO} +# +# test {MGET against non-string key} { +# r 
sadd myset ciao +# r sadd myset bau +# r mget foo baazz bar myset +# } {BAR {} FOO {}} + +# test {RANDOMKEY} { +# r flushdb +# r set foo x +# r set bar y +# set foo_seen 0 +# set bar_seen 0 +# for {set i 0} {$i < 100} {incr i} { +# set rkey [r randomkey] +# if {$rkey eq {foo}} { +# set foo_seen 1 +# } +# if {$rkey eq {bar}} { +# set bar_seen 1 +# } +# } +# list $foo_seen $bar_seen +# } {1 1} +# +# test {RANDOMKEY against empty DB} { +# r flushdb +# r randomkey +# } {} +# +# test {RANDOMKEY regression 1} { +# r flushdb +# r set x 10 +# r del x +# r randomkey +# } {} + +# test {GETSET (set new value)} { +# list [r getset foo xyz] [r get foo] +# } {{} xyz} + + test {GETSET (replace old value)} { + r set foo bar + list [r getset foo xyz] [r get foo] + } {bar xyz} + + test {MSET base case} { + r mset x 10 y "foo bar" z "x x x x x x x\n\n\r\n" + r mget x y z + } [list 10 {foo bar} "x x x x x x x\n\n\r\n"] + + test {MSET wrong number of args} { + catch {r mset x 10 y "foo bar" z} err + format $err + } {*wrong number*} + + test {MSETNX with already existent key} { + list [r msetnx x1 xxx y2 yyy x 20] [r exists x1] [r exists y2] + } {0 0 0} + + test {MSETNX with not existing keys} { + list [r msetnx x1 xxx y2 yyy] [r get x1] [r get y2] + } {1 xxx yyy} + + test "STRLEN against non-existing key" { + assert_equal 0 [r strlen notakey] + } + + test "STRLEN against integer-encoded value" { + r set myinteger -555 + assert_equal 4 [r strlen myinteger] + } + + test "STRLEN against plain string" { + r set mystring "foozzz0123456789 baz" + assert_equal 20 [r strlen mystring] + } + + test "SETBIT against non-existing key" { + r del mykey + assert_equal 0 [r setbit mykey 1 1] + assert_equal [binary format B* 01000000] [r get mykey] + } + + test "SETBIT against string-encoded key" { + # Ascii "@" is integer 64 = 01 00 00 00 + r set mykey "@" + + assert_equal 0 [r setbit mykey 2 1] + assert_equal [binary format B* 01100000] [r get mykey] + assert_equal 1 [r setbit mykey 1 0] + 
assert_equal [binary format B* 00100000] [r get mykey] + } + +# test "SETBIT against integer-encoded key" { +# # Ascii "1" is integer 49 = 00 11 00 01 +# r set mykey 1 +# assert_encoding int mykey +# +# assert_equal 0 [r setbit mykey 6 1] +# assert_equal [binary format B* 00110011] [r get mykey] +# assert_equal 1 [r setbit mykey 2 0] +# assert_equal [binary format B* 00010011] [r get mykey] +# } + +# test "SETBIT against key with wrong type" { +# r del mykey +# r lpush mykey "foo" +# assert_error "WRONGTYPE*" {r setbit mykey 0 1} +# } + + test "SETBIT with out of range bit offset" { + r del mykey + assert_error "*out of range*" {r setbit mykey [expr 4*1024*1024*1024] 1} + assert_error "*out of range*" {r setbit mykey -1 1} + } + + test "SETBIT with non-bit argument" { + r del mykey + assert_error "*out of range*" {r setbit mykey 0 -1} + assert_error "*out of range*" {r setbit mykey 0 2} + assert_error "*out of range*" {r setbit mykey 0 10} + assert_error "*out of range*" {r setbit mykey 0 20} + } + +# test "SETBIT fuzzing" { +# set str "" +# set len [expr 256*8] +# r del mykey +# +# for {set i 0} {$i < 2000} {incr i} { +# set bitnum [randomInt $len] +# set bitval [randomInt 2] +# set fmt [format "%%-%ds%%d%%-s" $bitnum] +# set head [string range $str 0 $bitnum-1] +# set tail [string range $str $bitnum+1 end] +# set str [string map {" " 0} [format $fmt $head $bitval $tail]] +# +# r setbit mykey $bitnum $bitval +# assert_equal [binary format B* $str] [r get mykey] +# } +# } + + test "GETBIT against non-existing key" { + r del mykey + assert_equal 0 [r getbit mykey 0] + } + + test "GETBIT against string-encoded key" { + # Single byte with 2nd and 3rd bit set + r set mykey "`" + + # In-range + assert_equal 0 [r getbit mykey 0] + assert_equal 1 [r getbit mykey 1] + assert_equal 1 [r getbit mykey 2] + assert_equal 0 [r getbit mykey 3] + + # Out-range + assert_equal 0 [r getbit mykey 8] + assert_equal 0 [r getbit mykey 100] + assert_equal 0 [r getbit mykey 10000] + } + +# 
test "GETBIT against integer-encoded key" { +# r set mykey 1 +# assert_encoding int mykey +# +# # Ascii "1" is integer 49 = 00 11 00 01 +# assert_equal 0 [r getbit mykey 0] +# assert_equal 0 [r getbit mykey 1] +# assert_equal 1 [r getbit mykey 2] +# assert_equal 1 [r getbit mykey 3] +# +# # Out-range +# assert_equal 0 [r getbit mykey 8] +# assert_equal 0 [r getbit mykey 100] +# assert_equal 0 [r getbit mykey 10000] +# } +# +# test "SETRANGE against non-existing key" { +# r del mykey +# assert_equal 3 [r setrange mykey 0 foo] +# assert_equal "foo" [r get mykey] +# +# r del mykey +# assert_equal 0 [r setrange mykey 0 ""] +# assert_equal 0 [r exists mykey] +# +# r del mykey +# assert_equal 4 [r setrange mykey 1 foo] +# assert_equal "\000foo" [r get mykey] +# } + + test "SETRANGE against string-encoded key" { + r set mykey "foo" + assert_equal 3 [r setrange mykey 0 b] + assert_equal "boo" [r get mykey] + + r set mykey "foo" + assert_equal 3 [r setrange mykey 0 ""] + assert_equal "foo" [r get mykey] + + r set mykey "foo" + assert_equal 3 [r setrange mykey 1 b] + assert_equal "fbo" [r get mykey] + + r set mykey "foo" + assert_equal 7 [r setrange mykey 4 bar] + assert_equal "foo\000bar" [r get mykey] + } + +# test "SETRANGE against integer-encoded key" { +# r set mykey 1234 +# assert_encoding int mykey +# assert_equal 4 [r setrange mykey 0 2] +# assert_encoding raw mykey +# assert_equal 2234 [r get mykey] +# +# # Shouldn't change encoding when nothing is set +# r set mykey 1234 +# assert_encoding int mykey +# assert_equal 4 [r setrange mykey 0 ""] +# assert_encoding int mykey +# assert_equal 1234 [r get mykey] +# +# r set mykey 1234 +# assert_encoding int mykey +# assert_equal 4 [r setrange mykey 1 3] +# assert_encoding raw mykey +# assert_equal 1334 [r get mykey] +# +# r set mykey 1234 +# assert_encoding int mykey +# assert_equal 6 [r setrange mykey 5 2] +# assert_encoding raw mykey +# assert_equal "1234\0002" [r get mykey] +# } + +# test "SETRANGE against key with wrong 
type" { +# r del mykey +# r lpush mykey "foo" +# assert_error "WRONGTYPE*" {r setrange mykey 0 bar} +# } + +# test "SETRANGE with out of range offset" { +# r del mykey +# assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} +# +# r set mykey "hello" +# assert_error "*out of range*" {r setrange mykey -1 world} +# assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} +# } + + test "GETRANGE against non-existing key" { + r del mykey + assert_equal "" [r getrange mykey 0 -1] + } + + test "GETRANGE against string value" { + r set mykey "Hello World" + assert_equal "Hell" [r getrange mykey 0 3] + assert_equal "Hello World" [r getrange mykey 0 -1] + assert_equal "orld" [r getrange mykey -4 -1] + assert_equal "" [r getrange mykey 5 3] + assert_equal " World" [r getrange mykey 5 5000] + assert_equal "Hello World" [r getrange mykey -5000 10000] + } + + test "GETRANGE against integer-encoded value" { + r set mykey 1234 + assert_equal "123" [r getrange mykey 0 2] + assert_equal "1234" [r getrange mykey 0 -1] + assert_equal "234" [r getrange mykey -3 -1] + assert_equal "" [r getrange mykey 5 3] + assert_equal "4" [r getrange mykey 3 5000] + assert_equal "1234" [r getrange mykey -5000 10000] + } + +# test "GETRANGE fuzzing" { +# for {set i 0} {$i < 1000} {incr i} { +# r set bin [set bin [randstring 0 1024 binary]] +# set _start [set start [randomInt 1500]] +# set _end [set end [randomInt 1500]] +# if {$_start < 0} {set _start "end-[abs($_start)-1]"} +# if {$_end < 0} {set _end "end-[abs($_end)-1]"} +# assert_equal [string range $bin $_start $_end] [r getrange bin $start $end] +# } +# } + + test {Extended SET can detect syntax errors} { + set e {} + catch {r set foo bar non-existing-option} e + set e + } {*syntax*} + + test {Extended SET NX option} { + r del foo + set v1 [r set foo 1 nx] + set v2 [r set foo 2 nx] + list $v1 $v2 [r get foo] + } {OK {} 1} + + test {Extended SET XX option} { + r del foo + set v1 [r set 
foo 1 xx] + r set foo bar + set v2 [r set foo 2 xx] + list $v1 $v2 [r get foo] + } {{} OK 2} + + test {Extended SET EX option} { + r del foo + r set foo bar ex 10 + set ttl [r ttl foo] + assert {$ttl <= 10 && $ttl > 5} + } + + test {Extended SET PX option} { + r del foo + r set foo bar px 10000 + set ttl [r ttl foo] + assert {$ttl <= 10 && $ttl > 5} + } + + test {Extended SET using multiple options at once} { + r set foo val + assert {[r set foo bar xx px 10000] eq {OK}} + set ttl [r ttl foo] + assert {$ttl <= 10 && $ttl > 5} + } + +# test {KEYS * two times with long key, Github issue #1208} { +# r flushdb +# r set dlskeriewrioeuwqoirueioqwrueoqwrueqw test +# r keys * +# r keys * +# } {dlskeriewrioeuwqoirueioqwrueoqwrueqw} + + test {GETRANGE with huge ranges, Github issue #1844} { + r set foo bar + r getrange foo 0 4294967297 + } {bar} +} diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl new file mode 100644 index 000000000..6ddae9170 --- /dev/null +++ b/tests/unit/bitops.tcl @@ -0,0 +1,341 @@ +# Compare Redis commadns against Tcl implementations of the same commands. 
+proc count_bits s { + binary scan $s b* bits + string length [regsub -all {0} $bits {}] +} + +proc simulate_bit_op {op args} { + set maxlen 0 + set j 0 + set count [llength $args] + foreach a $args { + binary scan $a b* bits + set b($j) $bits + if {[string length $bits] > $maxlen} { + set maxlen [string length $bits] + } + incr j + } + for {set j 0} {$j < $count} {incr j} { + if {[string length $b($j)] < $maxlen} { + append b($j) [string repeat 0 [expr $maxlen-[string length $b($j)]]] + } + } + set out {} + for {set x 0} {$x < $maxlen} {incr x} { + set bit [string range $b(0) $x $x] + if {$op eq {not}} {set bit [expr {!$bit}]} + for {set j 1} {$j < $count} {incr j} { + set bit2 [string range $b($j) $x $x] + switch $op { + and {set bit [expr {$bit & $bit2}]} + or {set bit [expr {$bit | $bit2}]} + xor {set bit [expr {$bit ^ $bit2}]} + } + } + append out $bit + } + binary format b* $out +} + +start_server {tags {"bitops"}} { + test {BITCOUNT returns 0 against non existing key} { + r bitcount no-key + } 0 + +# catch {unset num} +# foreach vec [list "" "\xaa" "\x00\x00\xff" "foobar" "123"] { +# incr num +# test "BITCOUNT against test vector #$num" { +# r set str $vec +# assert {[r bitcount str] == [count_bits $vec]} +# } +# } + +# test {BITCOUNT fuzzing without start/end} { +# for {set j 0} {$j < 100} {incr j} { +# set str [randstring 0 3000] +# r set str $str +# assert {[r bitcount str] == [count_bits $str]} +# } +# } + +# test {BITCOUNT fuzzing with start/end} { +# for {set j 0} {$j < 100} {incr j} { +# set str [randstring 0 3000] +# r set str $str +# set l [string length $str] +# set start [randomInt $l] +# set end [randomInt $l] +# if {$start > $end} { +# lassign [list $end $start] start end +# } +# assert {[r bitcount str $start $end] == [count_bits [string range $str $start $end]]} +# } +# } + + test {BITCOUNT with start, end} { + r set s "foobar" + assert_equal [r bitcount s 0 -1] [count_bits "foobar"] + assert_equal [r bitcount s 1 -2] [count_bits "ooba"] + 
assert_equal [r bitcount s -2 1] [count_bits ""] + assert_equal [r bitcount s 0 1000] [count_bits "foobar"] + } + + test {BITCOUNT syntax error #1} { + catch {r bitcount s 0} e + set e + } {ERR*syntax*} + + test {BITCOUNT regression test for github issue #582} { + r del str + r setbit foo 0 1 + if {[catch {r bitcount foo 0 4294967296} e]} { + assert_match {*ERR*out of range*} $e + set _ 1 + } else { + set e + } + } {1} + + test {BITCOUNT misaligned prefix} { + r del str + r set str ab + r bitcount str 1 -1 + } {3} + + test {BITCOUNT misaligned prefix + full words + remainder} { + r del str + r set str __PPxxxxxxxxxxxxxxxxRR__ + r bitcount str 2 -3 + } {74} + + test {BITOP NOT (empty string)} { + r set s "" + r bitop not dest s + r get dest + } {} + + test {BITOP NOT (known string)} { + r set s "\xaa\x00\xff\x55" + r bitop not dest s + r get dest + } "\x55\xff\x00\xaa" + + test {BITOP where dest and target are the same key} { + r set s "\xaa\x00\xff\x55" + r bitop not s s + r get s + } "\x55\xff\x00\xaa" + + test {BITOP AND|OR|XOR don't change the string with single input key} { + r set a "\x01\x02\xff" + r bitop and res1 a + r bitop or res2 a + r bitop xor res3 a + list [r get res1] [r get res2] [r get res3] + } [list "\x01\x02\xff" "\x01\x02\xff" "\x01\x02\xff"] + + test {BITOP missing key is considered a stream of zero} { + r set a "\x01\x02\xff" + r bitop and res1 no-suck-key a + r bitop or res2 no-suck-key a no-such-key + r bitop xor res3 no-such-key a + list [r get res1] [r get res2] [r get res3] + } [list "\x00\x00\x00" "\x01\x02\xff" "\x01\x02\xff"] + + test {BITOP shorter keys are zero-padded to the key with max length} { + r set a "\x01\x02\xff\xff" + r set b "\x01\x02\xff" + r bitop and res1 a b + r bitop or res2 a b + r bitop xor res3 a b + list [r get res1] [r get res2] [r get res3] + } [list "\x01\x02\xff\x00" "\x01\x02\xff\xff" "\x00\x00\x00\xff"] + + foreach op {and or xor} { + test "BITOP $op fuzzing" { + for {set i 0} {$i < 10} {incr i} { + r 
flushall + set vec {} + set veckeys {} + set numvec [expr {[randomInt 10]+1}] + for {set j 0} {$j < $numvec} {incr j} { + set str [randstring 0 1000] + lappend vec $str + lappend veckeys vector_$j + r set vector_$j $str + } + r bitop $op target {*}$veckeys + assert_equal [r get target] [simulate_bit_op $op {*}$vec] + } + } + } + + test {BITOP NOT fuzzing} { + for {set i 0} {$i < 10} {incr i} { + r flushall + set str [randstring 0 1000] + r set str $str + r bitop not target str + assert_equal [r get target] [simulate_bit_op not $str] + } + } + + test {BITOP with integer encoded source objects} { + r set a 1 + r set b 2 + r bitop xor dest a b a + r get dest + } {2} + +# test {BITOP with non string source key} { +# r del c +# r set a 1 +# r set b 2 +# r lpush c foo +# catch {r bitop xor dest a b c d} e +# set e +# } {WRONGTYPE*} + + test {BITOP with empty string after non empty string (issue #529)} { + r flushdb + r set a "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + r bitop or x a b + } {32} + +# test {BITPOS bit=0 with empty key returns 0} { +# r del str +# r bitpos str 0 +# } {0} + +# test {BITPOS bit=1 with empty key returns -1} { +# r del str +# r bitpos str 1 +# } {-1} +# +# test {BITPOS bit=0 with string less than 1 word works} { +# r set str "\xff\xf0\x00" +# r bitpos str 0 +# } {12} +# +# test {BITPOS bit=1 with string less than 1 word works} { +# r set str "\x00\x0f\x00" +# r bitpos str 1 +# } {12} +# +# test {BITPOS bit=0 starting at unaligned address} { +# r set str "\xff\xf0\x00" +# r bitpos str 0 1 +# } {12} +# +# test {BITPOS bit=1 starting at unaligned address} { +# r set str "\x00\x0f\xff" +# r bitpos str 1 1 +# } {12} +# +# test {BITPOS bit=0 unaligned+full word+reminder} { +# r del str +# r set str "\xff\xff\xff" ; # Prefix +# # Followed by two (or four in 32 bit systems) full words +# r append str "\xff\xff\xff\xff\xff\xff\xff\xff" +# r append str 
"\xff\xff\xff\xff\xff\xff\xff\xff" +# r append str "\xff\xff\xff\xff\xff\xff\xff\xff" +# # First zero bit. +# r append str "\x0f" +# assert {[r bitpos str 0] == 216} +# assert {[r bitpos str 0 1] == 216} +# assert {[r bitpos str 0 2] == 216} +# assert {[r bitpos str 0 3] == 216} +# assert {[r bitpos str 0 4] == 216} +# assert {[r bitpos str 0 5] == 216} +# assert {[r bitpos str 0 6] == 216} +# assert {[r bitpos str 0 7] == 216} +# assert {[r bitpos str 0 8] == 216} +# } +# +# test {BITPOS bit=1 unaligned+full word+reminder} { +# r del str +# r set str "\x00\x00\x00" ; # Prefix +# # Followed by two (or four in 32 bit systems) full words +# r append str "\x00\x00\x00\x00\x00\x00\x00\x00" +# r append str "\x00\x00\x00\x00\x00\x00\x00\x00" +# r append str "\x00\x00\x00\x00\x00\x00\x00\x00" +# # First zero bit. +# r append str "\xf0" +# assert {[r bitpos str 1] == 216} +# assert {[r bitpos str 1 1] == 216} +# assert {[r bitpos str 1 2] == 216} +# assert {[r bitpos str 1 3] == 216} +# assert {[r bitpos str 1 4] == 216} +# assert {[r bitpos str 1 5] == 216} +# assert {[r bitpos str 1 6] == 216} +# assert {[r bitpos str 1 7] == 216} +# assert {[r bitpos str 1 8] == 216} +# } +# +# test {BITPOS bit=1 returns -1 if string is all 0 bits} { +# r set str "" +# for {set j 0} {$j < 20} {incr j} { +# assert {[r bitpos str 1] == -1} +# r append str "\x00" +# } +# } +# +# test {BITPOS bit=0 works with intervals} { +# r set str "\x00\xff\x00" +# assert {[r bitpos str 0 0 -1] == 0} +# assert {[r bitpos str 0 1 -1] == 16} +# assert {[r bitpos str 0 2 -1] == 16} +# assert {[r bitpos str 0 2 200] == 16} +# assert {[r bitpos str 0 1 1] == -1} +# } +# +# test {BITPOS bit=1 works with intervals} { +# r set str "\x00\xff\x00" +# assert {[r bitpos str 1 0 -1] == 8} +# assert {[r bitpos str 1 1 -1] == 8} +# assert {[r bitpos str 1 2 -1] == -1} +# assert {[r bitpos str 1 2 200] == -1} +# assert {[r bitpos str 1 1 1] == 8} +# } +# +# test {BITPOS bit=0 changes behavior if end is given} { +# r 
set str "\xff\xff\xff" +# assert {[r bitpos str 0] == 24} +# assert {[r bitpos str 0 0] == 24} +# assert {[r bitpos str 0 0 -1] == -1} +# } +# +# test {BITPOS bit=1 fuzzy testing using SETBIT} { +# r del str +# set max 524288; # 64k +# set first_one_pos -1 +# for {set j 0} {$j < 1000} {incr j} { +# assert {[r bitpos str 1] == $first_one_pos} +# set pos [randomInt $max] +# r setbit str $pos 1 +# if {$first_one_pos == -1 || $first_one_pos > $pos} { +# # Update the position of the first 1 bit in the array +# # if the bit we set is on the left of the previous one. +# set first_one_pos $pos +# } +# } +# } +# +# test {BITPOS bit=0 fuzzy testing using SETBIT} { +# set max 524288; # 64k +# set first_zero_pos $max +# r set str [string repeat "\xff" [expr $max/8]] +# for {set j 0} {$j < 1000} {incr j} { +# assert {[r bitpos str 0] == $first_zero_pos} +# set pos [randomInt $max] +# r setbit str $pos 0 +# if {$first_zero_pos > $pos} { +# # Update the position of the first 0 bit in the array +# # if the bit we clear is on the left of the previous one. +# set first_zero_pos $pos +# } +# } +# } +} diff --git a/tests/unit/command.tcl b/tests/unit/command.tcl new file mode 100644 index 000000000..a647b42b7 --- /dev/null +++ b/tests/unit/command.tcl @@ -0,0 +1,12 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. + +start_server {tags {"command"}} { + test "Command docs supported." 
{ + set doc [r command docs set] + # puts $doc + assert [dict exists $doc set] + } +} diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl new file mode 100644 index 000000000..b79c3ba9d --- /dev/null +++ b/tests/unit/dump.tcl @@ -0,0 +1,142 @@ +start_server {tags {"dump"}} { + test {DUMP / RESTORE are able to serialize / unserialize a simple key} { + r set foo bar + set encoded [r dump foo] + r del foo + list [r exists foo] [r restore foo 0 $encoded] [r ttl foo] [r get foo] + } {0 OK -1 bar} + + test {RESTORE can set an arbitrary expire to the materialized key} { + r set foo bar + set encoded [r dump foo] + r del foo + r restore foo 5000 $encoded + set ttl [r pttl foo] + assert {$ttl >= 3000 && $ttl <= 5000} + r get foo + } {bar} + + test {RESTORE can set an expire that overflows a 32 bit integer} { + r set foo bar + set encoded [r dump foo] + r del foo + r restore foo 2569591501 $encoded + set ttl [r pttl foo] + assert {$ttl >= (2569591501-3000) && $ttl <= 2569591501} + r get foo + } {bar} + + test {RESTORE returns an error of the key already exists} { + r set foo bar + set e {} + catch {r restore foo 0 "..."} e + set e + } {*is busy*} + + test {DUMP of non existing key returns nil} { + r dump nonexisting_key + } {} + + test {MIGRATE is able to migrate a key between two instances} { + set first [srv 0 client] + r set key "Some Value" + start_server {tags {"repl"}} { + set second [srv 0 client] + set second_host [srv 0 host] + set second_port [srv 0 port] + + assert {[$first exists key] == 1} + assert {[$second exists key] == 0} + set ret [r -1 migrate $second_host $second_port key 9 5000] + assert {$ret eq {OK}} + assert {[$first exists key] == 0} + assert {[$second exists key] == 1} + assert {[$second get key] eq {Some Value}} + assert {[$second ttl key] == -1} + } + } + + test {MIGRATE propagates TTL correctly} { + set first [srv 0 client] + r set key "Some Value" + start_server {tags {"repl"}} { + set second [srv 0 client] + set second_host [srv 0 host] + 
set second_port [srv 0 port] + + assert {[$first exists key] == 1} + assert {[$second exists key] == 0} + $first expire key 10 + set ret [r -1 migrate $second_host $second_port key 9 5000] + assert {$ret eq {OK}} + assert {[$first exists key] == 0} + assert {[$second exists key] == 1} + assert {[$second get key] eq {Some Value}} + assert {[$second ttl key] >= 7 && [$second ttl key] <= 10} + } + } + + test {MIGRATE can correctly transfer large values} { + set first [srv 0 client] + r del key + for {set j 0} {$j < 5000} {incr j} { + r rpush key 1 2 3 4 5 6 7 8 9 10 + r rpush key "item 1" "item 2" "item 3" "item 4" "item 5" \ + "item 6" "item 7" "item 8" "item 9" "item 10" + } + assert {[string length [r dump key]] > (1024*64)} + start_server {tags {"repl"}} { + set second [srv 0 client] + set second_host [srv 0 host] + set second_port [srv 0 port] + + assert {[$first exists key] == 1} + assert {[$second exists key] == 0} + set ret [r -1 migrate $second_host $second_port key 9 10000] + assert {$ret eq {OK}} + assert {[$first exists key] == 0} + assert {[$second exists key] == 1} + assert {[$second ttl key] == -1} + assert {[$second llen key] == 5000*20} + } + } + + test {MIGRATE can correctly transfer hashes} { + set first [srv 0 client] + r del key + r hmset key field1 "item 1" field2 "item 2" field3 "item 3" \ + field4 "item 4" field5 "item 5" field6 "item 6" + start_server {tags {"repl"}} { + set second [srv 0 client] + set second_host [srv 0 host] + set second_port [srv 0 port] + + assert {[$first exists key] == 1} + assert {[$second exists key] == 0} + set ret [r -1 migrate $second_host $second_port key 9 10000] + assert {$ret eq {OK}} + assert {[$first exists key] == 0} + assert {[$second exists key] == 1} + assert {[$second ttl key] == -1} + } + } + + test {MIGRATE timeout actually works} { + set first [srv 0 client] + r set key "Some Value" + start_server {tags {"repl"}} { + set second [srv 0 client] + set second_host [srv 0 host] + set second_port [srv 0 
port] + + assert {[$first exists key] == 1} + assert {[$second exists key] == 0} + + set rd [redis_deferring_client] + $rd debug sleep 5.0 ; # Make second server unable to reply. + set e {} + catch {r -1 migrate $second_host $second_port key 9 1000} e + assert_match {IOERR*} $e + } + } +} diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl new file mode 100644 index 000000000..e1474def3 --- /dev/null +++ b/tests/unit/expire.tcl @@ -0,0 +1,201 @@ +start_server {tags {"expire"}} { + test {EXPIRE - set timeouts multiple times} { + r set x foobar + set v1 [r expire x 5] + set v2 [r ttl x] + set v3 [r expire x 10] + set v4 [r ttl x] + r expire x 2 + list $v1 $v2 $v3 $v4 + } {1 [45] 1 10} + + test {EXPIRE - It should be still possible to read 'x'} { + r get x + } {foobar} + +# tags {"slow"} { +# test {EXPIRE - After 2.1 seconds the key should no longer be here} { +# after 2100 +# list [r get x] [r exists x] +# } {{} 0} +# } + + test {EXPIRE - write on expire should work} { + r del x + r lpush x foo + r expire x 1000 + r lpush x bar + r lrange x 0 -1 + } {bar foo} + + test {EXPIREAT - Check for EXPIRE alike behavior} { + r del x + r set x foo + r expireat x [expr [clock seconds]+15] + r ttl x + } {1[345]} + + test {SETEX - Set + Expire combo operation. 
Check for TTL} { + r setex x 12 test + r ttl x + } {1[012]} + + test {SETEX - Check value} { + r get x + } {test} + + test {SETEX - Overwrite old key} { + r setex y 1 foo + r get y + } {foo} + +# tags {"slow"} { +# test {SETEX - Wait for the key to expire} { +# after 1100 +# r get y +# } {} +# } + + test {SETEX - Wrong time parameter} { + catch {r setex z -10 foo} e + set _ $e + } {*invalid expire*} + + test {PERSIST can undo an EXPIRE} { + r set x foo + r expire x 50 + list [r ttl x] [r persist x] [r ttl x] [r get x] + } {50 1 -1 foo} + + test {PERSIST returns 0 against non existing or non volatile keys} { + r set x foo + list [r persist foo] [r persist nokeyatall] + } {0 0} + + test {EXPIRE pricision is now the millisecond} { + # This test is very likely to do a false positive if the + # server is under pressure, so if it does not work give it a few more + # chances. + for {set j 0} {$j < 3} {incr j} { + r del x + r setex x 1 somevalue + after 900 + set a [r get x] + after 1100 + set b [r get x] + if {$a eq {somevalue} && $b eq {}} break + } + list $a $b + } {somevalue {}} + +# test {PEXPIRE/PSETEX/PEXPIREAT can set sub-second expires} { +# # This test is very likely to do a false positive if the +# # server is under pressure, so if it does not work give it a few more +# # chances. 
+# for {set j 0} {$j < 3} {incr j} { +# r del x y z +# r psetex x 100 somevalue +# after 80 +# set a [r get x] +# after 120 +# set b [r get x] +# +# r set x somevalue +# r pexpire x 100 +# after 80 +# set c [r get x] +# after 120 +# set d [r get x] +# +# r set x somevalue +# r pexpireat x [expr ([clock seconds]*1000)+100] +# after 80 +# set e [r get x] +# after 120 +# set f [r get x] +# +# if {$a eq {somevalue} && $b eq {} && +# $c eq {somevalue} && $d eq {} && +# $e eq {somevalue} && $f eq {}} break +# } +# list $a $b +# } {somevalue {}} + + test {TTL returns tiem to live in seconds} { + r del x + r setex x 10 somevalue + set ttl [r ttl x] + assert {$ttl > 8 && $ttl <= 10} + } + + test {PTTL returns time to live in milliseconds} { + r del x + r setex x 1 somevalue + set ttl [r pttl x] + assert {$ttl > 900 && $ttl <= 1000} + } + + test {TTL / PTTL return -1 if key has no expire} { + r del x + r set x hello + list [r ttl x] [r pttl x] + } {-1 -1} + + test {TTL / PTTL return -2 if key does not exit} { + r del x + list [r ttl x] [r pttl x] + } {-2 -2} + +# test {Redis should actively expire keys incrementally} { +# r flushdb +# r psetex key1 500 a +# r psetex key2 500 a +# r psetex key3 500 a +# set size1 [r dbsize] +# # Redis expires random keys ten times every second so we are +# # fairly sure that all the three keys should be evicted after +# # one second. +# after 1000 +# set size2 [r dbsize] +# list $size1 $size2 +# } {3 0} + +# test {Redis should lazy expire keys} { +# r flushdb +# r debug set-active-expire 0 +# r psetex key1 500 a +# r psetex key2 500 a +# r psetex key3 500 a +# set size1 [r dbsize] +# # Redis expires random keys ten times every second so we are +# # fairly sure that all the three keys should be evicted after +# # one second. 
+# after 1000 +# set size2 [r dbsize] +# r mget key1 key2 key3 +# set size3 [r dbsize] +# r debug set-active-expire 1 +# list $size1 $size2 $size3 +# } {3 3 0} +# +# test {EXPIRE should not resurrect keys (issue #1026)} { +# r debug set-active-expire 0 +# r set foo bar +# r pexpire foo 500 +# after 1000 +# r expire foo 10 +# r debug set-active-expire 1 +# r exists foo +# } {0} + + test {5 keys in, 5 keys out} { + r flushdb + r set a c + r expire a 5 + r set t c + r set e c + r set s c + r set foo b + lsort [r keys *] + } {a e foo s t} +} diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl new file mode 100644 index 000000000..7ed871098 --- /dev/null +++ b/tests/unit/geo.tcl @@ -0,0 +1,311 @@ +# Helper functions to simulate search-in-radius in the Tcl side in order to +# verify the Redis implementation with a fuzzy test. +proc geo_degrad deg {expr {$deg*atan(1)*8/360}} + +proc geo_distance {lon1d lat1d lon2d lat2d} { + set lon1r [geo_degrad $lon1d] + set lat1r [geo_degrad $lat1d] + set lon2r [geo_degrad $lon2d] + set lat2r [geo_degrad $lat2d] + set v [expr {sin(($lon2r - $lon1r) / 2)}] + set u [expr {sin(($lat2r - $lat1r) / 2)}] + expr {2.0 * 6372797.560856 * \ + asin(sqrt($u * $u + cos($lat1r) * cos($lat2r) * $v * $v))} +} + +proc geo_random_point {lonvar latvar} { + upvar 1 $lonvar lon + upvar 1 $latvar lat + # Note that the actual latitude limit should be -85 to +85, we restrict + # the test to -70 to +70 since in this range the algorithm is more precise + # while outside this range occasionally some element may be missing. + set lon [expr {-180 + rand()*360}] + set lat [expr {-70 + rand()*140}] +} + +# Return elements non common to both the lists. 
+# This code is from http://wiki.tcl.tk/15489 +proc compare_lists {List1 List2} { + set DiffList {} + foreach Item $List1 { + if {[lsearch -exact $List2 $Item] == -1} { + lappend DiffList $Item + } + } + foreach Item $List2 { + if {[lsearch -exact $List1 $Item] == -1} { + if {[lsearch -exact $DiffList $Item] == -1} { + lappend DiffList $Item + } + } + } + return $DiffList +} + +# The following list represents sets of random seed, search position +# and radius that caused bugs in the past. It is used by the randomized +# test later as a starting point. When the regression vectors are scanned +# the code reverts to using random data. +# +# The format is: seed km lon lat +set regression_vectors { + {1482225976969 7083 81.634948934258375 30.561509253718668} + {1482340074151 5416 -70.863281847379767 -46.347003465679947} + {1499014685896 6064 -89.818768962202014 -40.463868561416803} + {1412 156 149.29737817929004 15.95807862745508} + {441574 143 59.235461856813856 66.269555127373678} + {160645 187 -101.88575239939883 49.061997951502917} + {750269 154 -90.187939661642517 66.615930412251487} + {342880 145 163.03472387745728 64.012747720821181} + {729955 143 137.86663517256579 63.986745399416776} + {939895 151 59.149620271823181 65.204186651485145} + {1412 156 149.29737817929004 15.95807862745508} + {564862 149 84.062063109158544 -65.685403922426232} +} +set rv_idx 0 + +start_server {tags {"geo"}} { + test {GEOADD create} { + r geoadd nyc -73.9454966 40.747533 "lic market" + } {1} + + test {GEOADD update} { + r geoadd nyc -73.9454966 40.747533 "lic market" + } {0} + + test {GEOADD invalid coordinates} { + catch { + r geoadd nyc -73.9454966 40.747533 "lic market" \ + foo bar "luck market" + } err + set err + } {*valid*} + + test {GEOADD multi add} { + r geoadd nyc -73.9733487 40.7648057 "central park n/q/r" -73.9903085 40.7362513 "union square" -74.0131604 40.7126674 "wtc one" -73.7858139 40.6428986 "jfk" -73.9375699 40.7498929 "q4" -73.9564142 40.7480973 4545 + } {6} + + 
test {Check geoset values} { + r zrange nyc 0 -1 withscores + } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723} + + test {GEORADIUS simple (sorted)} { + r georadius nyc -73.9798091 40.7598464 3 km asc + } {{central park n/q/r} 4545 {union square}} + + test {GEORADIUS withdist (sorted)} { + r georadius nyc -73.9798091 40.7598464 3 km withdist asc + } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} + + test {GEORADIUS with COUNT} { + r georadius nyc -73.9798091 40.7598464 10 km COUNT 3 + } {{wtc one} {union square} {central park n/q/r}} + + test {GEORADIUS with COUNT but missing integer argument} { + catch {r georadius nyc -73.9798091 40.7598464 10 km COUNT} e + set e + } {ERR*syntax*} + + test {GEORADIUS with COUNT DESC} { + r georadius nyc -73.9798091 40.7598464 10 km COUNT 2 DESC + } {{wtc one} q4} + + test {GEORADIUS HUGE, issue #2767} { + r geoadd users -47.271613776683807 -54.534504198047678 user_000000 + llength [r GEORADIUS users 0 0 50000 km WITHCOORD] + } {1} + + test {GEORADIUSBYMEMBER simple (sorted)} { + r georadiusbymember nyc "wtc one" 7 km + } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} + + test {GEORADIUSBYMEMBER withdist (sorted)} { + r georadiusbymember nyc "wtc one" 7 km withdist + } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} + + test {GEOHASH is able to return geohash strings} { + # Example from Wikipedia. 
+ r del points + r geoadd points -5.6 42.6 test + lindex [r geohash points test] 0 + } {ezs42e44yx0} + + test {GEOPOS simple} { + r del points + r geoadd points 10 20 a 30 40 b + lassign [lindex [r geopos points a b] 0] x1 y1 + lassign [lindex [r geopos points a b] 1] x2 y2 + assert {abs($x1 - 10) < 0.001} + assert {abs($y1 - 20) < 0.001} + assert {abs($x2 - 30) < 0.001} + assert {abs($y2 - 40) < 0.001} + } + + test {GEOPOS missing element} { + r del points + r geoadd points 10 20 a 30 40 b + lindex [r geopos points a x b] 1 + } {} + + test {GEODIST simple & unit} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + set m [r geodist points Palermo Catania] + assert {$m > 166274 && $m < 166275} + set km [r geodist points Palermo Catania km] + assert {$km > 166.2 && $km < 166.3} + } + + test {GEODIST missing elements} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + set m [r geodist points Palermo Agrigento] + assert {$m eq {}} + set m [r geodist points Ragusa Agrigento] + assert {$m eq {}} + set m [r geodist empty_key Palermo Catania] + assert {$m eq {}} + } + + test {GEORADIUS STORE option: syntax error} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + catch {r georadius points 13.361389 38.115556 50 km store} e + set e + } {*ERR*syntax*} + + test {GEORANGE STORE option: incompatible options} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + catch {r georadius points 13.361389 38.115556 50 km store points2 withdist} e + assert_match {*ERR*} $e + catch {r georadius points 13.361389 38.115556 50 km store points2 withhash} e + assert_match {*ERR*} $e + catch {r georadius points 13.361389 38.115556 50 km store points2 withcoords} e + assert_match {*ERR*} $e + } + + test {GEORANGE STORE option: plain usage} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" 
\ + 15.087269 37.502669 "Catania" + r georadius points 13.361389 38.115556 500 km store points2 + assert_equal [r zrange points 0 -1] [r zrange points2 0 -1] + } + + test {GEORANGE STOREDIST option: plain usage} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + r georadius points 13.361389 38.115556 500 km storedist points2 + set res [r zrange points2 0 -1 withscores] + assert {[lindex $res 1] < 1} + assert {[lindex $res 3] > 166} + assert {[lindex $res 3] < 167} + } + + test {GEORANGE STOREDIST option: COUNT ASC and DESC} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + r georadius points 13.361389 38.115556 500 km storedist points2 asc count 1 + assert {[r zcard points2] == 1} + set res [r zrange points2 0 -1 withscores] + assert {[lindex $res 0] eq "Palermo"} + + r georadius points 13.361389 38.115556 500 km storedist points2 desc count 1 + assert {[r zcard points2] == 1} + set res [r zrange points2 0 -1 withscores] + assert {[lindex $res 0] eq "Catania"} + } + + test {GEOADD + GEORANGE randomized test} { + set attempt 30 + while {[incr attempt -1]} { + set rv [lindex $regression_vectors $rv_idx] + incr rv_idx + + unset -nocomplain debuginfo + set srand_seed [clock milliseconds] + if {$rv ne {}} {set srand_seed [lindex $rv 0]} + lappend debuginfo "srand_seed is $srand_seed" + expr {srand($srand_seed)} ; # If you need a reproducible run + r del mypoints + + if {[randomInt 10] == 0} { + # From time to time use very big radiuses + set radius_km [expr {[randomInt 50000]+10}] + } else { + # Normally use a few - ~200km radiuses to stress + # test the code the most in edge cases. 
+ set radius_km [expr {[randomInt 200]+10}] + } + if {$rv ne {}} {set radius_km [lindex $rv 1]} + set radius_m [expr {$radius_km*1000}] + geo_random_point search_lon search_lat + if {$rv ne {}} { + set search_lon [lindex $rv 2] + set search_lat [lindex $rv 3] + } + lappend debuginfo "Search area: $search_lon,$search_lat $radius_km km" + set tcl_result {} + set argv {} + for {set j 0} {$j < 20000} {incr j} { + geo_random_point lon lat + lappend argv $lon $lat "place:$j" + set distance [geo_distance $lon $lat $search_lon $search_lat] + if {$distance < $radius_m} { + lappend tcl_result "place:$j" + } + lappend debuginfo "place:$j $lon $lat [expr {$distance/1000}] km" + } + r geoadd mypoints {*}$argv + set res [lsort [r georadius mypoints $search_lon $search_lat $radius_km km]] + set res2 [lsort $tcl_result] + set test_result OK + + if {$res != $res2} { + set rounding_errors 0 + set diff [compare_lists $res $res2] + foreach place $diff { + set mydist [geo_distance $lon $lat $search_lon $search_lat] + set mydist [expr $mydist/1000] + if {($mydist / $radius_km) > 0.999} {incr rounding_errors} + } + # Make sure this is a real error and not a rounding issue.
+ if {[llength $diff] == $rounding_errors} { + set res $res2; # Error silenced + } + } + + if {$res != $res2} { + set diff [compare_lists $res $res2] + puts "*** Possible problem in GEO radius query ***" + puts "Redis: $res" + puts "Tcl : $res2" + puts "Diff : $diff" + puts [join $debuginfo "\n"] + foreach place $diff { + if {[lsearch -exact $res2 $place] != -1} { + set where "(only in Tcl)" + } else { + set where "(only in Redis)" + } + lassign [lindex [r geopos mypoints $place] 0] lon lat + set mydist [geo_distance $lon $lat $search_lon $search_lat] + set mydist [expr $mydist/1000] + puts "$place -> [r geopos mypoints $place] $mydist $where" + if {($mydist / $radius_km) > 0.999} {incr rounding_errors} + } + set test_result FAIL + } + unset -nocomplain debuginfo + if {$test_result ne {OK}} break + } + set test_result + } {OK} +} diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl new file mode 100755 index 000000000..c8d56e4ba --- /dev/null +++ b/tests/unit/hyperloglog.tcl @@ -0,0 +1,250 @@ +start_server {tags {"hll"}} { +# test {HyperLogLog self test passes} { +# catch {r pfselftest} e +# set e +# } {OK} + + test {PFADD without arguments creates an HLL value} { + r pfadd hll + r exists hll + } {1} + + test {Approximated cardinality after creation is zero} { + r pfcount hll + } {0} + + test {PFADD returns 1 when at least 1 reg was modified} { + r pfadd hll a b c + } {1} + + test {PFADD returns 0 when no reg was modified} { + r pfadd hll a b c + } {0} + + test {PFADD works with empty string (regression)} { + r pfadd hll "" + } + + # Note that the self test stresses much better the + # cardinality estimation error. We are testing just the + # command implementation itself here. + test {PFCOUNT returns approximated cardinality of set} { + r del hll + set res {} + r pfadd hll 1 2 3 4 5 + lappend res [r pfcount hll] + # Call it again to test cached value invalidation. 
+ r pfadd hll 6 7 8 8 9 10 + lappend res [r pfcount hll] + set res + } {5 10} + +# test {HyperLogLogs are promote from sparse to dense} { +# r del hll +# r config set hll-sparse-max-bytes 3000 +# set n 0 +# while {$n < 100000} { +# set elements {} +# for {set j 0} {$j < 100} {incr j} {lappend elements [expr rand()]} +# incr n 100 +# r pfadd hll {*}$elements +# set card [r pfcount hll] +# set err [expr {abs($card-$n)}] +# assert {$err < (double($card)/100)*5} +# if {$n < 1000} { +# assert {[r pfdebug encoding hll] eq {sparse}} +# } elseif {$n > 10000} { +# assert {[r pfdebug encoding hll] eq {dense}} +# } +# } +# } + +# test {HyperLogLog sparse encoding stress test} { +# for {set x 0} {$x < 1000} {incr x} { +# r del hll1 hll2 +# set numele [randomInt 100] +# set elements {} +# for {set j 0} {$j < $numele} {incr j} { +# lappend elements [expr rand()] +# } + # Force dense representation of hll2 +# r pfadd hll2 +# r pfdebug todense hll2 +# r pfadd hll1 {*}$elements +# r pfadd hll2 {*}$elements +# assert {[r pfdebug encoding hll1] eq {sparse}} +# assert {[r pfdebug encoding hll2] eq {dense}} + # Cardinality estimated should match exactly. 
+# assert {[r pfcount hll1] eq [r pfcount hll2]} +# } +# } + +# test {Corrupted sparse HyperLogLogs are detected: Additionl at tail} { +# r del hll +# r pfadd hll a b c +# r append hll "hello" +# set e {} +# catch {r pfcount hll} e +# set e +# } {*INVALIDOBJ*} + +# test {Corrupted sparse HyperLogLogs are detected: Broken magic} { +# r del hll +# r pfadd hll a b c +# r setrange hll 0 "0123" +# set e {} +# catch {r pfcount hll} e +# set e +# } {*WRONGTYPE*} + +# test {Corrupted sparse HyperLogLogs are detected: Invalid encoding} { +# r del hll +# r pfadd hll a b c +# r setrange hll 4 "x" +# set e {} +# catch {r pfcount hll} e +# set e +# } {*WRONGTYPE*} + +# test {Corrupted dense HyperLogLogs are detected: Wrong length} { +# r del hll +# r pfadd hll a b c +# r setrange hll 4 "\x00" +# set e {} +# catch {r pfcount hll} e +# set e +# } {*WRONGTYPE*} + +# test {PFADD, PFCOUNT, PFMERGE type checking works} { +# r set foo bar +# catch {r pfadd foo 1} e +# assert_match {*WRONGTYPE*} $e +# catch {r pfcount foo} e +# assert_match {*WRONGTYPE*} $e +# catch {r pfmerge bar foo} e +# assert_match {*WRONGTYPE*} $e +# catch {r pfmerge foo bar} e +# assert_match {*WRONGTYPE*} $e +# } + + test {PFMERGE results on the cardinality of union of sets} { + r del hll hll1 hll2 hll3 + r pfadd hll1 a b c + r pfadd hll2 b c d + r pfadd hll3 c d e + r pfmerge hll hll1 hll2 hll3 + r pfcount hll + } {5} + +# test {PFCOUNT multiple-keys merge returns cardinality of union} { +# r del hll1 hll2 hll3 +# for {set x 1} {$x < 100000} {incr x} { +# # Force dense representation of hll2 +# r pfadd hll1 "foo-$x" +# r pfadd hll2 "bar-$x" +# r pfadd hll3 "zap-$x" +# +# set card [r pfcount hll1 hll2 hll3] +# set realcard [expr {$x*3}] +# set err [expr {abs($card-$realcard)}] +# assert {$err < (double($card)/100)*5} +# } +# } + +# test {HYPERLOGLOG press test: 5w, 10w, 15w, 20w, 30w, 50w, 100w} { +# r del hll1 +# for {set x 1} {$x <= 1000000} {incr x} { +# r pfadd hll1 "foo-$x" +# if {$x == 50000} { +# set 
card [r pfcount hll1] +# set realcard [expr {$x*1}] +# set err [expr {abs($card-$realcard)}] +# +# set d_err [expr {$err * 1.0}] +# set d_realcard [expr {$realcard * 1.0}] +# set err_precentage [expr {double($d_err / $d_realcard)}] +# puts "$x error rate: $err_precentage" +# assert {$err < $realcard * 0.01} +# } +# if {$x == 100000} { +# set card [r pfcount hll1] +# set realcard [expr {$x*1}] +# set err [expr {abs($card-$realcard)}] +# +# set d_err [expr {$err * 1.0}] +# set d_realcard [expr {$realcard * 1.0}] +# set err_precentage [expr {double($d_err / $d_realcard)}] +# puts "$x error rate: $err_precentage" +# assert {$err < $realcard * 0.01} +# } +# if {$x == 150000} { +# set card [r pfcount hll1] +# set realcard [expr {$x*1}] +# set err [expr {abs($card-$realcard)}] +# +# set d_err [expr {$err * 1.0}] +# set d_realcard [expr {$realcard * 1.0}] +# set err_precentage [expr {double($d_err / $d_realcard)}] +# puts "$x error rate: $err_precentage" +# assert {$err < $realcard * 0.01} +# } +# if {$x == 300000} { +# set card [r pfcount hll1] +# set realcard [expr {$x*1}] +# set err [expr {abs($card-$realcard)}] +# +# set d_err [expr {$err * 1.0}] +# set d_realcard [expr {$realcard * 1.0}] +# set err_precentage [expr {double($d_err / $d_realcard)}] +# puts "$x error rate: $err_precentage" +# assert {$err < $realcard * 0.01} +# } +# if {$x == 500000} { +# set card [r pfcount hll1] +# set realcard [expr {$x*1}] +# set err [expr {abs($card-$realcard)}] +# +# set d_err [expr {$err * 1.0}] +# set d_realcard [expr {$realcard * 1.0}] +# set err_precentage [expr {double($d_err / $d_realcard)}] +# puts "$x error rate: $err_precentage" +# assert {$err < $realcard * 0.01} +# } +# if {$x == 1000000} { +# set card [r pfcount hll1] +# set realcard [expr {$x*1}] +# set err [expr {abs($card-$realcard)}] +# +# set d_err [expr {$err * 1.0}] +# set d_realcard [expr {$realcard * 1.0}] +# set err_precentage [expr {double($d_err / $d_realcard)}] +# puts "$x error rate: $err_precentage" +# 
assert {$err < $realcard * 0.03} +# } +# } +# } + +# test {PFDEBUG GETREG returns the HyperLogLog raw registers} { +# r del hll +# r pfadd hll 1 2 3 +# llength [r pfdebug getreg hll] +# } {16384} + + +# test {PFDEBUG GETREG returns the HyperLogLog raw registers} { +# r del hll +# r pfadd hll 1 2 3 +# llength [r pfdebug getreg hll] +# } {16384} + +# test {PFADD / PFCOUNT cache invalidation works} { +# r del hll +# r pfadd hll a b c +# r pfcount hll +# assert {[r getrange hll 15 15] eq "\x00"} +# r pfadd hll a b c +# assert {[r getrange hll 15 15] eq "\x00"} +# r pfadd hll 1 2 3 +# assert {[r getrange hll 15 15] eq "\x80"} +# } +} diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl new file mode 100644 index 000000000..342bb939a --- /dev/null +++ b/tests/unit/introspection.tcl @@ -0,0 +1,59 @@ +start_server {tags {"introspection"}} { + test {CLIENT LIST} { + r client list + } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=0 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*} + + test {MONITOR can log executed commands} { + set rd [redis_deferring_client] + $rd monitor + r set foo bar + r get foo + list [$rd read] [$rd read] [$rd read] + } {*OK*"set" "foo"*"get" "foo"*} + + test {MONITOR can log commands issued by the scripting engine} { + set rd [redis_deferring_client] + $rd monitor + r eval {redis.call('set',KEYS[1],ARGV[1])} 1 foo bar + $rd read ;# Discard the OK + assert_match {*eval*} [$rd read] + assert_match {*lua*"set"*"foo"*"bar"*} [$rd read] + } + + test {CLIENT GETNAME should return NIL if name is not assigned} { + r client getname + } {} + + test {CLIENT LIST shows empty fields for unassigned names} { + r client list + } {*name= *} + + test {CLIENT SETNAME does not accept spaces} { + catch {r client setname "foo bar"} e + set e + } {ERR*} + + test {CLIENT SETNAME can assign a name to this connection} { + assert_equal [r client setname myname] {OK} + r client list + } {*name=myname*} + + test {CLIENT SETNAME can 
change the name of an existing connection} { + assert_equal [r client setname someothername] {OK} + r client list + } {*name=someothername*} + + test {After CLIENT SETNAME, connection can still be closed} { + set rd [redis_deferring_client] + $rd client setname foobar + assert_equal [$rd read] "OK" + assert_match {*foobar*} [r client list] + $rd close + # Now the client should no longer be listed + wait_for_condition 50 100 { + [string match {*foobar*} [r client list]] == 0 + } else { + fail "Client still listed in CLIENT LIST after SETNAME." + } + } +} diff --git a/tests/unit/keys.tcl b/tests/unit/keys.tcl new file mode 100644 index 000000000..cb62444f3 --- /dev/null +++ b/tests/unit/keys.tcl @@ -0,0 +1,54 @@ +start_server {tags {"keys"}} { + test {KEYS with pattern} { + foreach key {key_x key_y key_z foo_a foo_b foo_c} { + r set $key hello + } + assert_equal {foo_a foo_b foo_c} [r keys foo*] + assert_equal {foo_a foo_b foo_c} [r keys f*] + assert_equal {foo_a foo_b foo_c} [r keys f*o*] + } + + test {KEYS to get all keys} { + lsort [r keys *] + } {foo_a foo_b foo_c key_x key_y key_z} + + test {KEYS select by type} { + foreach key {key_x key_y key_z foo_a foo_b foo_c} { + r del $key + } + r set kv_1 value + r set kv_2 value + r hset hash_1 hash_field 1 + r hset hash_2 hash_field 1 + r lpush list_1 value + r lpush list_2 value + r zadd zset_1 1 "a" + r zadd zset_2 1 "a" + r sadd set_1 "a" + r sadd set_2 "a" + assert_equal {kv_1 kv_2} [r keys * string] + assert_equal {hash_1 hash_2} [r keys * hash] + assert_equal {list_1 list_2} [r keys * list] + assert_equal {zset_1 zset_2} [r keys * zset] + assert_equal {set_1 set_2} [r keys * set] + assert_equal {kv_1 kv_2 hash_1 hash_2 zset_1 zset_2 set_1 set_2 list_1 list_2} [r keys *] + assert_equal {kv_1 kv_2} [r keys * STRING] + assert_equal {hash_1 hash_2} [r keys * HASH] + assert_equal {list_1 list_2} [r keys * LIST] + assert_equal {zset_1 zset_2} [r keys * ZSET] + assert_equal {set_1 set_2} [r keys * SET] + } + + test 
{KEYS syntax error} { + catch {r keys * a} e1 + catch {r keys * strings} e2 + catch {r keys * c d} e3 + catch {r keys} e4 + catch {r keys * set zset} e5 + assert_equal {ERR syntax error} [set e1] + assert_equal {ERR syntax error} [set e2] + assert_equal {ERR syntax error} [set e3] + assert_equal {ERR wrong number of arguments for 'keys' command} [set e4] + assert_equal {ERR syntax error} [set e5] + } +} diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl new file mode 100644 index 000000000..b736cad98 --- /dev/null +++ b/tests/unit/latency-monitor.tcl @@ -0,0 +1,50 @@ +start_server {tags {"latency-monitor"}} { + # Set a threshold high enough to avoid spurious latency events. + r config set latency-monitor-threshold 200 + r latency reset + + test {Test latency events logging} { + r debug sleep 0.3 + after 1100 + r debug sleep 0.4 + after 1100 + r debug sleep 0.5 + assert {[r latency history command] >= 3} + } + + test {LATENCY HISTORY output is ok} { + set min 250 + set max 450 + foreach event [r latency history command] { + lassign $event time latency + assert {$latency >= $min && $latency <= $max} + incr min 100 + incr max 100 + set last_time $time ; # Used in the next test + } + } + + test {LATENCY LATEST output is ok} { + foreach event [r latency latest] { + lassign $event eventname time latency max + assert {$eventname eq "command"} + assert {$max >= 450 & $max <= 650} + assert {$time == $last_time} + break + } + } + + test {LATENCY HISTORY / RESET with wrong event name is fine} { + assert {[llength [r latency history blabla]] == 0} + assert {[r latency reset blabla] == 0} + } + + test {LATENCY DOCTOR produces some output} { + assert {[string length [r latency doctor]] > 0} + } + + test {LATENCY RESET is able to reset events} { + assert {[r latency reset] > 0} + assert {[r latency latest] eq {}} + } +} diff --git a/tests/unit/limits.tcl b/tests/unit/limits.tcl new file mode 100644 index 000000000..b37ea9b0f --- /dev/null +++ 
b/tests/unit/limits.tcl @@ -0,0 +1,16 @@ +start_server {tags {"limits"} overrides {maxclients 10}} { + test {Check if maxclients works refusing connections} { + set c 0 + catch { + while {$c < 50} { + incr c + set rd [redis_deferring_client] + $rd ping + $rd read + after 100 + } + } e + assert {$c > 8 && $c <= 10} + set e + } {*ERR max*reached*} +} diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl new file mode 100644 index 000000000..2f853f29d --- /dev/null +++ b/tests/unit/maxmemory.tcl @@ -0,0 +1,152 @@ +start_server {tags {"maxmemory"}} { + test "Config get maxmemory." { + set maxm [r config get maxmemory] + assert {$maxm > 1} + } + + # The current maxmemory command does not support config set and policy. + # For a complete list of commands, refer to the wiki: https://github.com/OpenAtomFoundation/pika/wiki/pika-%E5%B7%AE%E5%BC%82%E5%8C%96%E5%91%BD%E4%BB%A4 + + # test "Without maxmemory small integers are shared" { + # r config set maxmemory 0 + # r set a 1 + # assert {[r object refcount a] > 1} + # } + + # test "With maxmemory and non-LRU policy integers are still shared" { + # r config set maxmemory 1073741824 + # r config set maxmemory-policy allkeys-random + # r set a 1 + # assert {[r object refcount a] > 1} + # } + + # test "With maxmemory and LRU policy integers are not shared" { + # r config set maxmemory 1073741824 + # r config set maxmemory-policy allkeys-lru + # r set a 1 + # r config set maxmemory-policy volatile-lru + # r set b 1 + # assert {[r object refcount a] == 1} + # assert {[r object refcount b] == 1} + # r config set maxmemory 0 + # } + + # foreach policy { + # allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl + # } { + # test "maxmemory - is the memory limit honoured? (policy $policy)" { + # # make sure to start with a blank instance + # r flushall + # # Get the current memory limit and calculate a new limit. 
+ # # We just add 100k to the current memory size so that it is + # # fast for us to reach that limit. + # set used [s used_memory] + # set limit [expr {$used+100*1024}] + # r config set maxmemory $limit + # r config set maxmemory-policy $policy + # # Now add keys until the limit is almost reached. + # set numkeys 0 + # while 1 { + # r setex [randomKey] 10000 x + # incr numkeys + # if {[s used_memory]+4096 > $limit} { + # assert {$numkeys > 10} + # break + # } + # } + # # If we add the same number of keys already added again, we + # # should still be under the limit. + # for {set j 0} {$j < $numkeys} {incr j} { + # r setex [randomKey] 10000 x + # } + # assert {[s used_memory] < ($limit+4096)} + # } + # } + + # foreach policy { + # allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl + # } { + # test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" { + # # make sure to start with a blank instance + # r flushall + # # Get the current memory limit and calculate a new limit. + # # We just add 100k to the current memory size so that it is + # # fast for us to reach that limit. + # set used [s used_memory] + # set limit [expr {$used+100*1024}] + # r config set maxmemory $limit + # r config set maxmemory-policy $policy + # # Now add keys until the limit is almost reached. + # set numkeys 0 + # while 1 { + # r set [randomKey] x + # incr numkeys + # if {[s used_memory]+4096 > $limit} { + # assert {$numkeys > 10} + # break + # } + # } + # # If we add the same number of keys already added again and + # # the policy is allkeys-* we should still be under the limit. + # # Otherwise we should see an error reported by Redis. 
+ # set err 0 + # for {set j 0} {$j < $numkeys} {incr j} { + # if {[catch {r set [randomKey] x} e]} { + # if {[string match {*used memory*} $e]} { + # set err 1 + # } + # } + # } + # if {[string match allkeys-* $policy]} { + # assert {[s used_memory] < ($limit+4096)} + # } else { + # assert {$err == 1} + # } + # } + # } + + # foreach policy { + # volatile-lru volatile-random volatile-ttl + # } { + # test "maxmemory - policy $policy should only remove volatile keys." { + # # make sure to start with a blank instance + # r flushall + # # Get the current memory limit and calculate a new limit. + # # We just add 100k to the current memory size so that it is + # # fast for us to reach that limit. + # set used [s used_memory] + # set limit [expr {$used+100*1024}] + # r config set maxmemory $limit + # r config set maxmemory-policy $policy + # # Now add keys until the limit is almost reached. + # set numkeys 0 + # while 1 { + # # Odd keys are volatile + # # Even keys are non volatile + # if {$numkeys % 2} { + # r setex "key:$numkeys" 10000 x + # } else { + # r set "key:$numkeys" x + # } + # if {[s used_memory]+4096 > $limit} { + # assert {$numkeys > 10} + # break + # } + # incr numkeys + # } + # # Now we add the same number of volatile keys already added. + # # We expect Redis to evict only volatile keys in order to make + # # space. + # set err 0 + # for {set j 0} {$j < $numkeys} {incr j} { + # catch {r setex "foo:$j" 10000 x} + # } + # # We should still be under the limit. + # assert {[s used_memory] < ($limit+4096)} + # # However all our non volatile keys should be here. 
+ # for {set j 0} {$j < $numkeys} {incr j 2} { + # assert {[r exists "key:$j"]} + # } + # } + # } +} diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl new file mode 100644 index 000000000..7ca9a705b --- /dev/null +++ b/tests/unit/memefficiency.tcl @@ -0,0 +1,37 @@ +proc test_memory_efficiency {range} { + r flushall + set rd [redis_deferring_client] + set base_mem [s used_memory] + set written 0 + for {set j 0} {$j < 10000} {incr j} { + set key key:$j + set val [string repeat A [expr {int(rand()*$range)}]] + $rd set $key $val + incr written [string length $key] + incr written [string length $val] + incr written 2 ;# A separator is the minimum to store key-value data. + } + for {set j 0} {$j < 10000} {incr j} { + $rd read ; # Discard replies + } + + set current_mem [s used_memory] + set used [expr {$current_mem-$base_mem}] + set efficiency [expr {double($written)/$used}] + return $efficiency +} + +start_server {tags {"memefficiency"}} { + foreach {size_range expected_min_efficiency} { + 32 0.15 + 64 0.25 + 128 0.35 + 1024 0.75 + 16384 0.82 + } { + test "Memory efficiency with values in range $size_range" { + set efficiency [test_memory_efficiency $size_range] + assert {$efficiency >= $expected_min_efficiency} + } + } +} diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl new file mode 100644 index 000000000..399221473 --- /dev/null +++ b/tests/unit/multi.tcl @@ -0,0 +1,309 @@ +start_server {tags {"multi"}} { + test {MUTLI / EXEC basics} { + r del mylist + r rpush mylist a + r rpush mylist b + r rpush mylist c + r multi + set v1 [r lrange mylist 0 -1] + set v2 [r ping] + set v3 [r exec] + list $v1 $v2 $v3 + } {QUEUED QUEUED {{a b c} PONG}} + + test {DISCARD} { + r del mylist + r rpush mylist a + r rpush mylist b + r rpush mylist c + r multi + set v1 [r del mylist] + set v2 [r discard] + set v3 [r lrange mylist 0 -1] + list $v1 $v2 $v3 + } {QUEUED OK {a b c}} + + test {Nested MULTI are not allowed} { + set err {} + r multi + catch {[r 
multi]} err + r exec + set _ $err + } {*ERR MULTI*} + + test {MULTI where commands alter argc/argv} { + r sadd myset a + r multi + r spop myset + list [r exec] [r exists myset] + } {a 0} + + test {WATCH inside MULTI is not allowed} { + set err {} + r multi + catch {[r watch x]} err + r exec + set _ $err + } {*ERR WATCH*} + + test {EXEC fails if there are errors while queueing commands #1} { + r del foo1 foo2 + r multi + r set foo1 bar1 + catch {r non-existing-command} + r set foo2 bar2 + catch {r exec} e + assert_match {EXECABORT*} $e + list [r exists foo1] [r exists foo2] + } {0 0} + +# test {EXEC fails if there are errors while queueing commands #2} { +# set rd [redis_deferring_client] +# r del foo1 foo2 +# r multi +# r set foo1 bar1 +# $rd config set maxmemory 1 +# assert {[$rd read] eq {OK}} +# catch {r lpush mylist myvalue} +# $rd config set maxmemory 0 +# assert {[$rd read] eq {OK}} +# r set foo2 bar2 +# catch {r exec} e +# assert_match {EXECABORT*} $e +# $rd close +# list [r exists foo1] [r exists foo2] +# } {0 0} + +# test {If EXEC aborts, the client MULTI state is cleared} { +# r del foo1 foo2 +# r multi +# r set foo1 bar1 +# catch {r non-existing-command} +# r set foo2 bar2 +# catch {r exec} e +# assert_match {EXECABORT*} $e +# r ping +# } {PONG} + +# test {EXEC works on WATCHed key not modified} { +# r watch x y z +# r watch k +# r multi +# r ping +# r exec +# } {PONG} + +# test {EXEC fail on WATCHed key modified (1 key of 1 watched)} { +# r set x 30 +# r watch x +# r set x 40 +# r multi +# r ping +# r exec +# } {} + +# test {EXEC fail on WATCHed key modified (1 key of 5 watched)} { +# r set x 30 +# r watch a b x k z +# r set x 40 +# r multi +# r ping +# r exec +# } {} + +# test {EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty} { +# r flushdb +# r lpush foo barsync" +# r watch foo +# r sort emptylist store foo +# r multi +# r ping +# r exec +# } {} + +# test {After successful EXEC key is no longer watched} { +# r set x 30 
+# r watch x +# r multi +# r ping +# r exec +# r set x 40 +# r multi +# r ping +# r exec +# } {PONG} + +# test {After failed EXEC key is no longer watched} { +# r set x 30 +# r watch x +# r set x 40 +# r multi +# r ping +# r exec +# r set x 40 +# r multi +# r ping +# r exec +# } {PONG} + +# test {It is possible to UNWATCH} { +# r set x 30 +# r watch x +# r set x 40 +# r unwatch +# r multi +# r ping +# r exec +# } {PONG} + + test {UNWATCH when there is nothing watched works as expected} { + r unwatch + } {OK} + +# test {FLUSHALL is able to touch the watched keys} { +# r set x 30 +# r watch x +# r flushall +# r multi +# r ping +# r exec +# } {} + +# test {FLUSHALL does not touch non affected keys} { +# r del x +# r watch x +# r flushall +# r multi +# r ping +# r exec +# } {PONG} + +# test {FLUSHDB is able to touch the watched keys} { +# r set x 30 +# r watch x +# r flushdb +# r multi +# r ping +# r exec +# } {} + +# test {FLUSHDB does not touch non affected keys} { +# r del x +# r watch x +# r flushdb +# r multi +# r ping +# r exec +# } {PONG} + +# test {WATCH is able to remember the DB a key belongs to} { +# r select 5 +# r set x 30 +# r watch x +# r select 1 +# r set x 10 +# r select 5 +# r multi +# r ping +# set res [r exec] +# # Restore original DB +# r select 9 +# set res +# } {PONG} + +# test {WATCH will consider touched keys target of EXPIRE} { +# r del x +# r set x foo +# r watch x +# r expire x 10 +# r multi +# r ping +# r exec +# } {} + +# test {WATCH will not consider touched expired keys} { +# r del x +# r set x foo +# r expire x 1 +# r watch x +# after 1100 +# r multi +# r ping +# r exec +# } {PONG} + + test {DISCARD should clear the WATCH dirty flag on the client} { + r watch x + r set x 10 + r multi + r discard + r multi + r incr x + r exec + } {11} + + test {DISCARD should UNWATCH all the keys} { + r watch x + r set x 10 + r multi + r discard + r set x 10 + r multi + r incr x + r exec + } {11} + +# test {MULTI / EXEC is propagated correctly (single 
write command)} { +# set repl [attach_to_replication_stream] +# r multi +# r set foo bar +# r exec +# assert_replication_stream $repl { +# {select *} +# {multi} +# {set foo bar} +# {exec} +# } +# close_replication_stream $repl +# } +# +# test {MULTI / EXEC is propagated correctly (empty transaction)} { +# set repl [attach_to_replication_stream] +# r multi +# r exec +# r set foo bar +# assert_replication_stream $repl { +# {select *} +# {set foo bar} +# } +# close_replication_stream $repl +# } +# +# test {MULTI / EXEC is propagated correctly (read-only commands)} { +# r set foo value1 +# set repl [attach_to_replication_stream] +# r multi +# r get foo +# r exec +# r set foo value2 +# assert_replication_stream $repl { +# {select *} +# {set foo value2} +# } +# close_replication_stream $repl +# } +# +# test {MULTI / EXEC is propagated correctly (write command, no effect)} { +# r del bar foo bar +# set repl [attach_to_replication_stream] +# r multi +# r del foo +# r exec +# assert_replication_stream $repl { +# {select *} +# {multi} +# {exec} +# } +# close_replication_stream $repl +# } +} diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl new file mode 100644 index 000000000..5d625cf45 --- /dev/null +++ b/tests/unit/obuf-limits.tcl @@ -0,0 +1,73 @@ +start_server {tags {"obuf-limits"}} { + test {Client output buffer hard limit is enforced} { + r config set client-output-buffer-limit {pubsub 100000 0 0} + set rd1 [redis_deferring_client] + + $rd1 subscribe foo + set reply [$rd1 read] + assert {$reply eq "subscribe foo 1"} + + set omem 0 + while 1 { + r publish foo bar + set clients [split [r client list] "\r\n"] + set c [split [lindex $clients 1] " "] + if {![regexp {omem=([0-9]+)} $c - omem]} break + if {$omem > 200000} break + } + assert {$omem >= 90000 && $omem < 200000} + $rd1 close + } + + test {Client output buffer soft limit is not enforced if time is not overreached} { + r config set client-output-buffer-limit {pubsub 0 100000 10} + set rd1 
[redis_deferring_client] + + $rd1 subscribe foo + set reply [$rd1 read] + assert {$reply eq "subscribe foo 1"} + + set omem 0 + set start_time 0 + set time_elapsed 0 + while 1 { + r publish foo bar + set clients [split [r client list] "\r\n"] + set c [split [lindex $clients 1] " "] + if {![regexp {omem=([0-9]+)} $c - omem]} break + if {$omem > 100000} { + if {$start_time == 0} {set start_time [clock seconds]} + set time_elapsed [expr {[clock seconds]-$start_time}] + if {$time_elapsed >= 5} break + } + } + assert {$omem >= 100000 && $time_elapsed >= 5 && $time_elapsed <= 10} + $rd1 close + } + + test {Client output buffer soft limit is enforced if time is overreached} { + r config set client-output-buffer-limit {pubsub 0 100000 3} + set rd1 [redis_deferring_client] + + $rd1 subscribe foo + set reply [$rd1 read] + assert {$reply eq "subscribe foo 1"} + + set omem 0 + set start_time 0 + set time_elapsed 0 + while 1 { + r publish foo bar + set clients [split [r client list] "\r\n"] + set c [split [lindex $clients 1] " "] + if {![regexp {omem=([0-9]+)} $c - omem]} break + if {$omem > 100000} { + if {$start_time == 0} {set start_time [clock seconds]} + set time_elapsed [expr {[clock seconds]-$start_time}] + if {$time_elapsed >= 10} break + } + } + assert {$omem >= 100000 && $time_elapsed < 6} + $rd1 close + } +} diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl new file mode 100644 index 000000000..90faf00a6 --- /dev/null +++ b/tests/unit/other.tcl @@ -0,0 +1,245 @@ +start_server {tags {"other"}} { + if {$::force_failure} { + # This is used just for test suite development purposes. 
+ test {Failing test} { + format err + } {ok} + } + +# test {SAVE - make sure there are all the types as values} { +# # Wait for a background saving in progress to terminate +# waitForBgsave r +# r lpush mysavelist hello +# r lpush mysavelist world +# r set myemptykey {} +# r set mynormalkey {blablablba} +# r zadd mytestzset 10 a +# r zadd mytestzset 20 b +# r zadd mytestzset 30 c +# r save +# } {OK} + + tags {slow} { + if {$::accurate} {set iterations 10000} else {set iterations 1000} + foreach fuzztype {binary alpha compr} { + test "FUZZ stresser with data model $fuzztype" { + set err 0 + for {set i 0} {$i < $iterations} {incr i} { + set fuzz [randstring 0 512 $fuzztype] + r set foo $fuzz + set got [r get foo] + if {$got ne $fuzz} { + set err [list $fuzz $got] + break + } + } + set _ $err + } {0} + } + } + +# test {BGSAVE} { +# waitForBgsave r +# r flushdb +# r save +# r set x 10 +# r bgsave +# waitForBgsave r +# r debug reload +# r get x +# } {10} + + test {SELECT an out of range DB} { + catch {r select 1000000} err + set _ $err + } {*invalid*} + +# tags {consistency} { +# if {![catch {package require sha1}]} { +# if {$::accurate} {set numops 10000} else {set numops 1000} +# test {Check consistency of different data types after a reload} { +# r flushdb +# createComplexDataset r $numops +# set dump [csvdump r] +# set sha1 [r debug digest] +# r debug reload +# set sha1_after [r debug digest] +# if {$sha1 eq $sha1_after} { +# set _ 1 +# } else { +# set newdump [csvdump r] +# puts "Consistency test failed!" 
+# puts "You can inspect the two dumps in /tmp/repldump*.txt" +# +# set fd [open /tmp/repldump1.txt w] +# puts $fd $dump +# close $fd +# set fd [open /tmp/repldump2.txt w] +# puts $fd $newdump +# close $fd +# +# set _ 0 +# } +# } {1} + +# test {Same dataset digest if saving/reloading as AOF?} { +# r bgrewriteaof +# waitForBgrewriteaof r +# r debug loadaof +# set sha1_after [r debug digest] +# if {$sha1 eq $sha1_after} { +# set _ 1 +# } else { +# set newdump [csvdump r] +# puts "Consistency test failed!" +# puts "You can inspect the two dumps in /tmp/aofdump*.txt" +# +# set fd [open /tmp/aofdump1.txt w] +# puts $fd $dump +# close $fd +# set fd [open /tmp/aofdump2.txt w] +# puts $fd $newdump +# close $fd +# +# set _ 0 +# } +# } {1} +# } +# } + +# test {EXPIRES after a reload (snapshot + append only file rewrite)} { +# r flushdb +# r set x 10 +# r expire x 1000 +# r save +# r debug reload +# set ttl [r ttl x] +# set e1 [expr {$ttl > 900 && $ttl <= 1000}] +# r bgrewriteaof +# waitForBgrewriteaof r +# r debug loadaof +# set ttl [r ttl x] +# set e2 [expr {$ttl > 900 && $ttl <= 1000}] +# list $e1 $e2 +# } {1 1} + +# test {EXPIRES after AOF reload (without rewrite)} { +# r flushdb +# r config set appendonly yes +# r set x somevalue +# r expire x 1000 +# r setex y 2000 somevalue +# r set z somevalue +# r expireat z [expr {[clock seconds]+3000}] +# +# # Milliseconds variants +# r set px somevalue +# r pexpire px 1000000 +# r psetex py 2000000 somevalue +# r set pz somevalue +# r pexpireat pz [expr {([clock seconds]+3000)*1000}] +# +# # Reload and check +# waitForBgrewriteaof r +# # We need to wait two seconds to avoid false positives here, otherwise +# # the DEBUG LOADAOF command may read a partial file. +# # Another solution would be to set the fsync policy to no, since this +# # prevents write() to be delayed by the completion of fsync(). 
+# after 2000 +# r debug loadaof +# set ttl [r ttl x] +# assert {$ttl > 900 && $ttl <= 1000} +# set ttl [r ttl y] +# assert {$ttl > 1900 && $ttl <= 2000} +# set ttl [r ttl z] +# assert {$ttl > 2900 && $ttl <= 3000} +# set ttl [r ttl px] +# assert {$ttl > 900 && $ttl <= 1000} +# set ttl [r ttl py] +# assert {$ttl > 1900 && $ttl <= 2000} +# set ttl [r ttl pz] +# assert {$ttl > 2900 && $ttl <= 3000} +# r config set appendonly no +# } + + tags {protocol} { + test {PIPELINING stresser (also a regression for the old epoll bug)} { + set fd2 [socket $::host $::port] + fconfigure $fd2 -encoding binary -translation binary + puts -nonewline $fd2 "SELECT 9\r\n" + flush $fd2 + gets $fd2 + + for {set i 0} {$i < 100000} {incr i} { + set q {} + set val "0000${i}0000" + append q "SET key:$i $val\r\n" + puts -nonewline $fd2 $q + set q {} + append q "GET key:$i\r\n" + puts -nonewline $fd2 $q + } + flush $fd2 + + for {set i 0} {$i < 100000} {incr i} { + gets $fd2 line + gets $fd2 count + set count [string range $count 1 end] + set val [read $fd2 $count] + read $fd2 2 + } + close $fd2 + set _ 1 + } {1} + } + +# test {APPEND basics} { +# list [r append foo bar] [r get foo] \ +# [r append foo 100] [r get foo] +# } {3 bar 6 bar100} + + test {APPEND basics, integer encoded values} { + set res {} + r del foo + r append foo 1 + r append foo 2 + lappend res [r get foo] + r set foo 1 + r append foo 2 + lappend res [r get foo] + } {12 12} + + test {APPEND fuzzing} { + set err {} + foreach type {binary alpha compr} { + set buf {} + r del x + for {set i 0} {$i < 1000} {incr i} { + set bin [randstring 0 10 $type] + append buf $bin + r append x $bin + } + if {$buf != [r get x]} { + set err "Expected '$buf' found '[r get x]'" + break + } + } + set _ $err + } {} + + # Leave the user with a clean DB before to exit +# test {FLUSHDB} { +# set aux {} +# r select 9 +# r flushdb +# lappend aux [r dbsize] +# r select 10 +# r flushdb +# lappend aux [r dbsize] +# } {0 0} + +# test {Perform a final SAVE to 
leave a clean DB on disk} { +# waitForBgsave r +# r save +# } {OK} +} diff --git a/tests/unit/printver.tcl b/tests/unit/printver.tcl new file mode 100644 index 000000000..b164ac74c --- /dev/null +++ b/tests/unit/printver.tcl @@ -0,0 +1,6 @@ +start_server {} { + set i [r info] + regexp {pika_version:(.*?)\r\n} $i - version + regexp {pika_git_sha:(.*?)\r\n} $i - sha1 + puts "Testing Pika version $version ($sha1)" +} diff --git a/tests/unit/protocol.tcl b/tests/unit/protocol.tcl new file mode 100644 index 000000000..492d4bede --- /dev/null +++ b/tests/unit/protocol.tcl @@ -0,0 +1,117 @@ +start_server {tags {"protocol"}} { + test "Handle an empty query" { + reconnect + r write "\r\n" + r flush + assert_equal "PONG" [r ping] + } + +# test "Negative multibulk length" { +# reconnect +# r write "*-10\r\n" +# r flush +# assert_equal PONG [r ping] +# } + +# test "Out of range multibulk length" { +# reconnect +# r write "*20000000\r\n" +# r flush +# assert_error "*invalid multibulk length*" {r read} +# } +# +# test "Wrong multibulk payload header" { +# reconnect +# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\nfooz\r\n" +# r flush +# assert_error "*expected '$', got 'f'*" {r read} +# } +# +# test "Negative multibulk payload length" { +# reconnect +# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$-10\r\n" +# r flush +# assert_error "*invalid bulk length*" {r read} +# } +# +# test "Out of range multibulk payload length" { +# reconnect +# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$2000000000\r\n" +# r flush +# assert_error "*invalid bulk length*" {r read} +# } +# +# test "Non-number multibulk payload length" { +# reconnect +# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$blabla\r\n" +# r flush +# assert_error "*invalid bulk length*" {r read} +# } +# +# test "Multi bulk request not followed by bulk arguments" { +# reconnect +# r write "*1\r\nfoo\r\n" +# r flush +# assert_error "*expected '$', got 'f'*" {r read} +# } +# +# test "Generic wrong number of args" { +# reconnect +# assert_error 
"*wrong*arguments*ping*" {r ping x y z} +# } +# +# test "Unbalanced number of quotes" { +# reconnect +# r write "set \"\"\"test-key\"\"\" test-value\r\n" +# r write "ping\r\n" +# r flush +# assert_error "*unbalanced*" {r read} +# } + +# set c 0 +# foreach seq [list "\x00" "*\x00" "$\x00"] { +# incr c +# test "Protocol desync regression test #$c" { +# set s [socket [srv 0 host] [srv 0 port]] +# puts -nonewline $s $seq +# set payload [string repeat A 1024]"\n" +# set test_start [clock seconds] +# set test_time_limit 30 +# while 1 { +# if {[catch { +# puts -nonewline $s payload +# flush $s +# incr payload_size [string length $payload] +# }]} { +# set retval [gets $s] +# close $s +# break +# } else { +# set elapsed [expr {[clock seconds]-$test_start}] +# if {$elapsed > $test_time_limit} { +# close $s +# error "assertion:Redis did not closed connection after protocol desync" +# } +# } +# } +# set retval +# } {*Protocol error*} +# } +# unset c +} + +start_server {tags {"regression"}} { + test "Regression for a crash with blocking ops and pipelining" { + set rd [redis_deferring_client] + set fd [r channel] + set proto "*3\r\n\$5\r\nBLPOP\r\n\$6\r\nnolist\r\n\$1\r\n0\r\n" + puts -nonewline $fd $proto$proto + flush $fd + set res {} + + $rd rpush nolist a + $rd read + $rd rpush nolist a + $rd read + } +} diff --git a/tests/unit/pubsub.tcl b/tests/unit/pubsub.tcl new file mode 100644 index 000000000..60930ae27 --- /dev/null +++ b/tests/unit/pubsub.tcl @@ -0,0 +1,399 @@ +start_server {tags {"pubsub"}} { + proc __consume_subscribe_messages {client type channels} { + set numsub -1 + set counts {} + + for {set i [llength $channels]} {$i > 0} {incr i -1} { + set msg [$client read] + assert_equal $type [lindex $msg 0] + + # when receiving subscribe messages the channels names + # are ordered. 
when receiving unsubscribe messages + # they are unordered + set idx [lsearch -exact $channels [lindex $msg 1]] + if {[string match "*unsubscribe" $type]} { + assert {$idx >= 0} + } else { + assert {$idx == 0} + } + set channels [lreplace $channels $idx $idx] + + # aggregate the subscription count to return to the caller + lappend counts [lindex $msg 2] + } + + # we should have received messages for channels + assert {[llength $channels] == 0} + return $counts + } + + proc subscribe {client channels} { + $client subscribe {*}$channels + __consume_subscribe_messages $client subscribe $channels + } + + proc unsubscribe {client {channels {}}} { + $client unsubscribe {*}$channels + __consume_subscribe_messages $client unsubscribe $channels + } + + proc psubscribe {client channels} { + $client psubscribe {*}$channels + __consume_subscribe_messages $client psubscribe $channels + } + + proc punsubscribe {client {channels {}}} { + $client punsubscribe {*}$channels + __consume_subscribe_messages $client punsubscribe $channels + } + +# test "Pub/Sub PING" { +# set rd1 [redis_deferring_client] +# subscribe $rd1 somechannel +# # While subscribed to non-zero channels PING works in Pub/Sub mode. +# $rd1 ping +# set reply1 [$rd1 read] +# unsubscribe $rd1 somechannel +# # Now we are unsubscribed, PING should just return PONG. 
+# $rd1 ping +# set reply2 [$rd1 read] +# $rd1 close +# list $reply1 $reply2 +# } {PONG PONG} + + test "PUBLISH/SUBSCRIBE basics" { + set rd1 [redis_deferring_client] + + # subscribe to two channels + assert_equal {1 2} [subscribe $rd1 {chan1 chan2}] + assert_equal 1 [r publish chan1 hello] + assert_equal 1 [r publish chan2 world] + assert_equal {message chan1 hello} [$rd1 read] + assert_equal {message chan2 world} [$rd1 read] + + # unsubscribe from one of the channels + unsubscribe $rd1 {chan1} + assert_equal 0 [r publish chan1 hello] + assert_equal 1 [r publish chan2 world] + assert_equal {message chan2 world} [$rd1 read] + + # unsubscribe from the remaining channel + unsubscribe $rd1 {chan2} + assert_equal 0 [r publish chan1 hello] + assert_equal 0 [r publish chan2 world] + + # clean up clients + $rd1 close + } + + test "PUBLISH/SUBSCRIBE with two clients" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + + assert_equal {1} [subscribe $rd1 {chan1}] + assert_equal {1} [subscribe $rd2 {chan1}] + assert_equal 2 [r publish chan1 hello] + assert_equal {message chan1 hello} [$rd1 read] + assert_equal {message chan1 hello} [$rd2 read] + + # clean up clients + $rd1 close + $rd2 close + } + + test "PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments" { + set rd1 [redis_deferring_client] + assert_equal {1 2 3} [subscribe $rd1 {chan1 chan2 chan3}] + unsubscribe $rd1 + assert_equal 0 [r publish chan1 hello] + assert_equal 0 [r publish chan2 hello] + assert_equal 0 [r publish chan3 hello] + + # clean up clients + $rd1 close + } + + test "SUBSCRIBE to one channel more than once" { + set rd1 [redis_deferring_client] + assert_equal {1 1 1} [subscribe $rd1 {chan1 chan1 chan1}] + assert_equal 1 [r publish chan1 hello] + assert_equal {message chan1 hello} [$rd1 read] + + # clean up clients + $rd1 close + } + + test "UNSUBSCRIBE from non-subscribed channels" { + set rd1 [redis_deferring_client] + assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}] + + # 
clean up clients + $rd1 close + } + + test "PUBLISH/PSUBSCRIBE basics" { + set rd1 [redis_deferring_client] + + # subscribe to two patterns + assert_equal {1 2} [psubscribe $rd1 {foo.* bar.*}] + assert_equal 1 [r publish foo.1 hello] + assert_equal 1 [r publish bar.1 hello] + assert_equal 0 [r publish foo1 hello] + assert_equal 0 [r publish barfoo.1 hello] + assert_equal 0 [r publish qux.1 hello] + assert_equal {pmessage foo.* foo.1 hello} [$rd1 read] + assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] + + # unsubscribe from one of the patterns + assert_equal {1} [punsubscribe $rd1 {foo.*}] + assert_equal 0 [r publish foo.1 hello] + assert_equal 1 [r publish bar.1 hello] + assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] + + # unsubscribe from the remaining pattern + assert_equal {0} [punsubscribe $rd1 {bar.*}] + assert_equal 0 [r publish foo.1 hello] + assert_equal 0 [r publish bar.1 hello] + + # clean up clients + $rd1 close + } + + test "PUBLISH/PSUBSCRIBE with two clients" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + + assert_equal {1} [psubscribe $rd1 {chan.*}] + assert_equal {1} [psubscribe $rd2 {chan.*}] + assert_equal 2 [r publish chan.foo hello] + assert_equal {pmessage chan.* chan.foo hello} [$rd1 read] + assert_equal {pmessage chan.* chan.foo hello} [$rd2 read] + + # clean up clients + $rd1 close + $rd2 close + } + + test "PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE without arguments" { + set rd1 [redis_deferring_client] + assert_equal {1 2 3} [psubscribe $rd1 {chan1.* chan2.* chan3.*}] + punsubscribe $rd1 + assert_equal 0 [r publish chan1.hi hello] + assert_equal 0 [r publish chan2.hi hello] + assert_equal 0 [r publish chan3.hi hello] + + # clean up clients + $rd1 close + } + + test "PUNSUBSCRIBE from non-subscribed channels" { + set rd1 [redis_deferring_client] + assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}] + + # clean up clients + $rd1 close + } + + test "NUMSUB returns numbers, not strings (#1561)" { + 
r pubsub numsub abc def + } {abc 0 def 0} + + test "PubSub return value" { + set rd1 [redis_deferring_client] + assert_equal {1} [subscribe $rd1 {foo.bar}] + assert_equal {2} [psubscribe $rd1 {foo.*}] + assert_equal {foo.bar} [r pubsub channels] + assert_equal {1} [r pubsub numpat] + assert_equal {foo.bar 1} [r pubsub numsub foo.bar] + + $rd1 close + } + + test "Mix SUBSCRIBE and PSUBSCRIBE" { + set rd1 [redis_deferring_client] + assert_equal {1} [subscribe $rd1 {foo.bar}] + assert_equal {2} [psubscribe $rd1 {foo.*}] + + assert_equal 2 [r publish foo.bar hello] + assert_equal {message foo.bar hello} [$rd1 read] + assert_equal {pmessage foo.* foo.bar hello} [$rd1 read] + + # clean up clients + $rd1 close + } + + test "PUNSUBSCRIBE and UNSUBSCRIBE should always reply" { + # Make sure we are not subscribed to any channel at all. + r punsubscribe + r unsubscribe + # Now check if the commands still reply correctly. + set reply1 [r punsubscribe] + set reply2 [r unsubscribe] + concat $reply1 $reply2 + } {punsubscribe {} 0 unsubscribe {} 0} + + ### Keyspace events notification tests + +# test "Keyspace notifications: we receive keyspace notifications" { +# r config set notify-keyspace-events KA +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r set foo bar +# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: we receive keyevent notifications" { +# r config set notify-keyspace-events EA +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r set foo bar +# assert_equal {pmessage * __keyevent@9__:set foo} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: we can receive both kind of events" { +# r config set notify-keyspace-events KEA +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r set foo bar +# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] +# assert_equal {pmessage * __keyevent@9__:set foo} 
[$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: we are able to mask events" { +# r config set notify-keyspace-events KEl +# r del mylist +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r set foo bar +# r lpush mylist a +# # No notification for set, because only list commands are enabled. +# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] +# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: general events test" { +# r config set notify-keyspace-events KEg +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r set foo bar +# r expire foo 1 +# r del foo +# assert_equal {pmessage * __keyspace@9__:foo expire} [$rd1 read] +# assert_equal {pmessage * __keyevent@9__:expire foo} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:foo del} [$rd1 read] +# assert_equal {pmessage * __keyevent@9__:del foo} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: list events test" { +# r config set notify-keyspace-events KEl +# r del mylist +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r lpush mylist a +# r rpush mylist a +# r rpop mylist +# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] +# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:mylist rpush} [$rd1 read] +# assert_equal {pmessage * __keyevent@9__:rpush mylist} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:mylist rpop} [$rd1 read] +# assert_equal {pmessage * __keyevent@9__:rpop mylist} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: set events test" { +# r config set notify-keyspace-events Ks +# r del myset +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r sadd myset a b c d +# r srem myset x +# r sadd myset x y z +# r srem myset x +# assert_equal {pmessage * 
__keyspace@9__:myset sadd} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:myset sadd} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:myset srem} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: zset events test" { +# r config set notify-keyspace-events Kz +# r del myzset +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r zadd myzset 1 a 2 b +# r zrem myzset x +# r zadd myzset 3 x 4 y 5 z +# r zrem myzset x +# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:myzset zrem} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: hash events test" { +# r config set notify-keyspace-events Kh +# r del myhash +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r hmset myhash yes 1 no 0 +# r hincrby myhash yes 10 +# assert_equal {pmessage * __keyspace@9__:myhash hset} [$rd1 read] +# assert_equal {pmessage * __keyspace@9__:myhash hincrby} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: expired events (triggered expire)" { +# r config set notify-keyspace-events Ex +# r del foo +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r psetex foo 100 1 +# wait_for_condition 50 100 { +# [r exists foo] == 0 +# } else { +# fail "Key does not expire?!" 
+# } +# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: expired events (background expire)" { +# r config set notify-keyspace-events Ex +# r del foo +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r psetex foo 100 1 +# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] +# $rd1 close +# } +# +# test "Keyspace notifications: evicted events" { +# r config set notify-keyspace-events Ee +# r config set maxmemory-policy allkeys-lru +# r flushdb +# set rd1 [redis_deferring_client] +# assert_equal {1} [psubscribe $rd1 *] +# r set foo bar +# r config set maxmemory 1 +# assert_equal {pmessage * __keyevent@9__:evicted foo} [$rd1 read] +# r config set maxmemory 0 +# $rd1 close +# } +# +# test "Keyspace notifications: test CONFIG GET/SET of event flags" { +# r config set notify-keyspace-events gKE +# assert_equal {gKE} [lindex [r config get notify-keyspace-events] 1] +# r config set notify-keyspace-events {$lshzxeKE} +# assert_equal {$lshzxeKE} [lindex [r config get notify-keyspace-events] 1] +# r config set notify-keyspace-events KA +# assert_equal {AK} [lindex [r config get notify-keyspace-events] 1] +# r config set notify-keyspace-events EA +# assert_equal {AE} [lindex [r config get notify-keyspace-events] 1] +# } +#} diff --git a/tests/unit/quit.tcl b/tests/unit/quit.tcl new file mode 100644 index 000000000..090fe54ce --- /dev/null +++ b/tests/unit/quit.tcl @@ -0,0 +1,40 @@ +start_server {tags {"quit"}} { + proc format_command {args} { + set cmd "*[llength $args]\r\n" + foreach a $args { + append cmd "$[string length $a]\r\n$a\r\n" + } + set _ $cmd + } + + test "QUIT returns OK" { + reconnect + assert_equal OK [r quit] + assert_error * {r ping} + } + +# test "Pipelined commands after QUIT must not be executed" { +# reconnect +# r write [format_command quit] +# r write [format_command set foo bar] +# r flush +# assert_equal OK [r read] +# assert_error * {r 
read} + +# reconnect +# assert_equal {} [r get foo] +# } + +# test "Pipelined commands after QUIT that exceed read buffer size" { +# reconnect +# r write [format_command quit] +# r write [format_command set foo [string repeat "x" 1024]] +# r flush +# assert_equal OK [r read] +# assert_error * {r read} +# +# reconnect +# assert_equal {} [r get foo] +# +# } +} diff --git a/tests/unit/scan.tcl b/tests/unit/scan.tcl new file mode 100644 index 000000000..ed2b41bdb --- /dev/null +++ b/tests/unit/scan.tcl @@ -0,0 +1,286 @@ +start_server {tags {"scan"}} { + test "SCAN basic" { + r flushdb + populate 1000 + #populate 1000 + + set cur 0 + set keys {} + while 1 { + set res [r scan $cur] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + set keys [lsort -unique $keys] + assert_equal 1000 [llength $keys] + } + + test "SCAN COUNT" { + r flushdb + populate 1000 + + set cur 0 + set keys {} + while 1 { + set res [r scan $cur count 5] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + set keys [lsort -unique $keys] + assert_equal 1000 [llength $keys] + } + + test "SCAN MATCH" { + r flushdb + populate 1000 + + set cur 0 + set keys {} + while 1 { + set res [r scan $cur match "key:1??"] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + set keys [lsort -unique $keys] + assert_equal 100 [llength $keys] + } + + test "SCAN TYPE" { + r flushdb + # populate only creates strings + populate 1000 + + # Check non-strings are excluded + set cur 0 + set keys {} + while 1 { + set res [r scan $cur type "list"] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + assert_equal 0 [llength $keys] + + # Check strings are included + set cur 0 + set keys {} + while 1 { + set res [r scan $cur type "string"] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + 
assert_equal 1000 [llength $keys] + + # Check all three args work together + set cur 0 + set keys {} + while 1 { + set res [r scan $cur type "string" match "key:*" count 10] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + assert_equal 1000 [llength $keys] + } + + foreach enc {intset hashtable} { + test "SSCAN with encoding $enc" { + # Create the Set + r del set + if {$enc eq {intset}} { + set prefix "" + } else { + set prefix "ele:" + } + set elements {} + for {set j 0} {$j < 100} {incr j} { + lappend elements ${prefix}${j} + } + r sadd set {*}$elements + + # Verify that the encoding matches. + # assert {[r object encoding set] eq $enc} + + # Test SSCAN + set cur 0 + set keys {} + while 1 { + set res [r sscan set $cur] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + set keys [lsort -unique $keys] + assert_equal 100 [llength $keys] + } + } + + foreach enc {ziplist hashtable} { + test "HSCAN with encoding $enc" { + # Create the Hash + r del hash + if {$enc eq {ziplist}} { + set count 30 + } else { + set count 1000 + } + set elements {} + for {set j 0} {$j < $count} {incr j} { + lappend elements key:$j $j + } + r hmset hash {*}$elements + + # Verify that the encoding matches. 
+ # assert {[r object encoding hash] eq $enc} + + # Test HSCAN + set cur 0 + set keys {} + while 1 { + set res [r hscan hash $cur] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + set keys2 {} + foreach {k v} $keys { + assert {$k eq "key:$v"} + lappend keys2 $k + } + + set keys2 [lsort -unique $keys2] + assert_equal $count [llength $keys2] + } + } + + foreach enc {ziplist skiplist} { + test "ZSCAN with encoding $enc" { + # Create the Sorted Set + r del zset + if {$enc eq {ziplist}} { + set count 30 + } else { + set count 1000 + } + set elements {} + for {set j 0} {$j < $count} {incr j} { + lappend elements $j key:$j + } + r zadd zset {*}$elements + + # Verify that the encoding matches. + # + # assert {[r object encoding zset] eq $enc} + + # Test ZSCAN + set cur 0 + set keys {} + while 1 { + set res [r zscan zset $cur] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + } + + set keys2 {} + foreach {k v} $keys { + assert {$k eq "key:$v"} + lappend keys2 $k + } + + set keys2 [lsort -unique $keys2] + assert_equal $count [llength $keys2] + } + } + + test "SCAN guarantees check under write load" { + r flushdb + populate 100 + + # We start scanning here, so keys from 0 to 99 should all be + # reported at the end of the iteration. + set keys {} + while 1 { + set res [r scan $cur] + set cur [lindex $res 0] + set k [lindex $res 1] + lappend keys {*}$k + if {$cur == 0} break + # Write 10 random keys at every SCAN iteration. 
+ for {set j 0} {$j < 10} {incr j} { + r set addedkey:[randomInt 1000] foo + } + } + + set keys2 {} + foreach k $keys { + if {[string length $k] > 6} continue + lappend keys2 $k + } + + set keys2 [lsort -unique $keys2] + assert_equal 100 [llength $keys2] + } + +# test "SSCAN with integer encoded object (issue #1345)" { +# set objects {1 a} +# r del set +# r sadd set {*}$objects +# set res [r sscan set 0 MATCH *a* COUNT 100] +# assert_equal [lsort -unique [lindex $res 1]] {a} +# set res [r sscan set 0 MATCH *1* COUNT 100] +# assert_equal [lsort -unique [lindex $res 1]] {1} +# } + + test "SSCAN with PATTERN" { + r del mykey + r sadd mykey foo fab fiz foobar 1 2 3 4 + set res [r sscan mykey 0 MATCH foo* COUNT 10000] + lsort -unique [lindex $res 1] + } {foo foobar} + + test "HSCAN with PATTERN" { + r del mykey + r hmset mykey foo 1 fab 2 fiz 3 foobar 10 1 a 2 b 3 c 4 d + set res [r hscan mykey 0 MATCH foo* COUNT 10000] + lsort -unique [lindex $res 1] + } {1 10 foo foobar} + + test "ZSCAN with PATTERN" { + r del mykey + r zadd mykey 1 foo 2 fab 3 fiz 10 foobar + set res [r zscan mykey 0 MATCH foo* COUNT 10000] + lsort -unique [lindex $res 1] + } + + test "ZSCAN scores: regression test for issue #2175" { + r del mykey + for {set j 0} {$j < 500} {incr j} { + r zadd mykey 9.8813129168249309e-323 $j + } + set res [lindex [r zscan mykey 0] 1] + set first_score [lindex $res 1] + assert {$first_score != 0} + } +} diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl new file mode 100644 index 000000000..e1cd2174b --- /dev/null +++ b/tests/unit/scripting.tcl @@ -0,0 +1,606 @@ +start_server {tags {"scripting"}} { + test {EVAL - Does Lua interpreter replies to our requests?} { + r eval {return 'hello'} 0 + } {hello} + + test {EVAL - Lua integer -> Redis protocol type conversion} { + r eval {return 100.5} 0 + } {100} + + test {EVAL - Lua string -> Redis protocol type conversion} { + r eval {return 'hello world'} 0 + } {hello world} + + test {EVAL - Lua true boolean -> 
Redis protocol type conversion} { + r eval {return true} 0 + } {1} + + test {EVAL - Lua false boolean -> Redis protocol type conversion} { + r eval {return false} 0 + } {} + + test {EVAL - Lua status code reply -> Redis protocol type conversion} { + r eval {return {ok='fine'}} 0 + } {fine} + + test {EVAL - Lua error reply -> Redis protocol type conversion} { + catch { + r eval {return {err='this is an error'}} 0 + } e + set _ $e + } {this is an error} + + test {EVAL - Lua table -> Redis protocol type conversion} { + r eval {return {1,2,3,'ciao',{1,2}}} 0 + } {1 2 3 ciao {1 2}} + + test {EVAL - Are the KEYS and ARGV arrays populated correctly?} { + r eval {return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}} 2 a b c d + } {a b c d} + + test {EVAL - is Lua able to call Redis API?} { + r set mykey myval + r eval {return redis.call('get',KEYS[1])} 1 mykey + } {myval} + + test {EVALSHA - Can we call a SHA1 if already defined?} { + r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey + } {myval} + + test {EVALSHA - Can we call a SHA1 in uppercase?} { + r evalsha FD758D1589D044DD850A6F05D52F2EEFD27F033F 1 mykey + } {myval} + + test {EVALSHA - Do we get an error on invalid SHA1?} { + catch {r evalsha NotValidShaSUM 0} e + set _ $e + } {NOSCRIPT*} + + test {EVALSHA - Do we get an error on non defined SHA1?} { + catch {r evalsha ffd632c7d33e571e9f24556ebed26c3479a87130 0} e + set _ $e + } {NOSCRIPT*} + + test {EVAL - Redis integer -> Lua type conversion} { + r eval { + local foo = redis.pcall('incr','x') + return {type(foo),foo} + } 0 + } {number 1} + + test {EVAL - Redis bulk -> Lua type conversion} { + r set mykey myval + r eval { + local foo = redis.pcall('get','mykey') + return {type(foo),foo} + } 0 + } {string myval} + + test {EVAL - Redis multi bulk -> Lua type conversion} { + r del mylist + r rpush mylist a + r rpush mylist b + r rpush mylist c + r eval { + local foo = redis.pcall('lrange','mylist',0,-1) + return {type(foo),foo[1],foo[2],foo[3],# foo} + } 0 + } {table a b 
c 3} + + test {EVAL - Redis status reply -> Lua type conversion} { + r eval { + local foo = redis.pcall('set','mykey','myval') + return {type(foo),foo['ok']} + } 0 + } {table OK} + + test {EVAL - Redis error reply -> Lua type conversion} { + r set mykey myval + r eval { + local foo = redis.pcall('incr','mykey') + return {type(foo),foo['err']} + } 0 + } {table {ERR value is not an integer or out of range}} + + test {EVAL - Redis nil bulk reply -> Lua type conversion} { + r del mykey + r eval { + local foo = redis.pcall('get','mykey') + return {type(foo),foo == false} + } 0 + } {boolean 1} + + test {EVAL - Is the Lua client using the currently selected DB?} { + r set mykey "this is DB 9" + r select 10 + r set mykey "this is DB 10" + r eval {return redis.pcall('get','mykey')} 0 + } {this is DB 10} + + test {EVAL - SELECT inside Lua should not affect the caller} { + # here we DB 10 is selected + r set mykey "original value" + r eval {return redis.pcall('select','9')} 0 + set res [r get mykey] + r select 9 + set res + } {original value} + + if 0 { + test {EVAL - Script can't run more than configured time limit} { + r config set lua-time-limit 1 + catch { + r eval { + local i = 0 + while true do i=i+1 end + } 0 + } e + set _ $e + } {*execution time*} + } + + test {EVAL - Scripts can't run certain commands} { + set e {} + catch {r eval {return redis.pcall('spop','x')} 0} e + set e + } {*not allowed*} + + test {EVAL - Scripts can't run certain commands} { + set e {} + catch { + r eval "redis.pcall('randomkey'); return redis.pcall('set','x','ciao')" 0 + } e + set e + } {*not allowed after*} + + test {EVAL - No arguments to redis.call/pcall is considered an error} { + set e {} + catch {r eval {return redis.call()} 0} e + set e + } {*one argument*} + + test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { + set e {} + catch { + r eval "redis.call('nosuchcommand')" 0 + } e + set e + } {*Unknown Redis*} + + test {EVAL - redis.call variant raises a Lua 
error on Redis cmd error (1)} { + set e {} + catch { + r eval "redis.call('get','a','b','c')" 0 + } e + set e + } {*number of args*} + + test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { + set e {} + r set foo bar + catch { + r eval {redis.call('lpush',KEYS[1],'val')} 1 foo + } e + set e + } {*against a key*} + + test {EVAL - JSON numeric decoding} { + # We must return the table as a string because otherwise + # Redis converts floats to ints and we get 0 and 1023 instead + # of 0.0003 and 1023.2 as the parsed output. + r eval {return + table.concat( + cjson.decode( + "[0.0, -5e3, -1, 0.3e-3, 1023.2, 0e10]"), " ") + } 0 + } {0 -5000 -1 0.0003 1023.2 0} + + test {EVAL - JSON string decoding} { + r eval {local decoded = cjson.decode('{"keya": "a", "keyb": "b"}') + return {decoded.keya, decoded.keyb} + } 0 + } {a b} + + test {EVAL - cmsgpack can pack double?} { + r eval {local encoded = cmsgpack.pack(0.1) + local h = "" + for i = 1, #encoded do + h = h .. string.format("%02x",string.byte(encoded,i)) + end + return h + } 0 + } {cb3fb999999999999a} + + test {EVAL - cmsgpack can pack negative int64?} { + r eval {local encoded = cmsgpack.pack(-1099511627776) + local h = "" + for i = 1, #encoded do + h = h .. string.format("%02x",string.byte(encoded,i)) + end + return h + } 0 + } {d3ffffff0000000000} + + test {EVAL - cmsgpack can pack and unpack circular references?} { + r eval {local a = {x=nil,y=5} + local b = {x=a} + a['x'] = b + local encoded = cmsgpack.pack(a) + local h = "" + -- cmsgpack encodes to a depth of 16, but can't encode + -- references, so the encoded object has a deep copy recusive + -- depth of 16. + for i = 1, #encoded do + h = h .. string.format("%02x",string.byte(encoded,i)) + end + -- when unpacked, re.x.x != re because the unpack creates + -- individual tables down to a depth of 16. 
+ -- (that's why the encoded output is so large) + local re = cmsgpack.unpack(encoded) + assert(re) + assert(re.x) + assert(re.x.x.y == re.y) + assert(re.x.x.x.x.y == re.y) + assert(re.x.x.x.x.x.x.y == re.y) + assert(re.x.x.x.x.x.x.x.x.x.x.y == re.y) + -- maximum working depth: + assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.y == re.y) + -- now the last x would be b above and has no y + assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x) + -- so, the final x.x is at the depth limit and was assigned nil + assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x == nil) + return {h, re.x.x.x.x.x.x.x.x.y == re.y, re.y == 5} + } 0 + } {82a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a178c0 1 1} + + test {EVAL - Numerical sanity check from bitop} { + r eval {assert(0x7fffffff == 2147483647, "broken hex literals"); + assert(0xffffffff == -1 or 0xffffffff == 2^32-1, + "broken hex literals"); + assert(tostring(-1) == "-1", "broken tostring()"); + assert(tostring(0xffffffff) == "-1" or + tostring(0xffffffff) == "4294967295", + "broken tostring()") + } 0 + } {} + + test {EVAL - Verify minimal bitop functionality} { + r eval {assert(bit.tobit(1) == 1); + assert(bit.band(1) == 1); + assert(bit.bxor(1,2) == 3); + assert(bit.bor(1,2,4,8,16,32,64,128) == 255) + } 0 + } {} + + test {SCRIPTING FLUSH - is able to clear the scripts cache?} { + r set mykey myval + set v [r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey] + assert_equal $v myval + set e "" + r script flush + catch {r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey} e + set e + } {NOSCRIPT*} + + test {SCRIPT EXISTS - can detect already defined scripts?} { + r eval "return 1+1" 0 + r script exists a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bd9 a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bda + } {1 0} + + test {SCRIPT LOAD - is able to register scripts in the scripting cache} { + list \ + [r script load "return 'loaded'"] \ + [r evalsha 
b534286061d4b9e4026607613b95c06c06015ae8 0] + } {b534286061d4b9e4026607613b95c06c06015ae8 loaded} + + test "In the context of Lua the output of random commands gets ordered" { + r del myset + r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz + r eval {return redis.call('smembers',KEYS[1])} 1 myset + } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} + + test "SORT is normally not alpha re-ordered for the scripting engine" { + r del myset + r sadd myset 1 2 3 4 10 + r eval {return redis.call('sort',KEYS[1],'desc')} 1 myset + } {10 4 3 2 1} + + test "SORT BY output gets ordered for scripting" { + r del myset + r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz + r eval {return redis.call('sort',KEYS[1],'by','_')} 1 myset + } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} + + test "SORT BY with GET gets ordered for scripting" { + r del myset + r sadd myset a b c + r eval {return redis.call('sort',KEYS[1],'by','_','get','#','get','_:*')} 1 myset + } {a {} b {} c {}} + + test "redis.sha1hex() implementation" { + list [r eval {return redis.sha1hex('')} 0] \ + [r eval {return redis.sha1hex('Pizza & Mandolino')} 0] + } {da39a3ee5e6b4b0d3255bfef95601890afd80709 74822d82031af7493c20eefa13bd07ec4fada82f} + + test {Globals protection reading an undeclared global variable} { + catch {r eval {return a} 0} e + set e + } {*ERR*attempted to access unexisting global*} + + test {Globals protection setting an undeclared global*} { + catch {r eval {a=10} 0} e + set e + } {*ERR*attempted to create global*} + + test {Test an example script DECR_IF_GT} { + set decr_if_gt { + local current + + current = redis.call('get',KEYS[1]) + if not current then return nil end + if current > ARGV[1] then + return redis.call('decr',KEYS[1]) + else + return redis.call('get',KEYS[1]) + end + } + r set foo 5 + set res {} + lappend res [r eval $decr_if_gt 1 foo 2] + lappend res [r eval $decr_if_gt 1 foo 2] + lappend res [r eval $decr_if_gt 1 foo 2] + lappend res 
[r eval $decr_if_gt 1 foo 2] + lappend res [r eval $decr_if_gt 1 foo 2] + set res + } {4 3 2 2 2} + + test {Scripting engine resets PRNG at every script execution} { + set rand1 [r eval {return tostring(math.random())} 0] + set rand2 [r eval {return tostring(math.random())} 0] + assert_equal $rand1 $rand2 + } + + test {Scripting engine PRNG can be seeded correctly} { + set rand1 [r eval { + math.randomseed(ARGV[1]); return tostring(math.random()) + } 0 10] + set rand2 [r eval { + math.randomseed(ARGV[1]); return tostring(math.random()) + } 0 10] + set rand3 [r eval { + math.randomseed(ARGV[1]); return tostring(math.random()) + } 0 20] + assert_equal $rand1 $rand2 + assert {$rand2 ne $rand3} + } + + test {EVAL does not leak in the Lua stack} { + r set x 0 + # Use a non blocking client to speedup the loop. + set rd [redis_deferring_client] + for {set j 0} {$j < 10000} {incr j} { + $rd eval {return redis.call("incr",KEYS[1])} 1 x + } + for {set j 0} {$j < 10000} {incr j} { + $rd read + } + assert {[s used_memory_lua] < 1024*100} + $rd close + r get x + } {10000} + + test {EVAL processes writes from AOF in read-only slaves} { + r flushall + r config set appendonly yes + r eval {redis.call("set",KEYS[1],"100")} 1 foo + r eval {redis.call("incr",KEYS[1])} 1 foo + r eval {redis.call("incr",KEYS[1])} 1 foo + wait_for_condition 50 100 { + [s aof_rewrite_in_progress] == 0 + } else { + fail "AOF rewrite can't complete after CONFIG SET appendonly yes." 
+ } + r config set slave-read-only yes + r slaveof 127.0.0.1 0 + r debug loadaof + set res [r get foo] + r slaveof no one + set res + } {102} + + test {We can call scripts rewriting client->argv from Lua} { + r del myset + r sadd myset a b c + r mset a 1 b 2 c 3 d 4 + assert {[r spop myset] ne {}} + assert {[r spop myset] ne {}} + assert {[r spop myset] ne {}} + assert {[r mget a b c d] eq {1 2 3 4}} + assert {[r spop myset] eq {}} + } + + test {Call Redis command with many args from Lua (issue #1764)} { + r eval { + local i + local x={} + redis.call('del','mylist') + for i=1,100 do + table.insert(x,i) + end + redis.call('rpush','mylist',unpack(x)) + return redis.call('lrange','mylist',0,-1) + } 0 + } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100} + + test {Number conversion precision test (issue #1118)} { + r eval { + local value = 9007199254740991 + redis.call("set","foo",value) + return redis.call("get","foo") + } 0 + } {9007199254740991} + + test {String containing number precision test (regression of issue #1118)} { + r eval { + redis.call("set", "key", "12039611435714932082") + return redis.call("get", "key") + } 0 + } {12039611435714932082} + + test {Verify negative arg count is error instead of crash (issue #1842)} { + catch { r eval { return "hello" } -12 } e + set e + } {ERR Number of keys can't be negative} + + test {Correct handling of reused argv (issue #1939)} { + r eval { + for i = 0, 10 do + redis.call('SET', 'a', '1') + redis.call('MGET', 'a', 'b', 'c') + redis.call('EXPIRE', 'a', 0) + redis.call('GET', 'a') + redis.call('MGET', 'a', 'b', 'c') + end + } 0 + } +} + +# Start a new server since the last test in this stanza will kill the +# instance at all. 
+start_server {tags {"scripting"}} { + test {Timedout read-only scripts can be killed by SCRIPT KILL} { + set rd [redis_deferring_client] + r config set lua-time-limit 10 + $rd eval {while true do end} 0 + after 200 + catch {r ping} e + assert_match {BUSY*} $e + r script kill + after 200 ; # Give some time to Lua to call the hook again... + assert_equal [r ping] "PONG" + } + + test {Timedout script link is still usable after Lua returns} { + r config set lua-time-limit 10 + r eval {for i=1,100000 do redis.call('ping') end return 'ok'} 0 + r ping + } {PONG} + + test {Timedout scripts that modified data can't be killed by SCRIPT KILL} { + set rd [redis_deferring_client] + r config set lua-time-limit 10 + $rd eval {redis.call('set',KEYS[1],'y'); while true do end} 1 x + after 200 + catch {r ping} e + assert_match {BUSY*} $e + catch {r script kill} e + assert_match {UNKILLABLE*} $e + catch {r ping} e + assert_match {BUSY*} $e + } + + # Note: keep this test at the end of this server stanza because it + # kills the server. + test {SHUTDOWN NOSAVE can kill a timedout script anyway} { + # The server sould be still unresponding to normal commands. + catch {r ping} e + assert_match {BUSY*} $e + catch {r shutdown nosave} + # Make sure the server was killed + catch {set rd [redis_deferring_client]} e + assert_match {*connection refused*} $e + } +} + +start_server {tags {"scripting repl"}} { + start_server {} { + test {Before the slave connects we issue two EVAL commands} { + # One with an error, but still executing a command. 
+ # SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876 + catch { + r eval {redis.call('incr',KEYS[1]); redis.call('nonexisting')} 1 x + } + # One command is correct: + # SHA is: 6f5ade10a69975e903c6d07b10ea44c6382381a5 + r eval {return redis.call('incr',KEYS[1])} 1 x + } {2} + + test {Connect a slave to the main instance} { + r -1 slaveof [srv 0 host] [srv 0 port] + wait_for_condition 50 100 { + [s -1 role] eq {slave} && + [string match {*master_link_status:up*} [r -1 info replication]] + } else { + fail "Can't turn the instance into a slave" + } + } + + test {Now use EVALSHA against the master, with both SHAs} { + # The server should replicate successful and unsuccessful + # commands as EVAL instead of EVALSHA. + catch { + r evalsha 67164fc43fa971f76fd1aaeeaf60c1c178d25876 1 x + } + r evalsha 6f5ade10a69975e903c6d07b10ea44c6382381a5 1 x + } {4} + + test {If EVALSHA was replicated as EVAL, 'x' should be '4'} { + wait_for_condition 50 100 { + [r -1 get x] eq {4} + } else { + fail "Expected 4 in x, but value is '[r -1 get x]'" + } + } + + test {Replication of script multiple pushes to list with BLPOP} { + set rd [redis_deferring_client] + $rd brpop a 0 + r eval { + redis.call("lpush",KEYS[1],"1"); + redis.call("lpush",KEYS[1],"2"); + } 1 a + set res [$rd read] + $rd close + wait_for_condition 50 100 { + [r -1 lrange a 0 -1] eq [r lrange a 0 -1] + } else { + fail "Expected list 'a' in slave and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'" + } + set res + } {a 1} + + test {EVALSHA replication when first call is readonly} { + r del x + r eval {if tonumber(ARGV[1]) > 0 then redis.call('incr', KEYS[1]) end} 1 x 0 + r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 0 + r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 1 + wait_for_condition 50 100 { + [r -1 get x] eq {1} + } else { + fail "Expected 1 in x, but value is '[r -1 get x]'" + } + } + + test {Lua scripts using SELECT are replicated correctly} { + r eval 
{ + redis.call("set","foo1","bar1") + redis.call("select","10") + redis.call("incr","x") + redis.call("select","11") + redis.call("incr","z") + } 0 + r eval { + redis.call("set","foo1","bar1") + redis.call("select","10") + redis.call("incr","x") + redis.call("select","11") + redis.call("incr","z") + } 0 + wait_for_condition 50 100 { + [r -1 debug digest] eq [r debug digest] + } else { + fail "Master-Slave desync after Lua script using SELECT." + } + } + } +} diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl new file mode 100644 index 000000000..9fa20a5c8 --- /dev/null +++ b/tests/unit/slowlog.tcl @@ -0,0 +1,70 @@ +start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { + test {SLOWLOG - check that it starts with an empty log} { + r slowlog len + } {0} + +# test {SLOWLOG - only logs commands taking more time than specified} { +# r config set slowlog-log-slower-than 100000 +# r ping +# assert_equal [r slowlog len] 0 +# r debug sleep 0.2 +# assert_equal [r slowlog len] 1 +# } + + test {SLOWLOG - max entries is correctly handled} { + r config set slowlog-log-slower-than 0 + r config set slowlog-max-len 10 + for {set i 0} {$i < 100} {incr i} { + r ping + } + r slowlog len + } {10} + + test {SLOWLOG - GET optional argument to limit output len works} { + llength [r slowlog get 5] + } {5} + + test {SLOWLOG - RESET subcommand works} { + r config set slowlog-log-slower-than 100000 + r slowlog reset + r slowlog len + } {0} + +# test {SLOWLOG - logged entry sanity check} { +# r debug sleep 0.2 +# set e [lindex [r slowlog get] 0] +# assert_equal [llength $e] 4 +# assert_equal [lindex $e 0] 105 +# assert_equal [expr {[lindex $e 2] > 100000}] 1 +# assert_equal [lindex $e 3] {debug sleep 0.2} +# } + + test {SLOWLOG - commands with too many arguments are trimmed} { + r config set slowlog-log-slower-than 0 + r slowlog reset + r sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 + set e [lindex [r slowlog 
get] 0] + lindex $e 3 + } {sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 {... (2 more arguments)}} + + test {SLOWLOG - too long arguments are trimmed} { + r config set slowlog-log-slower-than 0 + r slowlog reset + set arg [string repeat A 129] + r sadd set foo $arg + set e [lindex [r slowlog get] 0] + lindex $e 3 + } {sadd set foo {AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA... (1 more bytes)}} + +# test {SLOWLOG - EXEC is not logged, just executed commands} { +# r config set slowlog-log-slower-than 100000 +# r slowlog reset +# assert_equal [r slowlog len] 0 +# r multi +# r debug sleep 0.2 +# r exec +# assert_equal [r slowlog len] 1 +# set e [lindex [r slowlog get] 0] +# assert_equal [lindex $e 3] {debug sleep 0.2} +# } +} diff --git a/tests/unit/sort.tcl b/tests/unit/sort.tcl new file mode 100644 index 000000000..a25ffeb5c --- /dev/null +++ b/tests/unit/sort.tcl @@ -0,0 +1,311 @@ +start_server { + tags {"sort"} + overrides { + "list-max-ziplist-value" 16 + "list-max-ziplist-entries" 32 + "set-max-intset-entries" 32 + } +} { + proc create_random_dataset {num cmd} { + set tosort {} + set result {} + array set seenrand {} + r del tosort + for {set i 0} {$i < $num} {incr i} { + # Make sure all the weights are different because + # Redis does not use a stable sort but Tcl does. 
+ while 1 { + randpath { + set rint [expr int(rand()*1000000)] + } { + set rint [expr rand()] + } + if {![info exists seenrand($rint)]} break + } + set seenrand($rint) x + r $cmd tosort $i + r set weight_$i $rint + r hset wobj_$i weight $rint + lappend tosort [list $i $rint] + } + set sorted [lsort -index 1 -real $tosort] + for {set i 0} {$i < $num} {incr i} { + lappend result [lindex $sorted $i 0] + } + set _ $result + } + + foreach {num cmd enc title} { + 16 lpush ziplist "Ziplist" + 1000 lpush linkedlist "Linked list" + 10000 lpush linkedlist "Big Linked list" + 16 sadd intset "Intset" + 1000 sadd hashtable "Hash table" + 10000 sadd hashtable "Big Hash table" + } { + set result [create_random_dataset $num $cmd] + assert_encoding $enc tosort + + test "$title: SORT BY key" { + assert_equal $result [r sort tosort BY weight_*] + } + + test "$title: SORT BY key with limit" { + assert_equal [lrange $result 5 9] [r sort tosort BY weight_* LIMIT 5 5] + } + + test "$title: SORT BY hash field" { + assert_equal $result [r sort tosort BY wobj_*->weight] + } + } + + set result [create_random_dataset 16 lpush] + test "SORT GET #" { + assert_equal [lsort -integer $result] [r sort tosort GET #] + } + + test "SORT GET " { + r del foo + set res [r sort tosort GET foo] + assert_equal 16 [llength $res] + foreach item $res { assert_equal {} $item } + } + + test "SORT GET (key and hash) with sanity check" { + set l1 [r sort tosort GET # GET weight_*] + set l2 [r sort tosort GET # GET wobj_*->weight] + foreach {id1 w1} $l1 {id2 w2} $l2 { + assert_equal $id1 $id2 + assert_equal $w1 [r get weight_$id1] + assert_equal $w2 [r get weight_$id1] + } + } + + test "SORT BY key STORE" { + r sort tosort BY weight_* store sort-res + assert_equal $result [r lrange sort-res 0 -1] + assert_equal 16 [r llen sort-res] + assert_encoding ziplist sort-res + } + + test "SORT BY hash field STORE" { + r sort tosort BY wobj_*->weight store sort-res + assert_equal $result [r lrange sort-res 0 -1] + 
assert_equal 16 [r llen sort-res] + assert_encoding ziplist sort-res + } + + test "SORT DESC" { + assert_equal [lsort -decreasing -integer $result] [r sort tosort DESC] + } + + test "SORT ALPHA against integer encoded strings" { + r del mylist + r lpush mylist 2 + r lpush mylist 1 + r lpush mylist 3 + r lpush mylist 10 + r sort mylist alpha + } {1 10 2 3} + + test "SORT sorted set" { + r del zset + r zadd zset 1 a + r zadd zset 5 b + r zadd zset 2 c + r zadd zset 10 d + r zadd zset 3 e + r sort zset alpha desc + } {e d c b a} + + test "SORT sorted set BY nosort should retain ordering" { + r del zset + r zadd zset 1 a + r zadd zset 5 b + r zadd zset 2 c + r zadd zset 10 d + r zadd zset 3 e + r multi + r sort zset by nosort asc + r sort zset by nosort desc + r exec + } {{a c e b d} {d b e c a}} + + test "SORT sorted set BY nosort + LIMIT" { + r del zset + r zadd zset 1 a + r zadd zset 5 b + r zadd zset 2 c + r zadd zset 10 d + r zadd zset 3 e + assert_equal [r sort zset by nosort asc limit 0 1] {a} + assert_equal [r sort zset by nosort desc limit 0 1] {d} + assert_equal [r sort zset by nosort asc limit 0 2] {a c} + assert_equal [r sort zset by nosort desc limit 0 2] {d b} + assert_equal [r sort zset by nosort limit 5 10] {} + assert_equal [r sort zset by nosort limit -10 100] {a c e b d} + } + + test "SORT sorted set BY nosort works as expected from scripts" { + r del zset + r zadd zset 1 a + r zadd zset 5 b + r zadd zset 2 c + r zadd zset 10 d + r zadd zset 3 e + r eval { + return {redis.call('sort',KEYS[1],'by','nosort','asc'), + redis.call('sort',KEYS[1],'by','nosort','desc')} + } 1 zset + } {{a c e b d} {d b e c a}} + + test "SORT sorted set: +inf and -inf handling" { + r del zset + r zadd zset -100 a + r zadd zset 200 b + r zadd zset -300 c + r zadd zset 1000000 d + r zadd zset +inf max + r zadd zset -inf min + r zrange zset 0 -1 + } {min c a b d max} + + test "SORT regression for issue #19, sorting floats" { + r flushdb + set floats {1.1 5.10 3.10 7.44 2.1 5.75 
6.12 0.25 1.15} + foreach x $floats { + r lpush mylist $x + } + assert_equal [lsort -real $floats] [r sort mylist] + } + + test "SORT with STORE returns zero if result is empty (github issue 224)" { + r flushdb + r sort foo store bar + } {0} + + test "SORT with STORE does not create empty lists (github issue 224)" { + r flushdb + r lpush foo bar + r sort foo alpha limit 10 10 store zap + r exists zap + } {0} + + test "SORT with STORE removes key if result is empty (github issue 227)" { + r flushdb + r lpush foo bar + r sort emptylist store foo + r exists foo + } {0} + + test "SORT with BY and STORE should still order output" { + r del myset mylist + r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz + r sort myset alpha by _ store mylist + r lrange mylist 0 -1 + } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} + + test "SORT will complain with numerical sorting and bad doubles (1)" { + r del myset + r sadd myset 1 2 3 4 not-a-double + set e {} + catch {r sort myset} e + set e + } {*ERR*double*} + + test "SORT will complain with numerical sorting and bad doubles (2)" { + r del myset + r sadd myset 1 2 3 4 + r mset score:1 10 score:2 20 score:3 30 score:4 not-a-double + set e {} + catch {r sort myset by score:*} e + set e + } {*ERR*double*} + + test "SORT BY sub-sorts lexicographically if score is the same" { + r del myset + r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz + foreach ele {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} { + set score:$ele 100 + } + r sort myset by score:* + } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} + + test "SORT GET with pattern ending with just -> does not get hash field" { + r del mylist + r lpush mylist a + r set x:a-> 100 + r sort mylist by num get x:*-> + } {100} + + test "SORT by nosort retains native order for lists" { + r del testa + r lpush testa 2 1 4 3 5 + r sort testa by nosort + } {5 3 4 1 2} + + test "SORT by nosort plus store retains native order for lists" { 
+ r del testa + r lpush testa 2 1 4 3 5 + r sort testa by nosort store testb + r lrange testb 0 -1 + } {5 3 4 1 2} + + test "SORT by nosort with limit returns based on original list order" { + r sort testa by nosort limit 0 3 store testb + r lrange testb 0 -1 + } {5 3 4} + + tags {"slow"} { + set num 100 + set res [create_random_dataset $num lpush] + + test "SORT speed, $num element list BY key, 100 times" { + set start [clock clicks -milliseconds] + for {set i 0} {$i < 100} {incr i} { + set sorted [r sort tosort BY weight_* LIMIT 0 10] + } + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { + puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " + flush stdout + } + } + + test "SORT speed, $num element list BY hash field, 100 times" { + set start [clock clicks -milliseconds] + for {set i 0} {$i < 100} {incr i} { + set sorted [r sort tosort BY wobj_*->weight LIMIT 0 10] + } + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { + puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " + flush stdout + } + } + + test "SORT speed, $num element list directly, 100 times" { + set start [clock clicks -milliseconds] + for {set i 0} {$i < 100} {incr i} { + set sorted [r sort tosort LIMIT 0 10] + } + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { + puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " + flush stdout + } + } + + test "SORT speed, $num element list BY , 100 times" { + set start [clock clicks -milliseconds] + for {set i 0} {$i < 100} {incr i} { + set sorted [r sort tosort BY nokey LIMIT 0 10] + } + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { + puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " + flush stdout + } + } + } +} diff --git a/tests/unit/tcl/aof-race.tcl b/tests/unit/tcl/aof-race.tcl new file mode 100644 index 
000000000..207f20739 --- /dev/null +++ b/tests/unit/tcl/aof-race.tcl @@ -0,0 +1,35 @@ +set defaults { appendonly {yes} appendfilename {appendonly.aof} } +set server_path [tmpdir server.aof] +set aof_path "$server_path/appendonly.aof" + +proc start_server_aof {overrides code} { + upvar defaults defaults srv srv server_path server_path + set config [concat $defaults $overrides] + start_server [list overrides $config] $code +} + +tags {"aof"} { + # Specific test for a regression where internal buffers were not properly + # cleaned after a child responsible for an AOF rewrite exited. This buffer + # was subsequently appended to the new AOF, resulting in duplicate commands. + start_server_aof [list dir $server_path] { + set client [redis [srv host] [srv port]] + set bench [open "|src/redis-benchmark -q -p [srv port] -c 20 -n 20000 incr foo" "r+"] + after 100 + + # Benchmark should be running by now: start background rewrite + $client bgrewriteaof + + # Read until benchmark pipe reaches EOF + while {[string length [read $bench]] > 0} {} + + # Check contents of foo + assert_equal 20000 [$client get foo] + } + + # Restart server to replay AOF + start_server_aof [list dir $server_path] { + set client [redis [srv host] [srv port]] + assert_equal 20000 [$client get foo] + } +} diff --git a/tests/unit/tcl/aof.tcl b/tests/unit/tcl/aof.tcl new file mode 100644 index 000000000..7ea70943c --- /dev/null +++ b/tests/unit/tcl/aof.tcl @@ -0,0 +1,236 @@ +set defaults { appendonly {yes} appendfilename {appendonly.aof} } +set server_path [tmpdir server.aof] +set aof_path "$server_path/appendonly.aof" + +proc append_to_aof {str} { + upvar fp fp + puts -nonewline $fp $str +} + +proc create_aof {code} { + upvar fp fp aof_path aof_path + set fp [open $aof_path w+] + uplevel 1 $code + close $fp +} + +proc start_server_aof {overrides code} { + upvar defaults defaults srv srv server_path server_path + set config [concat $defaults $overrides] + set srv [start_server [list overrides $config]] + 
uplevel 1 $code + kill_server $srv +} + +tags {"aof"} { + ## Server can start when aof-load-truncated is set to yes and AOF + ## is truncated, with an incomplete MULTI block. + create_aof { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand multi] + append_to_aof [formatCommand set bar world] + } + + start_server_aof [list dir $server_path aof-load-truncated yes] { + test "Unfinished MULTI: Server should start if load-truncated is yes" { + assert_equal 1 [is_alive $srv] + } + } + + ## Should also start with truncated AOF without incomplete MULTI block. + create_aof { + append_to_aof [formatCommand incr foo] + append_to_aof [formatCommand incr foo] + append_to_aof [formatCommand incr foo] + append_to_aof [formatCommand incr foo] + append_to_aof [formatCommand incr foo] + append_to_aof [string range [formatCommand incr foo] 0 end-1] + } + + start_server_aof [list dir $server_path aof-load-truncated yes] { + test "Short read: Server should start if load-truncated is yes" { + assert_equal 1 [is_alive $srv] + } + + set client [redis [dict get $srv host] [dict get $srv port]] + + test "Truncated AOF loaded: we expect foo to be equal to 5" { + assert {[$client get foo] eq "5"} + } + + test "Append a new command after loading an incomplete AOF" { + $client incr foo + } + } + + # Now the AOF file is expected to be correct + start_server_aof [list dir $server_path aof-load-truncated yes] { + test "Short read + command: Server should start" { + assert_equal 1 [is_alive $srv] + } + + set client [redis [dict get $srv host] [dict get $srv port]] + + test "Truncated AOF loaded: we expect foo to be equal to 6 now" { + assert {[$client get foo] eq "6"} + } + } + + ## Test that the server exits when the AOF contains a format error + create_aof { + append_to_aof [formatCommand set foo hello] + append_to_aof "!!!" 
+ append_to_aof [formatCommand set foo hello] + } + + start_server_aof [list dir $server_path aof-load-truncated yes] { + test "Bad format: Server should have logged an error" { + set pattern "*Bad file format reading the append only file*" + set retry 10 + while {$retry} { + set result [exec tail -n1 < [dict get $srv stdout]] + if {[string match $pattern $result]} { + break + } + incr retry -1 + after 1000 + } + if {$retry == 0} { + error "assertion:expected error not found on config file" + } + } + } + + ## Test the server doesn't start when the AOF contains an unfinished MULTI + create_aof { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand multi] + append_to_aof [formatCommand set bar world] + } + + start_server_aof [list dir $server_path aof-load-truncated no] { + test "Unfinished MULTI: Server should have logged an error" { + set pattern "*Unexpected end of file reading the append only file*" + set retry 10 + while {$retry} { + set result [exec tail -n1 < [dict get $srv stdout]] + if {[string match $pattern $result]} { + break + } + incr retry -1 + after 1000 + } + if {$retry == 0} { + error "assertion:expected error not found on config file" + } + } + } + + ## Test that the server exits when the AOF contains a short read + create_aof { + append_to_aof [formatCommand set foo hello] + append_to_aof [string range [formatCommand set bar world] 0 end-1] + } + + start_server_aof [list dir $server_path aof-load-truncated no] { + test "Short read: Server should have logged an error" { + set pattern "*Unexpected end of file reading the append only file*" + set retry 10 + while {$retry} { + set result [exec tail -n1 < [dict get $srv stdout]] + if {[string match $pattern $result]} { + break + } + incr retry -1 + after 1000 + } + if {$retry == 0} { + error "assertion:expected error not found on config file" + } + } + } + + ## Test that redis-check-aof indeed sees this AOF is not valid + test "Short read: Utility should confirm the AOF is not 
valid" { + catch { + exec src/redis-check-aof $aof_path + } result + assert_match "*not valid*" $result + } + + test "Short read: Utility should be able to fix the AOF" { + set result [exec src/redis-check-aof --fix $aof_path << "y\n"] + assert_match "*Successfully truncated AOF*" $result + } + + ## Test that the server can be started using the truncated AOF + start_server_aof [list dir $server_path aof-load-truncated no] { + test "Fixed AOF: Server should have been started" { + assert_equal 1 [is_alive $srv] + } + + test "Fixed AOF: Keyspace should contain values that were parseable" { + set client [redis [dict get $srv host] [dict get $srv port]] + wait_for_condition 50 100 { + [catch {$client ping} e] == 0 + } else { + fail "Loading DB is taking too much time." + } + assert_equal "hello" [$client get foo] + assert_equal "" [$client get bar] + } + } + + ## Test that SPOP (that modifies the client's argc/argv) is correctly free'd + create_aof { + append_to_aof [formatCommand sadd set foo] + append_to_aof [formatCommand sadd set bar] + append_to_aof [formatCommand spop set] + } + + start_server_aof [list dir $server_path aof-load-truncated no] { + test "AOF+SPOP: Server should have been started" { + assert_equal 1 [is_alive $srv] + } + + test "AOF+SPOP: Set should have 1 member" { + set client [redis [dict get $srv host] [dict get $srv port]] + wait_for_condition 50 100 { + [catch {$client ping} e] == 0 + } else { + fail "Loading DB is taking too much time." 
+ } + assert_equal 1 [$client scard set] + } + } + + ## Test that EXPIREAT is loaded correctly + create_aof { + append_to_aof [formatCommand rpush list foo] + append_to_aof [formatCommand expireat list 1000] + append_to_aof [formatCommand rpush list bar] + } + + start_server_aof [list dir $server_path aof-load-truncated no] { + test "AOF+EXPIRE: Server should have been started" { + assert_equal 1 [is_alive $srv] + } + + test "AOF+EXPIRE: List should be empty" { + set client [redis [dict get $srv host] [dict get $srv port]] + wait_for_condition 50 100 { + [catch {$client ping} e] == 0 + } else { + fail "Loading DB is taking too much time." + } + assert_equal 0 [$client llen list] + } + } + + start_server {overrides {appendonly {yes} appendfilename {appendonly.aof}}} { + test {Redis should not try to convert DEL into EXPIREAT for EXPIRE -1} { + r set x 10 + r expire x -1 + } + } +} diff --git a/tests/unit/tcl/convert-zipmap-hash-on-load.tcl b/tests/unit/tcl/convert-zipmap-hash-on-load.tcl new file mode 100644 index 000000000..cf3577f28 --- /dev/null +++ b/tests/unit/tcl/convert-zipmap-hash-on-load.tcl @@ -0,0 +1,35 @@ +# Copy RDB with zipmap encoded hash to server path +set server_path [tmpdir "server.convert-zipmap-hash-on-load"] + +exec cp -f tests/assets/hash-zipmap.rdb $server_path +start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb"]] { + test "RDB load zipmap hash: converts to ziplist" { + r select 0 + + assert_match "*ziplist*" [r debug object hash] + assert_equal 2 [r hlen hash] + assert_match {v1 v2} [r hmget hash f1 f2] + } +} + +exec cp -f tests/assets/hash-zipmap.rdb $server_path +start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-entries" 1]] { + test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded" { + r select 0 + + assert_match "*hashtable*" [r debug object hash] + assert_equal 2 [r hlen hash] + assert_match {v1 v2} [r hmget 
hash f1 f2] + } +} + +exec cp -f tests/assets/hash-zipmap.rdb $server_path +start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-value" 1]] { + test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded" { + r select 0 + + assert_match "*hashtable*" [r debug object hash] + assert_equal 2 [r hlen hash] + assert_match {v1 v2} [r hmget hash f1 f2] + } +} diff --git a/tests/unit/tcl/rdb.tcl b/tests/unit/tcl/rdb.tcl new file mode 100644 index 000000000..71876a6ed --- /dev/null +++ b/tests/unit/tcl/rdb.tcl @@ -0,0 +1,98 @@ +set server_path [tmpdir "server.rdb-encoding-test"] + +# Copy RDB with different encodings in server path +exec cp tests/assets/encodings.rdb $server_path + +start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] { + test "RDB encoding loading test" { + r select 0 + csvdump r + } {"compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +"hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000", +"hash_zipped","hash","a","1","b","2","c","3", +"list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000", +"list_zipped","list","1","2","3","a","b","c","100000","6000000000", +"number","string","10" +"set","set","1","100000","2","3","6000000000","a","b","c", +"set_zipped_1","set","1","2","3","4", +"set_zipped_2","set","100000","200000","300000","400000", +"set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000", +"string","string","Hello World" +"zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000", 
+"zset_zipped","zset","a","1","b","2","c","3", +} +} + +set server_path [tmpdir "server.rdb-startup-test"] + +start_server [list overrides [list "dir" $server_path]] { + test {Server started empty with non-existing RDB file} { + r debug digest + } {0000000000000000000000000000000000000000} + # Save an RDB file, needed for the next test. + r save +} + +start_server [list overrides [list "dir" $server_path]] { + test {Server started empty with empty RDB file} { + r debug digest + } {0000000000000000000000000000000000000000} +} + +# Helper function to start a server and kill it, just to check the error +# logged. +set defaults {} +proc start_server_and_kill_it {overrides code} { + upvar defaults defaults srv srv server_path server_path + set config [concat $defaults $overrides] + set srv [start_server [list overrides $config]] + uplevel 1 $code + kill_server $srv +} + +# Make the RDB file unreadable +file attributes [file join $server_path dump.rdb] -permissions 0222 + +# Detect root account (it is able to read the file even with 002 perm) +set isroot 0 +catch { + open [file join $server_path dump.rdb] + set isroot 1 +} + +# Now make sure the server aborted with an error +if {!$isroot} { + start_server_and_kill_it [list "dir" $server_path] { + test {Server should not start if RDB file can't be open} { + wait_for_condition 50 100 { + [string match {*Fatal error loading*} \ + [exec tail -n1 < [dict get $srv stdout]]] + } else { + fail "Server started even if RDB was unreadable!" + } + } + } +} + +# Fix permissions of the RDB file. +file attributes [file join $server_path dump.rdb] -permissions 0666 + +# Corrupt its CRC64 checksum. 
+set filesize [file size [file join $server_path dump.rdb]] +set fd [open [file join $server_path dump.rdb] r+] +fconfigure $fd -translation binary +seek $fd -8 end +puts -nonewline $fd "foobar00"; # Corrupt the checksum +close $fd + +# Now make sure the server aborted with an error +start_server_and_kill_it [list "dir" $server_path] { + test {Server should not start if RDB is corrupted} { + wait_for_condition 50 100 { + [string match {*RDB checksum*} \ + [exec tail -n1 < [dict get $srv stdout]]] + } else { + fail "Server started even if RDB was corrupted!" + } + } +} diff --git a/tests/unit/tcl/redis-cli.tcl b/tests/unit/tcl/redis-cli.tcl new file mode 100644 index 000000000..40e4222e3 --- /dev/null +++ b/tests/unit/tcl/redis-cli.tcl @@ -0,0 +1,208 @@ +start_server {tags {"cli"}} { + proc open_cli {} { + set ::env(TERM) dumb + set fd [open [format "|src/redis-cli -p %d -n 9" [srv port]] "r+"] + fconfigure $fd -buffering none + fconfigure $fd -blocking false + fconfigure $fd -translation binary + assert_equal "redis> " [read_cli $fd] + set _ $fd + } + + proc close_cli {fd} { + close $fd + } + + proc read_cli {fd} { + set buf [read $fd] + while {[string length $buf] == 0} { + # wait some time and try again + after 10 + set buf [read $fd] + } + set _ $buf + } + + proc write_cli {fd buf} { + puts $fd $buf + flush $fd + } + + # Helpers to run tests in interactive mode + proc run_command {fd cmd} { + write_cli $fd $cmd + set lines [split [read_cli $fd] "\n"] + assert_equal "redis> " [lindex $lines end] + join [lrange $lines 0 end-1] "\n" + } + + proc test_interactive_cli {name code} { + set ::env(FAKETTY) 1 + set fd [open_cli] + test "Interactive CLI: $name" $code + close_cli $fd + unset ::env(FAKETTY) + } + + # Helpers to run tests where stdout is not a tty + proc write_tmpfile {contents} { + set tmp [tmpfile "cli"] + set tmpfd [open $tmp "w"] + puts -nonewline $tmpfd $contents + close $tmpfd + set _ $tmp + } + + proc _run_cli {opts args} { + set cmd [format 
"src/redis-cli -p %d -n 9 $args" [srv port]] + foreach {key value} $opts { + if {$key eq "pipe"} { + set cmd "sh -c \"$value | $cmd\"" + } + if {$key eq "path"} { + set cmd "$cmd < $value" + } + } + + set fd [open "|$cmd" "r"] + fconfigure $fd -buffering none + fconfigure $fd -translation binary + set resp [read $fd 1048576] + close $fd + set _ $resp + } + + proc run_cli {args} { + _run_cli {} {*}$args + } + + proc run_cli_with_input_pipe {cmd args} { + _run_cli [list pipe $cmd] {*}$args + } + + proc run_cli_with_input_file {path args} { + _run_cli [list path $path] {*}$args + } + + proc test_nontty_cli {name code} { + test "Non-interactive non-TTY CLI: $name" $code + } + + # Helpers to run tests where stdout is a tty (fake it) + proc test_tty_cli {name code} { + set ::env(FAKETTY) 1 + test "Non-interactive TTY CLI: $name" $code + unset ::env(FAKETTY) + } + + test_interactive_cli "INFO response should be printed raw" { + set lines [split [run_command $fd info] "\n"] + foreach line $lines { + assert [regexp {^[a-z0-9_]+:[a-z0-9_]+} $line] + } + } + + test_interactive_cli "Status reply" { + assert_equal "OK" [run_command $fd "set key foo"] + } + + test_interactive_cli "Integer reply" { + assert_equal "(integer) 1" [run_command $fd "incr counter"] + } + + test_interactive_cli "Bulk reply" { + r set key foo + assert_equal "\"foo\"" [run_command $fd "get key"] + } + + test_interactive_cli "Multi-bulk reply" { + r rpush list foo + r rpush list bar + assert_equal "1. \"foo\"\n2. 
\"bar\"" [run_command $fd "lrange list 0 -1"] + } + + test_interactive_cli "Parsing quotes" { + assert_equal "OK" [run_command $fd "set key \"bar\""] + assert_equal "bar" [r get key] + assert_equal "OK" [run_command $fd "set key \" bar \""] + assert_equal " bar " [r get key] + assert_equal "OK" [run_command $fd "set key \"\\\"bar\\\"\""] + assert_equal "\"bar\"" [r get key] + assert_equal "OK" [run_command $fd "set key \"\tbar\t\""] + assert_equal "\tbar\t" [r get key] + + # invalid quotation + assert_equal "Invalid argument(s)" [run_command $fd "get \"\"key"] + assert_equal "Invalid argument(s)" [run_command $fd "get \"key\"x"] + + # quotes after the argument are weird, but should be allowed + assert_equal "OK" [run_command $fd "set key\"\" bar"] + assert_equal "bar" [r get key] + } + + test_tty_cli "Status reply" { + assert_equal "OK\n" [run_cli set key bar] + assert_equal "bar" [r get key] + } + + test_tty_cli "Integer reply" { + r del counter + assert_equal "(integer) 1\n" [run_cli incr counter] + } + + test_tty_cli "Bulk reply" { + r set key "tab\tnewline\n" + assert_equal "\"tab\\tnewline\\n\"\n" [run_cli get key] + } + + test_tty_cli "Multi-bulk reply" { + r del list + r rpush list foo + r rpush list bar + assert_equal "1. \"foo\"\n2. 
\"bar\"\n" [run_cli lrange list 0 -1] + } + + test_tty_cli "Read last argument from pipe" { + assert_equal "OK\n" [run_cli_with_input_pipe "echo foo" set key] + assert_equal "foo\n" [r get key] + } + + test_tty_cli "Read last argument from file" { + set tmpfile [write_tmpfile "from file"] + assert_equal "OK\n" [run_cli_with_input_file $tmpfile set key] + assert_equal "from file" [r get key] + } + + test_nontty_cli "Status reply" { + assert_equal "OK" [run_cli set key bar] + assert_equal "bar" [r get key] + } + + test_nontty_cli "Integer reply" { + r del counter + assert_equal "1" [run_cli incr counter] + } + + test_nontty_cli "Bulk reply" { + r set key "tab\tnewline\n" + assert_equal "tab\tnewline\n" [run_cli get key] + } + + test_nontty_cli "Multi-bulk reply" { + r del list + r rpush list foo + r rpush list bar + assert_equal "foo\nbar" [run_cli lrange list 0 -1] + } + + test_nontty_cli "Read last argument from pipe" { + assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key] + assert_equal "foo\n" [r get key] + } + + test_nontty_cli "Read last argument from file" { + set tmpfile [write_tmpfile "from file"] + assert_equal "OK" [run_cli_with_input_file $tmpfile set key] + assert_equal "from file" [r get key] + } +} diff --git a/tests/unit/tcl/replication-2.tcl b/tests/unit/tcl/replication-2.tcl new file mode 100644 index 000000000..9446e5cd9 --- /dev/null +++ b/tests/unit/tcl/replication-2.tcl @@ -0,0 +1,87 @@ +start_server {tags {"repl"}} { + start_server {} { + test {First server should have role slave after SLAVEOF} { + r -1 slaveof [srv 0 host] [srv 0 port] + after 1000 + s -1 role + } {slave} + + test {If min-slaves-to-write is honored, write is accepted} { + r config set min-slaves-to-write 1 + r config set min-slaves-max-lag 10 + r set foo 12345 + wait_for_condition 50 100 { + [r -1 get foo] eq {12345} + } else { + fail "Write did not reached slave" + } + } + + test {No write if min-slaves-to-write is < attached slaves} { + r config set 
min-slaves-to-write 2 + r config set min-slaves-max-lag 10 + catch {r set foo 12345} err + set err + } {NOREPLICAS*} + + test {If min-slaves-to-write is honored, write is accepted (again)} { + r config set min-slaves-to-write 1 + r config set min-slaves-max-lag 10 + r set foo 12345 + wait_for_condition 50 100 { + [r -1 get foo] eq {12345} + } else { + fail "Write did not reached slave" + } + } + + test {No write if min-slaves-max-lag is > of the slave lag} { + r -1 deferred 1 + r config set min-slaves-to-write 1 + r config set min-slaves-max-lag 2 + r -1 debug sleep 6 + assert {[r set foo 12345] eq {OK}} + after 4000 + catch {r set foo 12345} err + assert {[r -1 read] eq {OK}} + r -1 deferred 0 + set err + } {NOREPLICAS*} + + test {min-slaves-to-write is ignored by slaves} { + r config set min-slaves-to-write 1 + r config set min-slaves-max-lag 10 + r -1 config set min-slaves-to-write 1 + r -1 config set min-slaves-max-lag 10 + r set foo aaabbb + wait_for_condition 50 100 { + [r -1 get foo] eq {aaabbb} + } else { + fail "Write did not reached slave" + } + } + + # Fix parameters for the next test to work + r config set min-slaves-to-write 0 + r -1 config set min-slaves-to-write 0 + r flushall + + test {MASTER and SLAVE dataset should be identical after complex ops} { + createComplexDataset r 10000 + after 500 + if {[r debug digest] ne [r -1 debug digest]} { + set csv1 [csvdump r] + set csv2 [csvdump {r -1}] + set fd [open /tmp/repldump1.txt w] + puts -nonewline $fd $csv1 + close $fd + set fd [open /tmp/repldump2.txt w] + puts -nonewline $fd $csv2 + close $fd + puts "Master - Slave inconsistency" + puts "Run diff -u against /tmp/repldump*.txt for more info" + } + assert_equal [r debug digest] [r -1 debug digest] + } + } +} diff --git a/tests/unit/tcl/replication-3.tcl b/tests/unit/tcl/replication-3.tcl new file mode 100644 index 000000000..0fcbad45b --- /dev/null +++ b/tests/unit/tcl/replication-3.tcl @@ -0,0 +1,101 @@ +start_server {tags {"repl"}} { + start_server 
{} { + test {First server should have role slave after SLAVEOF} { + r -1 slaveof [srv 0 host] [srv 0 port] + wait_for_condition 50 100 { + [s -1 master_link_status] eq {up} + } else { + fail "Replication not started." + } + } + + if {$::accurate} {set numops 50000} else {set numops 5000} + + test {MASTER and SLAVE consistency with expire} { + createComplexDataset r $numops useexpire + after 4000 ;# Make sure everything expired before taking the digest + r keys * ;# Force DEL syntesizing to slave + after 1000 ;# Wait another second. Now everything should be fine. + if {[r debug digest] ne [r -1 debug digest]} { + set csv1 [csvdump r] + set csv2 [csvdump {r -1}] + set fd [open /tmp/repldump1.txt w] + puts -nonewline $fd $csv1 + close $fd + set fd [open /tmp/repldump2.txt w] + puts -nonewline $fd $csv2 + close $fd + puts "Master - Slave inconsistency" + puts "Run diff -u against /tmp/repldump*.txt for more info" + } + assert_equal [r debug digest] [r -1 debug digest] + } + } +} + +start_server {tags {"repl"}} { + start_server {} { + test {First server should have role slave after SLAVEOF} { + r -1 slaveof [srv 0 host] [srv 0 port] + wait_for_condition 50 100 { + [s -1 master_link_status] eq {up} + } else { + fail "Replication not started." + } + } + + set numops 20000 ;# Enough to trigger the Script Cache LRU eviction. + + # While we are at it, enable AOF to test it will be consistent as well + # after the test. + r config set appendonly yes + + test {MASTER and SLAVE consistency with EVALSHA replication} { + array set oldsha {} + for {set j 0} {$j < $numops} {incr j} { + set key "key:$j" + # Make sure to create scripts that have different SHA1s + set script "return redis.call('incr','$key')" + set sha1 [r eval "return redis.sha1hex(\"$script\")" 0] + set oldsha($j) $sha1 + r eval $script 0 + set res [r evalsha $sha1 0] + assert {$res == 2} + # Additionally call one of the old scripts as well, at random. 
+ set res [r evalsha $oldsha([randomInt $j]) 0] + assert {$res > 2} + + # Trigger an AOF rewrite while we are half-way, this also + # forces the flush of the script cache, and we will cover + # more code as a result. + if {$j == $numops / 2} { + catch {r bgrewriteaof} + } + } + + wait_for_condition 50 100 { + [r dbsize] == $numops && + [r -1 dbsize] == $numops && + [r debug digest] eq [r -1 debug digest] + } else { + set csv1 [csvdump r] + set csv2 [csvdump {r -1}] + set fd [open /tmp/repldump1.txt w] + puts -nonewline $fd $csv1 + close $fd + set fd [open /tmp/repldump2.txt w] + puts -nonewline $fd $csv2 + close $fd + puts "Master - Slave inconsistency" + puts "Run diff -u against /tmp/repldump*.txt for more info" + + } + + set old_digest [r debug digest] + r config set appendonly no + r debug loadaof + set new_digest [r debug digest] + assert {$old_digest eq $new_digest} + } + } +} diff --git a/tests/unit/tcl/replication-4.tcl b/tests/unit/tcl/replication-4.tcl new file mode 100644 index 000000000..6db9ffe2b --- /dev/null +++ b/tests/unit/tcl/replication-4.tcl @@ -0,0 +1,136 @@ +proc start_bg_complex_data {host port db ops} { + set tclsh [info nameofexecutable] + exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & +} + +proc stop_bg_complex_data {handle} { + catch {exec /bin/kill -9 $handle} +} + +start_server {tags {"repl"}} { + start_server {} { + + set master [srv -1 client] + set master_host [srv -1 host] + set master_port [srv -1 port] + set slave [srv 0 client] + + set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] + set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] + set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] + + test {First server should have role slave after SLAVEOF} { + $slave slaveof $master_host $master_port + after 1000 + s 0 role + } {slave} + + test {Test replication with parallel clients writing in differnet DBs} { + after 5000 + 
stop_bg_complex_data $load_handle0 + stop_bg_complex_data $load_handle1 + stop_bg_complex_data $load_handle2 + set retry 10 + while {$retry && ([$master debug digest] ne [$slave debug digest])}\ + { + after 1000 + incr retry -1 + } + assert {[$master dbsize] > 0} + + if {[$master debug digest] ne [$slave debug digest]} { + set csv1 [csvdump r] + set csv2 [csvdump {r -1}] + set fd [open /tmp/repldump1.txt w] + puts -nonewline $fd $csv1 + close $fd + set fd [open /tmp/repldump2.txt w] + puts -nonewline $fd $csv2 + close $fd + puts "Master - Slave inconsistency" + puts "Run diff -u against /tmp/repldump*.txt for more info" + } + assert_equal [r debug digest] [r -1 debug digest] + } + } +} + +start_server {tags {"repl"}} { + start_server {} { + set master [srv -1 client] + set master_host [srv -1 host] + set master_port [srv -1 port] + set slave [srv 0 client] + + test {First server should have role slave after SLAVEOF} { + $slave slaveof $master_host $master_port + wait_for_condition 50 100 { + [s 0 master_link_status] eq {up} + } else { + fail "Replication not started." 
+ } + } + + test {With min-slaves-to-write (1,3): master should be writable} { + $master config set min-slaves-max-lag 3 + $master config set min-slaves-to-write 1 + $master set foo bar + } {OK} + + test {With min-slaves-to-write (2,3): master should not be writable} { + $master config set min-slaves-max-lag 3 + $master config set min-slaves-to-write 2 + catch {$master set foo bar} e + set e + } {NOREPLICAS*} + + test {With min-slaves-to-write: master not writable with lagged slave} { + $master config set min-slaves-max-lag 2 + $master config set min-slaves-to-write 1 + assert {[$master set foo bar] eq {OK}} + $slave deferred 1 + $slave debug sleep 6 + after 4000 + catch {$master set foo bar} e + set e + } {NOREPLICAS*} + } +} + +start_server {tags {"repl"}} { + start_server {} { + set master [srv -1 client] + set master_host [srv -1 host] + set master_port [srv -1 port] + set slave [srv 0 client] + + test {First server should have role slave after SLAVEOF} { + $slave slaveof $master_host $master_port + wait_for_condition 50 100 { + [s 0 role] eq {slave} + } else { + fail "Replication not started." + } + } + + test {Replication: commands with many arguments (issue #1221)} { + # We now issue large MSET commands, that may trigger a specific + # class of bugs, see issue #1221. 
+ for {set j 0} {$j < 100} {incr j} { + set cmd [list mset] + for {set x 0} {$x < 1000} {incr x} { + lappend cmd [randomKey] [randomValue] + } + $master {*}$cmd + } + + set retry 10 + while {$retry && ([$master debug digest] ne [$slave debug digest])}\ + { + after 1000 + incr retry -1 + } + assert {[$master dbsize] > 0} + } + } +} diff --git a/tests/unit/tcl/replication-psync.tcl b/tests/unit/tcl/replication-psync.tcl new file mode 100644 index 000000000..f131dafe3 --- /dev/null +++ b/tests/unit/tcl/replication-psync.tcl @@ -0,0 +1,115 @@ +proc start_bg_complex_data {host port db ops} { + set tclsh [info nameofexecutable] + exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & +} + +proc stop_bg_complex_data {handle} { + catch {exec /bin/kill -9 $handle} +} + +# Creates a master-slave pair and breaks the link continuously to force +# partial resyncs attempts, all this while flooding the master with +# write queries. +# +# You can specifiy backlog size, ttl, delay before reconnection, test duration +# in seconds, and an additional condition to verify at the end. +proc test_psync {descr duration backlog_size backlog_ttl delay cond} { + start_server {tags {"repl"}} { + start_server {} { + + set master [srv -1 client] + set master_host [srv -1 host] + set master_port [srv -1 port] + set slave [srv 0 client] + + $master config set repl-backlog-size $backlog_size + $master config set repl-backlog-ttl $backlog_ttl + + set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] + set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] + set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] + + test {Slave should be able to synchronize with the master} { + $slave slaveof $master_host $master_port + wait_for_condition 50 100 { + [lindex [r role] 0] eq {slave} && + [lindex [r role] 3] eq {connected} + } else { + fail "Replication not started." 
+ } + } + + # Check that the background clients are actually writing. + test {Detect write load to master} { + wait_for_condition 50 100 { + [$master dbsize] > 100 + } else { + fail "Can't detect write load from background clients." + } + } + + test "Test replication partial resync: $descr" { + # Now while the clients are writing data, break the maste-slave + # link multiple times. + for {set j 0} {$j < $duration*10} {incr j} { + after 100 + # catch {puts "MASTER [$master dbsize] keys, SLAVE [$slave dbsize] keys"} + + if {($j % 20) == 0} { + catch { + if {$delay} { + $slave multi + $slave client kill $master_host:$master_port + $slave debug sleep $delay + $slave exec + } else { + $slave client kill $master_host:$master_port + } + } + } + } + stop_bg_complex_data $load_handle0 + stop_bg_complex_data $load_handle1 + stop_bg_complex_data $load_handle2 + set retry 10 + while {$retry && ([$master debug digest] ne [$slave debug digest])}\ + { + after 1000 + incr retry -1 + } + assert {[$master dbsize] > 0} + + if {[$master debug digest] ne [$slave debug digest]} { + set csv1 [csvdump r] + set csv2 [csvdump {r -1}] + set fd [open /tmp/repldump1.txt w] + puts -nonewline $fd $csv1 + close $fd + set fd [open /tmp/repldump2.txt w] + puts -nonewline $fd $csv2 + close $fd + puts "Master - Slave inconsistency" + puts "Run diff -u against /tmp/repldump*.txt for more info" + } + assert_equal [r debug digest] [r -1 debug digest] + eval $cond + } + } + } +} + +test_psync {ok psync} 6 1000000 3600 0 { + assert {[s -1 sync_partial_ok] > 0} +} + +test_psync {no backlog} 6 100 3600 0.5 { + assert {[s -1 sync_partial_err] > 0} +} + +test_psync {ok after delay} 3 100000000 3600 3 { + assert {[s -1 sync_partial_ok] > 0} +} + +test_psync {backlog expired} 3 100000000 1 3 { + assert {[s -1 sync_partial_err] > 0} +} diff --git a/tests/unit/tcl/replication.tcl b/tests/unit/tcl/replication.tcl new file mode 100644 index 000000000..bb907eba8 --- /dev/null +++ b/tests/unit/tcl/replication.tcl @@ 
-0,0 +1,215 @@ +start_server {tags {"repl"}} { + set A [srv 0 client] + set A_host [srv 0 host] + set A_port [srv 0 port] + start_server {} { + set B [srv 0 client] + set B_host [srv 0 host] + set B_port [srv 0 port] + + test {Set instance A as slave of B} { + $A slaveof $B_host $B_port + wait_for_condition 50 100 { + [lindex [$A role] 0] eq {slave} && + [string match {*master_link_status:up*} [$A info replication]] + } else { + fail "Can't turn the instance into a slave" + } + } + + test {BRPOPLPUSH replication, when blocking against empty list} { + set rd [redis_deferring_client] + $rd brpoplpush a b 5 + r lpush a foo + wait_for_condition 50 100 { + [$A debug digest] eq [$B debug digest] + } else { + fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" + } + } + + test {BRPOPLPUSH replication, list exists} { + set rd [redis_deferring_client] + r lpush c 1 + r lpush c 2 + r lpush c 3 + $rd brpoplpush c d 5 + after 1000 + assert_equal [$A debug digest] [$B debug digest] + } + + test {BLPOP followed by role change, issue #2473} { + set rd [redis_deferring_client] + $rd blpop foo 0 ; # Block while B is a master + + # Turn B into master of A + $A slaveof no one + $B slaveof $A_host $A_port + wait_for_condition 50 100 { + [lindex [$B role] 0] eq {slave} && + [string match {*master_link_status:up*} [$B info replication]] + } else { + fail "Can't turn the instance into a slave" + } + + # Push elements into the "foo" list of the new slave. + # If the client is still attached to the instance, we'll get + # a desync between the two instances. 
+ $A rpush foo a b c + after 100 + + wait_for_condition 50 100 { + [$A debug digest] eq [$B debug digest] && + [$A lrange foo 0 -1] eq {a b c} && + [$B lrange foo 0 -1] eq {a b c} + } else { + fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" + } + } + } +} + +start_server {tags {"repl"}} { + r set mykey foo + + start_server {} { + test {Second server should have role master at first} { + s role + } {master} + + test {SLAVEOF should start with link status "down"} { + r slaveof [srv -1 host] [srv -1 port] + s master_link_status + } {down} + + test {The role should immediately be changed to "slave"} { + s role + } {slave} + + wait_for_sync r + test {Sync should have transferred keys from master} { + r get mykey + } {foo} + + test {The link status should be up} { + s master_link_status + } {up} + + test {SET on the master should immediately propagate} { + r -1 set mykey bar + + wait_for_condition 500 100 { + [r 0 get mykey] eq {bar} + } else { + fail "SET on master did not propagated on slave" + } + } + + test {FLUSHALL should replicate} { + r -1 flushall + if {$::valgrind} {after 2000} + list [r -1 dbsize] [r 0 dbsize] + } {0 0} + + test {ROLE in master reports master with a slave} { + set res [r -1 role] + lassign $res role offset slaves + assert {$role eq {master}} + assert {$offset > 0} + assert {[llength $slaves] == 1} + lassign [lindex $slaves 0] master_host master_port slave_offset + assert {$slave_offset <= $offset} + } + + test {ROLE in slave reports slave in connected state} { + set res [r role] + lassign $res role master_host master_port slave_state slave_offset + assert {$role eq {slave}} + assert {$slave_state eq {connected}} + } + } +} + +foreach dl {no yes} { + start_server {tags {"repl"}} { + set master [srv 0 client] + $master config set repl-diskless-sync $dl + set master_host [srv 0 host] + set master_port [srv 0 port] + set slaves {} + set load_handle0 [start_write_load $master_host $master_port 3] + set 
load_handle1 [start_write_load $master_host $master_port 5] + set load_handle2 [start_write_load $master_host $master_port 20] + set load_handle3 [start_write_load $master_host $master_port 8] + set load_handle4 [start_write_load $master_host $master_port 4] + start_server {} { + lappend slaves [srv 0 client] + start_server {} { + lappend slaves [srv 0 client] + start_server {} { + lappend slaves [srv 0 client] + test "Connect multiple slaves at the same time (issue #141), diskless=$dl" { + # Send SLAVEOF commands to slaves + [lindex $slaves 0] slaveof $master_host $master_port + [lindex $slaves 1] slaveof $master_host $master_port + [lindex $slaves 2] slaveof $master_host $master_port + + # Wait for all the three slaves to reach the "online" + # state from the POV of the master. + set retry 500 + while {$retry} { + set info [r -3 info] + if {[string match {*slave0:*state=online*slave1:*state=online*slave2:*state=online*} $info]} { + break + } else { + incr retry -1 + after 100 + } + } + if {$retry == 0} { + error "assertion:Slaves not correctly synchronized" + } + + # Wait that slaves acknowledge they are online so + # we are sure that DBSIZE and DEBUG DIGEST will not + # fail because of timing issues. 
+ wait_for_condition 500 100 { + [lindex [[lindex $slaves 0] role] 3] eq {connected} && + [lindex [[lindex $slaves 1] role] 3] eq {connected} && + [lindex [[lindex $slaves 2] role] 3] eq {connected} + } else { + fail "Slaves still not connected after some time" + } + + # Stop the write load + stop_write_load $load_handle0 + stop_write_load $load_handle1 + stop_write_load $load_handle2 + stop_write_load $load_handle3 + stop_write_load $load_handle4 + + # Make sure that slaves and master have same + # number of keys + wait_for_condition 500 100 { + [$master dbsize] == [[lindex $slaves 0] dbsize] && + [$master dbsize] == [[lindex $slaves 1] dbsize] && + [$master dbsize] == [[lindex $slaves 2] dbsize] + } else { + fail "Different number of keys between masted and slave after too long time." + } + + # Check digests + set digest [$master debug digest] + set digest0 [[lindex $slaves 0] debug digest] + set digest1 [[lindex $slaves 1] debug digest] + set digest2 [[lindex $slaves 2] debug digest] + assert {$digest ne 0000000000000000000000000000000000000000} + assert {$digest eq $digest0} + assert {$digest eq $digest1} + assert {$digest eq $digest2} + } + } + } + } + } +} diff --git a/tests/unit/type.tcl b/tests/unit/type.tcl new file mode 100644 index 000000000..2b5b9045a --- /dev/null +++ b/tests/unit/type.tcl @@ -0,0 +1,50 @@ +start_server {tags {"type"}} { + + test "type none" { + r flushdb + assert_equal none [r type key] + } + + test "type command" { + r flushdb + + r set key1 key1 + assert_equal string [r type key1] + + r hset key2 key key2 + assert_equal hash [r type key2] + + r lpush key3 key3 + assert_equal list [r type key3] + + r zadd key4 100 key4 + assert_equal zset [r type key4] + + r sadd key5 key5 + assert_equal set [r type key5] + } + + test "ptype none" { + r flushdb + assert_equal {} [r ptype key] + } + + test "ptype command" { + r flushdb + + r set key1 key1 + assert_equal string [r ptype key1] + + r hset key1 key key1 + assert_equal {string hash} [r 
ptype key1] + + r lpush key1 key1 + assert_equal {string hash list} [r ptype key1] + + r zadd key1 100 key1 + assert_equal {string hash list zset} [r ptype key1] + + r sadd key1 key1 + assert_equal {string hash list zset set} [r ptype key1] + } +} \ No newline at end of file diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl new file mode 100644 index 000000000..5612892a8 --- /dev/null +++ b/tests/unit/type/hash.tcl @@ -0,0 +1,871 @@ +start_server {tags {"hash"}} { + test {HSET/HLEN - Small hash creation} { + array set smallhash {} + for {set i 0} {$i < 8} {incr i} { + set key __avoid_collisions__[randstring 0 8 alpha] + set val __avoid_collisions__[randstring 0 8 alpha] + if {[info exists smallhash($key)]} { + incr i -1 + continue + } + r hset smallhash $key $val + set smallhash($key) $val + } + list [r hlen smallhash] + } {8} + +# test {Is the small hash encoded with a listpack?} { +# assert_encoding listpack smallhash +# } + + proc create_hash {key entries} { + r del $key + foreach entry $entries { + r hset $key [lindex $entry 0] [lindex $entry 1] + } + } + + proc get_keys {l} { + set res {} + foreach entry $l { + set key [lindex $entry 0] + lappend res $key + } + return $res + } + +# This parameter is not available in PikiwiDB +# foreach {type contents} "listpack {{a 1} {b 2} {c 3}} hashtable {{a 1} {b 2} {[randstring 70 90 alpha] 3}}" { +# set original_max_value [lindex [r config get hash-max-ziplist-value] 1] +# r config set hash-max-ziplist-value 10 +# create_hash myhash $contents +# assert_encoding $type myhash +# +# # coverage for objectComputeSize +# assert_morethan [memory_usage myhash] 0 +# +# test "HRANDFIELD - $type" { +# unset -nocomplain myhash +# array set myhash {} +# for {set i 0} {$i < 100} {incr i} { +# set key [r hrandfield myhash] +# set myhash($key) 1 +# } +# assert_equal [lsort [get_keys $contents]] [lsort [array names myhash]] +# } +# r config set hash-max-ziplist-value $original_max_value +# } + +# PikiwiDB does not support 
the hello command +# test "HRANDFIELD with RESP3" { +# r hello 3 +# set res [r hrandfield myhash 3 withvalues] +# assert_equal [llength $res] 3 +# assert_equal [llength [lindex $res 1]] 2 +# +# set res [r hrandfield myhash 3] +# assert_equal [llength $res] 3 +# assert_equal [llength [lindex $res 1]] 1 +# r hello 2 +# } + + test "HRANDFIELD count of 0 is handled correctly" { + r hrandfield myhash 0 + } {} + +# The return value of PikiwiDB is inconsistent with Redis +# test "HRANDFIELD count overflow" { +# r hmset myhash a 1 +# assert_error {*value is out of range*} {r hrandfield myhash -9223372036854770000 withvalues} +# assert_error {*value is out of range*} {r hrandfield myhash -9223372036854775808 withvalues} +# assert_error {*value is out of range*} {r hrandfield myhash -9223372036854775808} +# } {} + +# test "HRANDFIELD with against non existing key" { +# r hrandfield nonexisting_key 100 +# } {} + +# # Make sure we can distinguish between an empty array and a null response +# r readraw 1 +# +# test "HRANDFIELD count of 0 is handled correctly - emptyarray" { +# r hrandfield myhash 0 +# } {*0} +# +# test "HRANDFIELD with against non existing key - emptyarray" { +# r hrandfield nonexisting_key 100 +# } {*0} +# +# r readraw 0 + +# This parameter is not available in PikiwiDB +# foreach {type contents} " +# hashtable {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {[randstring 70 90 alpha] 10}} +# listpack {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {10 j}} " { +# test "HRANDFIELD with - $type" { +# set original_max_value [lindex [r config get hash-max-ziplist-value] 1] +# r config set hash-max-ziplist-value 10 +# create_hash myhash $contents +# assert_encoding $type myhash +# +# # create a dict for easy lookup +# set mydict [dict create {*}[r hgetall myhash]] +# +# # We'll stress different parts of the code, see the implementation +# # of HRANDFIELD for more information, but basically there are +# # four different code paths. 
+# +# # PATH 1: Use negative count. +# +# # 1) Check that it returns repeated elements with and without values. +# set res [r hrandfield myhash -20] +# assert_equal [llength $res] 20 +# set res [r hrandfield myhash -1001] +# assert_equal [llength $res] 1001 +# # again with WITHVALUES +# set res [r hrandfield myhash -20 withvalues] +# assert_equal [llength $res] 40 +# set res [r hrandfield myhash -1001 withvalues] +# assert_equal [llength $res] 2002 +# +# # Test random uniform distribution +# # df = 9, 40 means 0.00001 probability +# set res [r hrandfield myhash -1000] +# assert_lessthan [chi_square_value $res] 40 +# +# # 2) Check that all the elements actually belong to the original hash. +# foreach {key val} $res { +# assert {[dict exists $mydict $key]} +# } +# +# # 3) Check that eventually all the elements are returned. +# # Use both WITHVALUES and without +# unset -nocomplain auxset +# set iterations 1000 +# while {$iterations != 0} { +# incr iterations -1 +# if {[expr {$iterations % 2}] == 0} { +# set res [r hrandfield myhash -3 withvalues] +# foreach {key val} $res { +# dict append auxset $key $val +# } +# } else { +# set res [r hrandfield myhash -3] +# foreach key $res { +# dict append auxset $key $val +# } +# } +# if {[lsort [dict keys $mydict]] eq +# [lsort [dict keys $auxset]]} { +# break; +# } +# } +# assert {$iterations != 0} +# +# # PATH 2: positive count (unique behavior) with requested size +# # equal or greater than set size. +# foreach size {10 20} { +# set res [r hrandfield myhash $size] +# assert_equal [llength $res] 10 +# assert_equal [lsort $res] [lsort [dict keys $mydict]] +# +# # again with WITHVALUES +# set res [r hrandfield myhash $size withvalues] +# assert_equal [llength $res] 20 +# assert_equal [lsort $res] [lsort $mydict] +# } +# +# # PATH 3: Ask almost as elements as there are in the set. +# # In this case the implementation will duplicate the original +# # set and will remove random elements up to the requested size. 
+# # +# # PATH 4: Ask a number of elements definitely smaller than +# # the set size. +# # +# # We can test both the code paths just changing the size but +# # using the same code. +# foreach size {8 2} { +# set res [r hrandfield myhash $size] +# assert_equal [llength $res] $size +# # again with WITHVALUES +# set res [r hrandfield myhash $size withvalues] +# assert_equal [llength $res] [expr {$size * 2}] +# +# # 1) Check that all the elements actually belong to the +# # original set. +# foreach ele [dict keys $res] { +# assert {[dict exists $mydict $ele]} +# } +# +# # 2) Check that eventually all the elements are returned. +# # Use both WITHVALUES and without +# unset -nocomplain auxset +# unset -nocomplain allkey +# set iterations [expr {1000 / $size}] +# set all_ele_return false +# while {$iterations != 0} { +# incr iterations -1 +# if {[expr {$iterations % 2}] == 0} { +# set res [r hrandfield myhash $size withvalues] +# foreach {key value} $res { +# dict append auxset $key $value +# lappend allkey $key +# } +# } else { +# set res [r hrandfield myhash $size] +# foreach key $res { +# dict append auxset $key +# lappend allkey $key +# } +# } +# if {[lsort [dict keys $mydict]] eq +# [lsort [dict keys $auxset]]} { +# set all_ele_return true +# } +# } +# assert_equal $all_ele_return true +# # df = 9, 40 means 0.00001 probability +# assert_lessthan [chi_square_value $allkey] 40 +# } +# } +# r config set hash-max-ziplist-value $original_max_value +# } + +# This parameter is not available in PikiwiDB +# test {HSET/HLEN - Big hash creation} { +# array set bighash {} +# for {set i 0} {$i < 1024} {incr i} { +# set key __avoid_collisions__[randstring 0 8 alpha] +# set val __avoid_collisions__[randstring 0 8 alpha] +# if {[info exists bighash($key)]} { +# incr i -1 +# continue +# } +# r hset bighash $key $val +# set bighash($key) $val +# } +# list [r hlen bighash] +# } {1024} +# +# test {Is the big hash encoded with an hash table?} { +# assert_encoding hashtable bighash +# } + 
+ test {HGET against the small hash} { + set err {} + foreach k [array names smallhash *] { + if {$smallhash($k) ne [r hget smallhash $k]} { + set err "$smallhash($k) != [r hget smallhash $k]" + break + } + } + set _ $err + } {} + + test {HGET against the big hash} { + set err {} + foreach k [array names bighash *] { + if {$bighash($k) ne [r hget bighash $k]} { + set err "$bighash($k) != [r hget bighash $k]" + break + } + } + set _ $err + } {} + + test {HGET against non existing key} { + set rv {} + lappend rv [r hget smallhash __123123123__] + lappend rv [r hget bighash __123123123__] + set _ $rv + } {{} {}} + +# This parameter is not available in PikiwiDB +# test {HSET in update and insert mode} { +# set rv {} +# set k [lindex [array names smallhash *] 0] +# lappend rv [r hset smallhash $k newval1] +# set smallhash($k) newval1 +# lappend rv [r hget smallhash $k] +# lappend rv [r hset smallhash __foobar123__ newval] +# set k [lindex [array names bighash *] 0] +# lappend rv [r hset bighash $k newval2] +# set bighash($k) newval2 +# lappend rv [r hget bighash $k] +# lappend rv [r hset bighash __foobar123__ newval] +# lappend rv [r hdel smallhash __foobar123__] +# lappend rv [r hdel bighash __foobar123__] +# set _ $rv +# } {0 newval1 1 0 newval2 1 1 1} + + test {HSETNX target key missing - small hash} { + r hsetnx smallhash __123123123__ foo + r hget smallhash __123123123__ + } {foo} + + test {HSETNX target key exists - small hash} { + r hsetnx smallhash __123123123__ bar + set result [r hget smallhash __123123123__] + r hdel smallhash __123123123__ + set _ $result + } {foo} + + test {HSETNX target key missing - big hash} { + r hsetnx bighash __123123123__ foo + r hget bighash __123123123__ + } {foo} + + test {HSETNX target key exists - big hash} { + r hsetnx bighash __123123123__ bar + set result [r hget bighash __123123123__] + r hdel bighash __123123123__ + set _ $result + } {foo} + +# The return value of PikiwiDB is inconsistent with Redis +# test {HSET/HMSET 
wrong number of args} { +# assert_error {*wrong number of arguments for 'hset' command} {r hset smallhash key1 val1 key2} +# assert_error {*wrong number of arguments for 'hmset' command} {r hmset smallhash key1 val1 key2} +# } + + test {HMSET - small hash} { + set args {} + foreach {k v} [array get smallhash] { + set newval [randstring 0 8 alpha] + set smallhash($k) $newval + lappend args $k $newval + } + r hmset smallhash {*}$args + } {OK} + +# The return value of PikiwiDB is inconsistent with Redis +# test {HMSET - big hash} { +# set args {} +# foreach {k v} [array get bighash] { +# set newval [randstring 0 8 alpha] +# set bighash($k) $newval +# lappend args $k $newval +# } +# r hmset bighash {*}$args +# } {OK} + +# The return value of PikiwiDB is inconsistent with Redis +# test {HMGET against non existing key and fields} { +# set rv {} +# lappend rv [r hmget doesntexist __123123123__ __456456456__] +# lappend rv [r hmget smallhash __123123123__ __456456456__] +# lappend rv [r hmget bighash __123123123__ __456456456__] +# set _ $rv +# } {{{} {}} {{} {}} {{} {}}} + + test {Hash commands against wrong type} { + r set wrongtype somevalue + assert_error "WRONGTYPE Operation against a key*" {r hmget wrongtype field1 field2} + assert_error "WRONGTYPE Operation against a key*" {r hrandfield wrongtype} + assert_error "WRONGTYPE Operation against a key*" {r hget wrongtype field1} + assert_error "WRONGTYPE Operation against a key*" {r hgetall wrongtype} + assert_error "WRONGTYPE Operation against a key*" {r hdel wrongtype field1} + assert_error "WRONGTYPE Operation against a key*" {r hincrby wrongtype field1 2} + assert_error "WRONGTYPE Operation against a key*" {r hincrbyfloat wrongtype field1 2.5} + assert_error "WRONGTYPE Operation against a key*" {r hstrlen wrongtype field1} + assert_error "WRONGTYPE Operation against a key*" {r hvals wrongtype} + assert_error "WRONGTYPE Operation against a key*" {r hkeys wrongtype} + assert_error "WRONGTYPE Operation against a key*" 
{r hexists wrongtype field1} + assert_error "WRONGTYPE Operation against a key*" {r hset wrongtype field1 val1} + assert_error "WRONGTYPE Operation against a key*" {r hmset wrongtype field1 val1 field2 val2} + assert_error "WRONGTYPE Operation against a key*" {r hsetnx wrongtype field1 val1} + assert_error "WRONGTYPE Operation against a key*" {r hlen wrongtype} + assert_error "WRONGTYPE Operation against a key*" {r hscan wrongtype 0} + } + + test {HMGET - small hash} { + set keys {} + set vals {} + foreach {k v} [array get smallhash] { + lappend keys $k + lappend vals $v + } + set err {} + set result [r hmget smallhash {*}$keys] + if {$vals ne $result} { + set err "$vals != $result" + break + } + set _ $err + } {} + + test {HMGET - big hash} { + set keys {} + set vals {} + foreach {k v} [array get bighash] { + lappend keys $k + lappend vals $v + } + set err {} + set result [r hmget bighash {*}$keys] + if {$vals ne $result} { + set err "$vals != $result" + break + } + set _ $err + } {} + + test {HKEYS - small hash} { + lsort [r hkeys smallhash] + } [lsort [array names smallhash *]] + + test {HKEYS - big hash} { + lsort [r hkeys bighash] + } [lsort [array names bighash *]] + + test {HVALS - small hash} { + set vals {} + foreach {k v} [array get smallhash] { + lappend vals $v + } + set _ [lsort $vals] + } [lsort [r hvals smallhash]] + + test {HVALS - big hash} { + set vals {} + foreach {k v} [array get bighash] { + lappend vals $v + } + set _ [lsort $vals] + } [lsort [r hvals bighash]] + + test {HGETALL - small hash} { + lsort [r hgetall smallhash] + } [lsort [array get smallhash]] + + test {HGETALL - big hash} { + lsort [r hgetall bighash] + } [lsort [array get bighash]] + + test {HGETALL against non-existing key} { + r del htest + r hgetall htest + } {} + + test {HDEL and return value} { + set rv {} + lappend rv [r hdel smallhash nokey] + lappend rv [r hdel bighash nokey] + set k [lindex [array names smallhash *] 0] + lappend rv [r hdel smallhash $k] + lappend rv [r 
hdel smallhash $k] + lappend rv [r hget smallhash $k] + unset smallhash($k) + set k [lindex [array names bighash *] 0] + lappend rv [r hdel bighash $k] + lappend rv [r hdel bighash $k] + lappend rv [r hget bighash $k] + unset bighash($k) + set _ $rv + } {0 0 1 0 {} 1 0 {}} + + test {HDEL - more than a single value} { + set rv {} + r del myhash + r hmset myhash a 1 b 2 c 3 + assert_equal 0 [r hdel myhash x y] + assert_equal 2 [r hdel myhash a c f] + r hgetall myhash + } {b 2} + + test {HDEL - hash becomes empty before deleting all specified fields} { + r del myhash + r hmset myhash a 1 b 2 c 3 + assert_equal 3 [r hdel myhash a b c d e] + assert_equal 0 [r exists myhash] + } + + test {HEXISTS} { + set rv {} + set k [lindex [array names smallhash *] 0] + lappend rv [r hexists smallhash $k] + lappend rv [r hexists smallhash nokey] + set k [lindex [array names bighash *] 0] + lappend rv [r hexists bighash $k] + lappend rv [r hexists bighash nokey] + } {1 0 1 0} + + test {Is a ziplist encoded Hash promoted on big payload?} { + r hset smallhash foo [string repeat a 1024] + r debug object smallhash + } {*hashtable*} {needs:debug} + + test {HINCRBY against non existing database key} { + r del htest + list [r hincrby htest foo 2] + } {2} + + test {HINCRBY HINCRBYFLOAT against non-integer increment value} { + r del incrhash + r hset incrhash field 5 + assert_error "*value is not an integer*" {r hincrby incrhash field v} + assert_error "*value is not a*" {r hincrbyfloat incrhash field v} + } + + test {HINCRBY against non existing hash key} { + set rv {} + r hdel smallhash tmp + r hdel bighash tmp + lappend rv [r hincrby smallhash tmp 2] + lappend rv [r hget smallhash tmp] + lappend rv [r hincrby bighash tmp 2] + lappend rv [r hget bighash tmp] + } {2 2 2 2} + + test {HINCRBY against hash key created by hincrby itself} { + set rv {} + lappend rv [r hincrby smallhash tmp 3] + lappend rv [r hget smallhash tmp] + lappend rv [r hincrby bighash tmp 3] + lappend rv [r hget bighash 
tmp] + } {5 5 5 5} + + test {HINCRBY against hash key originally set with HSET} { + r hset smallhash tmp 100 + r hset bighash tmp 100 + list [r hincrby smallhash tmp 2] [r hincrby bighash tmp 2] + } {102 102} + + test {HINCRBY over 32bit value} { + r hset smallhash tmp 17179869184 + r hset bighash tmp 17179869184 + list [r hincrby smallhash tmp 1] [r hincrby bighash tmp 1] + } {17179869185 17179869185} + + test {HINCRBY over 32bit value with over 32bit increment} { + r hset smallhash tmp 17179869184 + r hset bighash tmp 17179869184 + list [r hincrby smallhash tmp 17179869184] [r hincrby bighash tmp 17179869184] + } {34359738368 34359738368} + + test {HINCRBY fails against hash value with spaces (left)} { + r hset smallhash str " 11" + r hset bighash str " 11" + catch {r hincrby smallhash str 1} smallerr + catch {r hincrby bighash str 1} bigerr + set rv {} + lappend rv [string match "ERR *not an integer*" $smallerr] + lappend rv [string match "ERR *not an integer*" $bigerr] + } {1 1} + + test {HINCRBY fails against hash value with spaces (right)} { + r hset smallhash str "11 " + r hset bighash str "11 " + catch {r hincrby smallhash str 1} smallerr + catch {r hincrby bighash str 1} bigerr + set rv {} + lappend rv [string match "ERR *not an integer*" $smallerr] + lappend rv [string match "ERR *not an integer*" $bigerr] + } {1 1} + + test {HINCRBY can detect overflows} { + set e {} + r hset hash n -9223372036854775484 + assert {[r hincrby hash n -1] == -9223372036854775485} + catch {r hincrby hash n -10000} e + set e + } {*overflow*} + + test {HINCRBYFLOAT against non existing database key} { + r del htest + list [r hincrbyfloat htest foo 2.5] + } {2.5} + + test {HINCRBYFLOAT against non existing hash key} { + set rv {} + r hdel smallhash tmp + r hdel bighash tmp + lappend rv [roundFloat [r hincrbyfloat smallhash tmp 2.5]] + lappend rv [roundFloat [r hget smallhash tmp]] + lappend rv [roundFloat [r hincrbyfloat bighash tmp 2.5]] + lappend rv [roundFloat [r hget bighash 
tmp]] + } {2.5 2.5 2.5 2.5} + + test {HINCRBYFLOAT against hash key created by hincrby itself} { + set rv {} + lappend rv [roundFloat [r hincrbyfloat smallhash tmp 3.5]] + lappend rv [roundFloat [r hget smallhash tmp]] + lappend rv [roundFloat [r hincrbyfloat bighash tmp 3.5]] + lappend rv [roundFloat [r hget bighash tmp]] + } {6 6 6 6} + + test {HINCRBYFLOAT against hash key originally set with HSET} { + r hset smallhash tmp 100 + r hset bighash tmp 100 + list [roundFloat [r hincrbyfloat smallhash tmp 2.5]] \ + [roundFloat [r hincrbyfloat bighash tmp 2.5]] + } {102.5 102.5} + + test {HINCRBYFLOAT over 32bit value} { + r hset smallhash tmp 17179869184 + r hset bighash tmp 17179869184 + list [r hincrbyfloat smallhash tmp 1] \ + [r hincrbyfloat bighash tmp 1] + } {17179869185 17179869185} + + test {HINCRBYFLOAT over 32bit value with over 32bit increment} { + r hset smallhash tmp 17179869184 + r hset bighash tmp 17179869184 + list [r hincrbyfloat smallhash tmp 17179869184] \ + [r hincrbyfloat bighash tmp 17179869184] + } {34359738368 34359738368} + + test {HINCRBYFLOAT fails against hash value with spaces (left)} { + r hset smallhash str " 11" + r hset bighash str " 11" + catch {r hincrbyfloat smallhash str 1} smallerr + catch {r hincrbyfloat bighash str 1} bigerr + set rv {} + lappend rv [string match "ERR *not*float*" $smallerr] + lappend rv [string match "ERR *not*float*" $bigerr] + } {1 1} + + test {HINCRBYFLOAT fails against hash value with spaces (right)} { + r hset smallhash str "11 " + r hset bighash str "11 " + catch {r hincrbyfloat smallhash str 1} smallerr + catch {r hincrbyfloat bighash str 1} bigerr + set rv {} + lappend rv [string match "ERR *not*float*" $smallerr] + lappend rv [string match "ERR *not*float*" $bigerr] + } {1 1} + + test {HINCRBYFLOAT fails against hash value that contains a null-terminator in the middle} { + r hset h f "1\x002" + catch {r hincrbyfloat h f 1} err + set rv {} + lappend rv [string match "ERR *not*float*" $err] + } {1} + + 
test {HSTRLEN against the small hash} { + set err {} + foreach k [array names smallhash *] { + if {[string length $smallhash($k)] ne [r hstrlen smallhash $k]} { + set err "[string length $smallhash($k)] != [r hstrlen smallhash $k]" + break + } + } + set _ $err + } {} + + test {HSTRLEN against the big hash} { + set err {} + foreach k [array names bighash *] { + if {[string length $bighash($k)] ne [r hstrlen bighash $k]} { + set err "[string length $bighash($k)] != [r hstrlen bighash $k]" + puts "HSTRLEN and logical length mismatch:" + puts "key: $k" + puts "Logical content: $bighash($k)" + puts "Server content: [r hget bighash $k]" + } + } + set _ $err + } {} + + test {HSTRLEN against non existing field} { + set rv {} + lappend rv [r hstrlen smallhash __123123123__] + lappend rv [r hstrlen bighash __123123123__] + set _ $rv + } {0 0} + + test {HSTRLEN corner cases} { + set vals { + -9223372036854775808 9223372036854775807 9223372036854775808 + {} 0 -1 x + } + foreach v $vals { + r hmset smallhash field $v + r hmset bighash field $v + set len1 [string length $v] + set len2 [r hstrlen smallhash field] + set len3 [r hstrlen bighash field] + assert {$len1 == $len2} + assert {$len2 == $len3} + } + } + + test {HINCRBYFLOAT over hash-max-listpack-value encoded with a listpack} { + set original_max_value [lindex [r config get hash-max-ziplist-value] 1] + r config set hash-max-listpack-value 8 + + # hash's value exceeds hash-max-listpack-value + r del smallhash + r del bighash + r hset smallhash tmp 0 + r hset bighash tmp 0 + r hincrbyfloat smallhash tmp 0.000005 + r hincrbyfloat bighash tmp 0.0000005 + assert_encoding listpack smallhash + assert_encoding hashtable bighash + + # hash's field exceeds hash-max-listpack-value + r del smallhash + r del bighash + r hincrbyfloat smallhash abcdefgh 1 + r hincrbyfloat bighash abcdefghi 1 + assert_encoding listpack smallhash + assert_encoding hashtable bighash + + r config set hash-max-listpack-value $original_max_value + } + + test 
{Hash ziplist regression test for large keys} { + r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a + r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b + r hget hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk + } {b} + + foreach size {10 512} { + test "Hash fuzzing #1 - $size fields" { + for {set times 0} {$times < 10} {incr times} { + catch {unset hash} + array set hash {} + r del hash + + # Create + for {set j 0} {$j < $size} {incr j} { + set field [randomValue] + set value [randomValue] + r hset hash $field $value + set hash($field) $value + } + + # Verify + foreach {k v} [array get hash] { + assert_equal $v [r hget hash $k] + } + assert_equal [array size hash] [r hlen hash] + } + } + + test "Hash fuzzing #2 - $size fields" { + for {set times 0} {$times < 10} {incr times} { + catch {unset hash} + array set hash {} + r del hash + + # Create + for {set j 0} {$j < $size} {incr j} { + randpath { + set field [randomValue] + set value [randomValue] + r hset hash $field $value + set hash($field) $value + } { + set field [randomSignedInt 512] + set value [randomSignedInt 512] + r hset hash 
$field $value + set hash($field) $value + } { + randpath { + set field [randomValue] + } { + set field [randomSignedInt 512] + } + r hdel hash $field + unset -nocomplain hash($field) + } + } + + # Verify + foreach {k v} [array get hash] { + assert_equal $v [r hget hash $k] + } + assert_equal [array size hash] [r hlen hash] + } + } + } + + test {Stress test the hash ziplist -> hashtable encoding conversion} { + r config set hash-max-ziplist-entries 32 + for {set j 0} {$j < 100} {incr j} { + r del myhash + for {set i 0} {$i < 64} {incr i} { + r hset myhash [randomValue] [randomValue] + } + assert_encoding hashtable myhash + } + } + + # The following test can only be executed if we don't use Valgrind, and if + # we are using x86_64 architecture, because: + # + # 1) Valgrind has floating point limitations, no support for 80 bits math. + # 2) Other archs may have the same limits. + # + # 1.23 cannot be represented correctly with 64 bit doubles, so we skip + # the test, since we are only testing pretty printing here and is not + # a bug if the program outputs things like 1.299999... 
+ if {!$::valgrind && [string match *x86_64* [exec uname -a]]} { + test {Test HINCRBYFLOAT for correct float representation (issue #2846)} { + r del myhash + assert {[r hincrbyfloat myhash float 1.23] eq {1.23}} + assert {[r hincrbyfloat myhash float 0.77] eq {2}} + assert {[r hincrbyfloat myhash float -0.1] eq {1.9}} + } + } + + test {Hash ziplist of various encodings} { + r del k + config_set hash-max-ziplist-entries 1000000000 + config_set hash-max-ziplist-value 1000000000 + r hset k ZIP_INT_8B 127 + r hset k ZIP_INT_16B 32767 + r hset k ZIP_INT_32B 2147483647 + r hset k ZIP_INT_64B 9223372036854775808 + r hset k ZIP_INT_IMM_MIN 0 + r hset k ZIP_INT_IMM_MAX 12 + r hset k ZIP_STR_06B [string repeat x 31] + r hset k ZIP_STR_14B [string repeat x 8191] + r hset k ZIP_STR_32B [string repeat x 65535] + set k [r hgetall k] + set dump [r dump k] + + # will be converted to dict at RESTORE + config_set hash-max-ziplist-entries 2 + config_set sanitize-dump-payload no mayfail + r restore kk 0 $dump + set kk [r hgetall kk] + + # make sure the values are right + assert_equal [lsort $k] [lsort $kk] + assert_equal [dict get $k ZIP_STR_06B] [string repeat x 31] + set k [dict remove $k ZIP_STR_06B] + assert_equal [dict get $k ZIP_STR_14B] [string repeat x 8191] + set k [dict remove $k ZIP_STR_14B] + assert_equal [dict get $k ZIP_STR_32B] [string repeat x 65535] + set k [dict remove $k ZIP_STR_32B] + set _ $k + } {ZIP_INT_8B 127 ZIP_INT_16B 32767 ZIP_INT_32B 2147483647 ZIP_INT_64B 9223372036854775808 ZIP_INT_IMM_MIN 0 ZIP_INT_IMM_MAX 12} + + test {Hash ziplist of various encodings - sanitize dump} { + config_set sanitize-dump-payload yes mayfail + r restore kk 0 $dump replace + set k [r hgetall k] + set kk [r hgetall kk] + + # make sure the values are right + assert_equal [lsort $k] [lsort $kk] + assert_equal [dict get $k ZIP_STR_06B] [string repeat x 31] + set k [dict remove $k ZIP_STR_06B] + assert_equal [dict get $k ZIP_STR_14B] [string repeat x 8191] + set k [dict remove $k 
ZIP_STR_14B] + assert_equal [dict get $k ZIP_STR_32B] [string repeat x 65535] + set k [dict remove $k ZIP_STR_32B] + set _ $k + } {ZIP_INT_8B 127 ZIP_INT_16B 32767 ZIP_INT_32B 2147483647 ZIP_INT_64B 9223372036854775808 ZIP_INT_IMM_MIN 0 ZIP_INT_IMM_MAX 12} + + # On some platforms strtold("+inf") with valgrind returns a non-inf result + if {!$::valgrind} { + test {HINCRBYFLOAT does not allow NaN or Infinity} { + assert_error "*value is NaN or Infinity*" {r hincrbyfloat hfoo field +inf} + assert_equal 0 [r exists hfoo] + } + } +} \ No newline at end of file diff --git a/tests/unit/type/list-2.tcl b/tests/unit/type/list-2.tcl new file mode 100644 index 000000000..b54bdc85a --- /dev/null +++ b/tests/unit/type/list-2.tcl @@ -0,0 +1,47 @@ +start_server { + tags {"list"} + overrides { + "list-max-ziplist-size" 4 + } +} { + source "tests/unit/type/list-common.tcl" + + foreach {type large} [array get largevalue] { + tags {"slow"} { + test "LTRIM stress testing - $type" { + set mylist {} + set startlen 32 + r del mylist + + # Start with the large value to ensure the + # right encoding is used. 
+ r rpush mylist $large + lappend mylist $large + + for {set i 0} {$i < $startlen} {incr i} { + set str [randomInt 9223372036854775807] + r rpush mylist $str + lappend mylist $str + } + + for {set i 0} {$i < 1000} {incr i} { + set min [expr {int(rand()*$startlen)}] + set max [expr {$min+int(rand()*$startlen)}] + set before_len [llength $mylist] + set before_len_r [r llen mylist] + assert_equal $before_len $before_len_r + set mylist [lrange $mylist $min $max] + r ltrim mylist $min $max + assert_equal $mylist [r lrange mylist 0 -1] "failed trim" + + for {set j [r llen mylist]} {$j < $startlen} {incr j} { + set str [randomInt 9223372036854775807] + r rpush mylist $str + lappend mylist $str + assert_equal $mylist [r lrange mylist 0 -1] "failed append match" + } + } + } + } + } +} \ No newline at end of file diff --git a/tests/unit/type/list-3.tcl b/tests/unit/type/list-3.tcl new file mode 100644 index 000000000..eba209f9f --- /dev/null +++ b/tests/unit/type/list-3.tcl @@ -0,0 +1,232 @@ +proc generate_cmd_on_list_key {key} { + set op [randomInt 7] + set small_signed_count [expr 5-[randomInt 10]] + if {[randomInt 2] == 0} { + set ele [randomInt 1000] + } else { + set ele [string repeat x [randomInt 10000]][randomInt 1000] + } + switch $op { + 0 {return "lpush $key $ele"} + 1 {return "rpush $key $ele"} + 2 {return "lpop $key"} + 3 {return "rpop $key"} + 4 { + return "lset $key $small_signed_count $ele" + } + 5 { + set otherele [randomInt 1000] + if {[randomInt 2] == 0} { + set where before + } else { + set where after + } + return "linsert $key $where $otherele $ele" + } + 6 { + set otherele "" + catch { + set index [randomInt [r llen $key]] + set otherele [r lindex $key $index] + } + return "lrem $key 1 $otherele" + } + } +} + +start_server { + tags {"list ziplist"} + overrides { + "list-max-ziplist-size" 16 + } +} { + test {Explicit regression for a list bug} { + set mylist {49376042582 
{BkG2o\pIC]4YYJa9cJ4GWZalG[4tin;1D2whSkCOW`mX;SFXGyS8sedcff3fQI^tgPCC@^Nu1J6o]meM@Lko]t_jRyotK?tH[\EvWqS]b`o2OCtjg:?nUTwdjpcUm]y:pg5q24q7LlCOwQE^}} + r del l + r rpush l [lindex $mylist 0] + r rpush l [lindex $mylist 1] + assert_equal [r lindex l 0] [lindex $mylist 0] + assert_equal [r lindex l 1] [lindex $mylist 1] + } + + test {Regression for quicklist #3343 bug} { + r del mylist + r lpush mylist 401 + r lpush mylist 392 + r rpush mylist [string repeat x 5105]"799" + r lset mylist -1 [string repeat x 1014]"702" + r lpop mylist + r lset mylist -1 [string repeat x 4149]"852" + r linsert mylist before 401 [string repeat x 9927]"12" + r lrange mylist 0 -1 + r ping ; # It's enough if the server is still alive + } {PONG} + + test {Check compression with recompress} { + r del key + config_set list-compress-depth 1 + config_set list-max-ziplist-size 16 + r rpush key a + r rpush key [string repeat b 50000] + r rpush key c + r lset key 1 d + r rpop key + r rpush key [string repeat e 5000] + r linsert key before f 1 + r rpush key g + r ping + } + + test {Crash due to wrongly recompress after lrem} { + r del key + config_set list-compress-depth 2 + r lpush key a + r lpush key [string repeat a 5000] + r lpush key [string repeat b 5000] + r lpush key [string repeat c 5000] + r rpush key [string repeat x 10000]"969" + r rpush key b + r lrem key 1 a + r rpop key + r lrem key 1 [string repeat x 10000]"969" + r rpush key crash + r ping + } + + test {LINSERT correctly recompress full quicklistNode after inserting a element before it} { + r del key + config_set list-compress-depth 1 + r rpush key b + r rpush key c + r lset key -1 [string repeat x 8192]"969" + r lpush key a + r rpush key d + r linsert key before b f + r rpop key + r ping + } + + test {LINSERT correctly recompress full quicklistNode after inserting a element after it} { + r del key + config_set list-compress-depth 1 + r rpush key b + r rpush key c + r lset key 0 [string repeat x 8192]"969" + r lpush key a + r rpush 
key d + r linsert key after c f + r lpop key + r ping + } + +foreach comp {2 1 0} { + set cycles 1000 + if {$::accurate} { set cycles 10000 } + config_set list-compress-depth $comp + + test "Stress tester for #3343-alike bugs comp: $comp" { + r del key + set sent {} + for {set j 0} {$j < $cycles} {incr j} { + catch { + set cmd [generate_cmd_on_list_key key] + lappend sent $cmd + + # execute the command, we expect commands to fail on syntax errors + r {*}$cmd + } + } + + set print_commands false + set crash false + if {[catch {r ping}]} { + puts "Server crashed" + set print_commands true + set crash true + } + + if {!$::external} { + # check valgrind and asan report for invalid reads after execute + # command so that we have a report that is easier to reproduce + set valgrind_errors [find_valgrind_errors [srv 0 stderr] false] + set asan_errors [sanitizer_errors_from_file [srv 0 stderr]] + if {$valgrind_errors != "" || $asan_errors != ""} { + puts "valgrind or asan found an issue" + set print_commands true + } + } + + if {$print_commands} { + puts "violating commands:" + foreach cmd $sent { + puts $cmd + } + } + + assert_equal $crash false + } +} ;# foreach comp + + tags {slow} { + test {ziplist implementation: value encoding and backlink} { + if {$::accurate} {set iterations 100} else {set iterations 10} + for {set j 0} {$j < $iterations} {incr j} { + r del l + set l {} + for {set i 0} {$i < 200} {incr i} { + randpath { + set data [string repeat x [randomInt 100000]] + } { + set data [randomInt 65536] + } { + set data [randomInt 4294967296] + } { + set data [randomInt 18446744073709551616] + } { + set data -[randomInt 65536] + if {$data eq {-0}} {set data 0} + } { + set data -[randomInt 4294967296] + if {$data eq {-0}} {set data 0} + } { + set data -[randomInt 18446744073709551616] + if {$data eq {-0}} {set data 0} + } + lappend l $data + r rpush l $data + } + assert_equal [llength $l] [r llen l] + # Traverse backward + for {set i 199} {$i >= 0} {incr i -1} { + if 
{[lindex $l $i] ne [r lindex l $i]} { + assert_equal [lindex $l $i] [r lindex l $i] + } + } + } + } + + test {ziplist implementation: encoding stress testing} { + for {set j 0} {$j < 200} {incr j} { + r del l + set l {} + set len [randomInt 400] + for {set i 0} {$i < $len} {incr i} { + set rv [randomValue] + randpath { + lappend l $rv + r rpush l $rv + } { + set l [concat [list $rv] $l] + r lpush l $rv + } + } + assert_equal [llength $l] [r llen l] + for {set i 0} {$i < $len} {incr i} { + if {[lindex $l $i] ne [r lindex l $i]} { + assert_equal [lindex $l $i] [r lindex l $i] + } + } + } + } + } +} \ No newline at end of file diff --git a/tests/unit/type/list-common.tcl b/tests/unit/type/list-common.tcl new file mode 100644 index 000000000..ab45f0b31 --- /dev/null +++ b/tests/unit/type/list-common.tcl @@ -0,0 +1,5 @@ +# We need a value larger than list-max-ziplist-value to make sure +# the list has the right encoding when it is swapped in again. +array set largevalue {} +set largevalue(ziplist) "hello" +set largevalue(linkedlist) [string repeat "hello" 4] diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl new file mode 100644 index 000000000..68eea8f66 --- /dev/null +++ b/tests/unit/type/list.tcl @@ -0,0 +1,2431 @@ +# check functionality compression of plain and packed nodes +start_server [list overrides [list save ""] ] { + r config set list-compress-depth 2 + r config set list-max-ziplist-size 1 + + # 3 test to check compression with plain and packed nodes + # 1. using push + insert + # 2. using push + insert + trim + # 3. 
using push + insert + set + + foreach {container size} {packed 500 plain 8193} { + test "$container node check compression with insert and pop" { + r flushdb + r lpush list1 [string repeat a $size] + r lpush list1 [string repeat b $size] + r lpush list1 [string repeat c $size] + r lpush list1 [string repeat d $size] + r linsert list1 after [string repeat d $size] [string repeat e $size] + r linsert list1 after [string repeat d $size] [string repeat f $size] + r linsert list1 after [string repeat d $size] [string repeat g $size] + r linsert list1 after [string repeat d $size] [string repeat j $size] + assert_equal [r lpop list1] [string repeat d $size] + assert_equal [r lpop list1] [string repeat j $size] + assert_equal [r lpop list1] [string repeat g $size] + assert_equal [r lpop list1] [string repeat f $size] + assert_equal [r lpop list1] [string repeat e $size] + assert_equal [r lpop list1] [string repeat c $size] + assert_equal [r lpop list1] [string repeat b $size] + assert_equal [r lpop list1] [string repeat a $size] + }; + + test "$container node check compression combined with trim" { + r flushdb + r lpush list2 [string repeat a $size] + r linsert list2 after [string repeat a $size] [string repeat b $size] + r rpush list2 [string repeat c $size] + assert_equal [string repeat b $size] [r lindex list2 1] + r LTRIM list2 1 -1 + r llen list2 + } {2} + + test {LINSERT against non-list value error} { + r set k1 v1 + assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r linsert k1 after 0 0} + } + + test "$container node check compression with lset" { + r flushdb + r lpush list3 [string repeat a $size] + r LSET list3 0 [string repeat b $size] + assert_equal [string repeat b $size] [r lindex list3 0] + r lpush list3 [string repeat c $size] + r LSET list3 0 [string repeat d $size] + assert_equal [string repeat d $size] [r lindex list3 0] + } + } ;# foreach + + # revert config for external mode tests. 
+ r config set list-compress-depth 0 +} + +# check functionality of plain nodes using low packed-threshold +start_server [list overrides [list save ""] ] { +foreach type {listpack quicklist} { + if {$type eq "listpack"} { + r config set list-max-listpack-size -2 + } else { + r config set list-max-listpack-size 1 + } + + # basic command check for plain nodes - "LPUSH & LPOP" + test {Test LPUSH and LPOP on plain nodes} { + r flushdb + r debug quicklist-packed-threshold 1b + r lpush lst 9 + r lpush lst xxxxxxxxxx + r lpush lst xxxxxxxxxx + assert_encoding $type lst + set s0 [s used_memory] + assert {$s0 > 10} + assert {[r llen lst] == 3} + set s0 [r rpop lst] + set s1 [r rpop lst] + assert {$s0 eq "9"} + assert {[r llen lst] == 1} + r lpop lst + assert {[string length $s1] == 10} + # check rdb + r lpush lst xxxxxxxxxx + r lpush lst bb + r debug reload + assert_equal [r rpop lst] "xxxxxxxxxx" + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} + + # basic command check for plain nodes - "LINDEX & LINSERT" + test {Test LINDEX and LINSERT on plain nodes} { + r flushdb + r debug quicklist-packed-threshold 1b + r lpush lst xxxxxxxxxxx + r lpush lst 9 + r lpush lst xxxxxxxxxxx + assert_encoding $type lst + r linsert lst before "9" "8" + assert {[r lindex lst 1] eq "8"} + r linsert lst BEFORE "9" "7" + r linsert lst BEFORE "9" "xxxxxxxxxxx" + assert {[r lindex lst 3] eq "xxxxxxxxxxx"} + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} + + # basic command check for plain nodes - "LTRIM" + test {Test LTRIM on plain nodes} { + r flushdb + r debug quicklist-packed-threshold 1b + r lpush lst1 9 + r lpush lst1 xxxxxxxxxxx + r lpush lst1 9 + assert_encoding $type lst1 + r LTRIM lst1 1 -1 + assert_equal [r llen lst1] 2 + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} + + # basic command check for plain nodes - "LREM" + test {Test LREM on plain nodes} { + r flushdb + r debug quicklist-packed-threshold 1b + r lpush lst one + r lpush lst xxxxxxxxxxx + 
assert_encoding $type lst + set s0 [s used_memory] + assert {$s0 > 10} + r lpush lst 9 + r LREM lst -2 "one" + assert_equal [r llen lst] 2 + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} + + # basic command check for plain nodes - "LPOS" + test {Test LPOS on plain nodes} { + r flushdb + r debug quicklist-packed-threshold 1b + r RPUSH lst "aa" + r RPUSH lst "bb" + r RPUSH lst "cc" + assert_encoding $type lst + r LSET lst 0 "xxxxxxxxxxx" + assert_equal [r LPOS lst "xxxxxxxxxxx"] 0 + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} + + # basic command check for plain nodes - "LMOVE" + test {Test LMOVE on plain nodes} { + r flushdb + r debug quicklist-packed-threshold 1b + r RPUSH lst2{t} "aa" + r RPUSH lst2{t} "bb" + assert_encoding $type lst2{t} + r LSET lst2{t} 0 xxxxxxxxxxx + r RPUSH lst2{t} "cc" + r RPUSH lst2{t} "dd" + r LMOVE lst2{t} lst{t} RIGHT LEFT + r LMOVE lst2{t} lst{t} LEFT RIGHT + assert_equal [r llen lst{t}] 2 + assert_equal [r llen lst2{t}] 2 + assert_equal [r lpop lst2{t}] "bb" + assert_equal [r lpop lst2{t}] "cc" + assert_equal [r lpop lst{t}] "dd" + assert_equal [r lpop lst{t}] "xxxxxxxxxxx" + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} + + # testing LSET with combinations of node types + # plain->packed , packed->plain, plain->plain, packed->packed + test {Test LSET with packed / plain combinations} { + r debug quicklist-packed-threshold 5b + r RPUSH lst "aa" + r RPUSH lst "bb" + assert_encoding $type lst + r lset lst 0 [string repeat d 50001] + set s1 [r lpop lst] + assert_equal $s1 [string repeat d 50001] + r RPUSH lst [string repeat f 50001] + r lset lst 0 [string repeat e 50001] + set s1 [r lpop lst] + assert_equal $s1 [string repeat e 50001] + r RPUSH lst [string repeat m 50001] + r lset lst 0 "bb" + set s1 [r lpop lst] + assert_equal $s1 "bb" + r RPUSH lst "bb" + r lset lst 0 "cc" + set s1 [r lpop lst] + assert_equal $s1 "cc" + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} + + # checking 
LSET in case ziplist needs to be split + test {Test LSET with packed is split in the middle} { + set original_config [config_get_set list-max-listpack-size 4] + r flushdb + r debug quicklist-packed-threshold 5b + r RPUSH lst "aa" + r RPUSH lst "bb" + r RPUSH lst "cc" + r RPUSH lst "dd" + r RPUSH lst "ee" + assert_encoding quicklist lst + r lset lst 2 [string repeat e 10] + assert_equal [r lpop lst] "aa" + assert_equal [r lpop lst] "bb" + assert_equal [r lpop lst] [string repeat e 10] + assert_equal [r lpop lst] "dd" + assert_equal [r lpop lst] "ee" + r debug quicklist-packed-threshold 0 + r config set list-max-listpack-size $original_config + } {OK} {needs:debug} + + + # repeating "plain check LSET with combinations" + # but now with single item in each ziplist + test {Test LSET with packed consist only one item} { + r flushdb + set original_config [config_get_set list-max-ziplist-size 1] + r debug quicklist-packed-threshold 1b + r RPUSH lst "aa" + r RPUSH lst "bb" + r lset lst 0 [string repeat d 50001] + set s1 [r lpop lst] + assert_equal $s1 [string repeat d 50001] + r RPUSH lst [string repeat f 50001] + r lset lst 0 [string repeat e 50001] + set s1 [r lpop lst] + assert_equal $s1 [string repeat e 50001] + r RPUSH lst [string repeat m 50001] + r lset lst 0 "bb" + set s1 [r lpop lst] + assert_equal $s1 "bb" + r RPUSH lst "bb" + r lset lst 0 "cc" + set s1 [r lpop lst] + assert_equal $s1 "cc" + r debug quicklist-packed-threshold 0 + r config set list-max-ziplist-size $original_config + } {OK} {needs:debug} + + test {Crash due to delete entry from a compress quicklist node} { + r flushdb + r debug quicklist-packed-threshold 100b + set original_config [config_get_set list-compress-depth 1] + + set small_ele [string repeat x 32] + set large_ele [string repeat x 100] + + # Push a large element + r RPUSH lst $large_ele + + # Insert two elements and keep them in the same node + r RPUSH lst $small_ele + r RPUSH lst $small_ele + assert_encoding $type lst + + # When setting 
the position of -1 to a large element, we first insert + # a large element at the end and then delete its previous element. + r LSET lst -1 $large_ele + assert_equal "$large_ele $small_ele $large_ele" [r LRANGE lst 0 -1] + + r debug quicklist-packed-threshold 0 + r config set list-compress-depth $original_config + } {OK} {needs:debug} + + test {Crash due to split quicklist node wrongly} { + r flushdb + r debug quicklist-packed-threshold 10b + + r LPUSH lst "aa" + r LPUSH lst "bb" + assert_encoding $type lst + r LSET lst -2 [string repeat x 10] + r RPOP lst + assert_equal [string repeat x 10] [r LRANGE lst 0 -1] + + r debug quicklist-packed-threshold 0 + } {OK} {needs:debug} +} +} + +run_solo {list-large-memory} { +start_server [list overrides [list save ""] ] { + +# test if the server supports such large configs (avoid 32 bit builds) +catch { + r config set proto-max-bulk-len 10000000000 ;#10gb + r config set client-query-buffer-limit 10000000000 ;#10gb +} +if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} { + + set str_length 5000000000 + + # repeating all the plain nodes basic checks with 5gb values + test {Test LPUSH and LPOP on plain nodes over 4GB} { + r flushdb + r lpush lst 9 + r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" + write_big_bulk $str_length; + r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" + write_big_bulk $str_length; + set s0 [s used_memory] + assert {$s0 > $str_length} + assert {[r llen lst] == 3} + assert_equal [r rpop lst] "9" + assert_equal [read_big_bulk {r rpop lst}] $str_length + assert {[r llen lst] == 1} + assert_equal [read_big_bulk {r rpop lst}] $str_length + } {} {large-memory} + + test {Test LINDEX and LINSERT on plain nodes over 4GB} { + r flushdb + r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" + write_big_bulk $str_length; + r lpush lst 9 + r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" + write_big_bulk $str_length; + r linsert lst before "9" "8" + assert_equal [r lindex lst 1] "8" + r LINSERT lst BEFORE "9" "7" + 
r write "*5\r\n\$7\r\nLINSERT\r\n\$3\r\nlst\r\n\$6\r\nBEFORE\r\n\$3\r\n\"9\"\r\n" + write_big_bulk 10; + assert_equal [read_big_bulk {r rpop lst}] $str_length + } {} {large-memory} + + test {Test LTRIM on plain nodes over 4GB} { + r flushdb + r lpush lst 9 + r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" + write_big_bulk $str_length; + r lpush lst 9 + r LTRIM lst 1 -1 + assert_equal [r llen lst] 2 + assert_equal [r rpop lst] 9 + assert_equal [read_big_bulk {r rpop lst}] $str_length + } {} {large-memory} + + test {Test LREM on plain nodes over 4GB} { + r flushdb + r lpush lst one + r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" + write_big_bulk $str_length; + r lpush lst 9 + r LREM lst -2 "one" + assert_equal [read_big_bulk {r rpop lst}] $str_length + r llen lst + } {1} {large-memory} + + test {Test LSET on plain nodes over 4GB} { + r flushdb + r RPUSH lst "aa" + r RPUSH lst "bb" + r RPUSH lst "cc" + r write "*4\r\n\$4\r\nLSET\r\n\$3\r\nlst\r\n\$1\r\n0\r\n" + write_big_bulk $str_length; + assert_equal [r rpop lst] "cc" + assert_equal [r rpop lst] "bb" + assert_equal [read_big_bulk {r rpop lst}] $str_length + } {} {large-memory} + + test {Test LSET on plain nodes with large elements under packed_threshold over 4GB} { + r flushdb + r rpush lst a b c d e + for {set i 0} {$i < 5} {incr i} { + r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n" + write_big_bulk 1000000000 + } + r ping + } {PONG} {large-memory} + + test {Test LSET splits a quicklist node, and then merge} { + # Test when a quicklist node can't be inserted and is split, the split + # node merges with the node before it and the `before` node is kept. + r flushdb + r rpush lst [string repeat "x" 4096] + r lpush lst a b c d e f g + r lpush lst [string repeat "y" 4096] + # now: [y...] [g f e d c b a x...] + # (node0) (node1) + # Keep inserting elements into node1 until node1 is split into two + # nodes([g] [...]), eventually node0 will merge with the [g] node. 
+ # Since node0 is larger, after the merge node0 will be kept and + # the [g] node will be deleted. + for {set i 7} {$i >= 3} {incr i -1} { + r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n" + write_big_bulk 1000000000 + } + assert_equal "g" [r lindex lst 1] + r ping + } {PONG} {large-memory} + + test {Test LSET splits a LZF compressed quicklist node, and then merge} { + # Test when a LZF compressed quicklist node can't be inserted and is split, + # the split node merges with the node before it and the split node is kept. + r flushdb + r config set list-compress-depth 1 + r lpush lst [string repeat "x" 2000] + r rpush lst [string repeat "y" 7000] + r rpush lst a b c d e f g + r rpush lst [string repeat "z" 8000] + r lset lst 0 h + # now: [h] [y... a b c d e f g] [z...] + # node0 node1(LZF) + # Keep inserting elements into node1 until node1 is split into two + # nodes([y...] [...]), eventually node0 will merge with the [y...] node. + # Since [y...] node is larger, after the merge node0 will be deleted and + # the [y...] node will be kept. 
+ for {set i 7} {$i >= 3} {incr i -1} { + r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n" + write_big_bulk 1000000000 + } + assert_equal "h" [r lindex lst 0] + r config set list-compress-depth 0 + r ping + } {PONG} {large-memory} + + test {Test LMOVE on plain nodes over 4GB} { + r flushdb + r RPUSH lst2{t} "aa" + r RPUSH lst2{t} "bb" + r write "*4\r\n\$4\r\nLSET\r\n\$7\r\nlst2{t}\r\n\$1\r\n0\r\n" + write_big_bulk $str_length; + r RPUSH lst2{t} "cc" + r RPUSH lst2{t} "dd" + r LMOVE lst2{t} lst{t} RIGHT LEFT + assert_equal [read_big_bulk {r LMOVE lst2{t} lst{t} LEFT RIGHT}] $str_length + assert_equal [r llen lst{t}] 2 + assert_equal [r llen lst2{t}] 2 + assert_equal [r lpop lst2{t}] "bb" + assert_equal [r lpop lst2{t}] "cc" + assert_equal [r lpop lst{t}] "dd" + assert_equal [read_big_bulk {r rpop lst{t}}] $str_length + } {} {large-memory} + + # restore defaults + r config set proto-max-bulk-len 536870912 + r config set client-query-buffer-limit 1073741824 + +} ;# skip 32bit builds +} +} ;# run_solo + +start_server { + tags {"list"} + overrides { + "list-max-ziplist-size" -1 + } +} { + source "tests/unit/type/list-common.tcl" + + # A helper function to execute either B*POP or BLMPOP* with one input key. + proc bpop_command {rd pop key timeout} { + if {$pop == "BLMPOP_LEFT"} { + $rd blmpop $timeout 1 $key left count 1 + } elseif {$pop == "BLMPOP_RIGHT"} { + $rd blmpop $timeout 1 $key right count 1 + } else { + $rd $pop $key $timeout + } + } + + # A helper function to execute either B*POP or BLMPOP* with two input keys. 
+ proc bpop_command_two_key {rd pop key key2 timeout} { + if {$pop == "BLMPOP_LEFT"} { + $rd blmpop $timeout 2 $key $key2 left count 1 + } elseif {$pop == "BLMPOP_RIGHT"} { + $rd blmpop $timeout 2 $key $key2 right count 1 + } else { + $rd $pop $key $key2 $timeout + } + } + + proc create_listpack {key entries} { + r del $key + foreach entry $entries { r rpush $key $entry } + assert_encoding listpack $key + } + + proc create_quicklist {key entries} { + r del $key + foreach entry $entries { r rpush $key $entry } + assert_encoding quicklist $key + } + +foreach {type large} [array get largevalue] { + test "LPOS basic usage - $type" { + r DEL mylist + r RPUSH mylist a b c $large 2 3 c c + assert {[r LPOS mylist a] == 0} + assert {[r LPOS mylist c] == 2} + } + + test {LPOS RANK (positive, negative and zero rank) option} { + assert {[r LPOS mylist c RANK 1] == 2} + assert {[r LPOS mylist c RANK 2] == 6} + assert {[r LPOS mylist c RANK 4] eq ""} + assert {[r LPOS mylist c RANK -1] == 7} + assert {[r LPOS mylist c RANK -2] == 6} + assert_error "*RANK can't be zero: use 1 to start from the first match, 2 from the second ... 
or use negative to start*" {r LPOS mylist c RANK 0} + assert_error "*value is out of range*" {r LPOS mylist c RANK -9223372036854775808} + } + + test {LPOS COUNT option} { + assert {[r LPOS mylist c COUNT 0] == {2 6 7}} + assert {[r LPOS mylist c COUNT 1] == {2}} + assert {[r LPOS mylist c COUNT 2] == {2 6}} + assert {[r LPOS mylist c COUNT 100] == {2 6 7}} + } + + test {LPOS COUNT + RANK option} { + assert {[r LPOS mylist c COUNT 0 RANK 2] == {6 7}} + assert {[r LPOS mylist c COUNT 2 RANK -1] == {7 6}} + } + + test {LPOS non existing key} { + assert {[r LPOS mylistxxx c COUNT 0 RANK 2] eq {}} + } + + test {LPOS no match} { + assert {[r LPOS mylist x COUNT 2 RANK -1] eq {}} + assert {[r LPOS mylist x RANK -1] eq {}} + } + + test {LPOS MAXLEN} { + assert {[r LPOS mylist a COUNT 0 MAXLEN 1] == {0}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 1] == {}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3] == {2}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3 RANK -1] == {7 6}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 7 RANK 2] == {6}} + } + + test {LPOS when RANK is greater than matches} { + r DEL mylist + r LPUSH mylist a + assert {[r LPOS mylist b COUNT 10 RANK 5] eq {}} + } + + test "LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - $type" { + # first lpush then rpush + r del mylist1 + assert_equal 1 [r lpush mylist1 $large] + assert_encoding $type mylist1 + assert_equal 2 [r rpush mylist1 b] + assert_equal 3 [r rpush mylist1 c] + assert_equal 3 [r llen mylist1] + assert_equal $large [r lindex mylist1 0] + assert_equal b [r lindex mylist1 1] + assert_equal c [r lindex mylist1 2] + assert_equal {} [r lindex mylist1 3] + assert_equal c [r rpop mylist1] + assert_equal $large [r lpop mylist1] + + # first rpush then lpush + r del mylist2 + assert_equal 1 [r rpush mylist2 $large] + assert_equal 2 [r lpush mylist2 b] + assert_equal 3 [r lpush mylist2 c] + assert_encoding $type mylist2 + assert_equal 3 [r llen mylist2] + assert_equal c [r lindex mylist2 0] + assert_equal b [r lindex mylist2 1] + 
assert_equal $large [r lindex mylist2 2] + assert_equal {} [r lindex mylist2 3] + assert_equal $large [r rpop mylist2] + assert_equal c [r lpop mylist2] + } + + test "LPOP/RPOP with wrong number of arguments" { + assert_error {*wrong number of arguments for 'lpop' command} {r lpop key 1 1} + assert_error {*wrong number of arguments for 'rpop' command} {r rpop key 2 2} + } + + test "RPOP/LPOP with the optional count argument - $type" { + assert_equal 7 [r lpush listcount aa $large cc dd ee ff gg] + assert_equal {gg} [r lpop listcount 1] + assert_equal {ff ee} [r lpop listcount 2] + assert_equal "aa $large" [r rpop listcount 2] + assert_equal {cc} [r rpop listcount 1] + assert_equal {dd} [r rpop listcount 123] + assert_error "*ERR*range*" {r lpop forbarqaz -123} + } +} + + proc verify_resp_response {resp response resp2_response resp3_response} { + if {$resp == 2} { + assert_equal $response $resp2_response + } elseif {$resp == 3} { + assert_equal $response $resp3_response + } + } + + foreach resp {3 2} { + if {[lsearch $::denytags "resp3"] >= 0} { + if {$resp == 3} {continue} + } elseif {$::force_resp3} { + if {$resp == 2} {continue} + } + r hello $resp + + # Make sure we can distinguish between an empty array and a null response + r readraw 1 + + test "LPOP/RPOP with the count 0 returns an empty array in RESP$resp" { + r lpush listcount zero + assert_equal {*0} [r lpop listcount 0] + assert_equal {*0} [r rpop listcount 0] + } + + test "LPOP/RPOP against non existing key in RESP$resp" { + r del non_existing_key + + verify_resp_response $resp [r lpop non_existing_key] {$-1} {_} + verify_resp_response $resp [r rpop non_existing_key] {$-1} {_} + } + + test "LPOP/RPOP with against non existing key in RESP$resp" { + r del non_existing_key + + verify_resp_response $resp [r lpop non_existing_key 0] {*-1} {_} + verify_resp_response $resp [r lpop non_existing_key 1] {*-1} {_} + + verify_resp_response $resp [r rpop non_existing_key 0] {*-1} {_} + verify_resp_response $resp [r 
rpop non_existing_key 1] {*-1} {_} + } + + r readraw 0 + r hello 2 + } + + test {Variadic RPUSH/LPUSH} { + r del mylist + assert_equal 4 [r lpush mylist a b c d] + assert_equal 8 [r rpush mylist 0 1 2 3] + assert_equal {d c b a 0 1 2 3} [r lrange mylist 0 -1] + } + + test {DEL a list} { + assert_equal 1 [r del mylist2] + assert_equal 0 [r exists mylist2] + assert_equal 0 [r llen mylist2] + } + + foreach {type large} [array get largevalue] { + foreach {pop} {BLPOP BLMPOP_LEFT} { + test "$pop: single existing list - $type" { + set rd [redis_deferring_client] + create_$type blist "a b $large c d" + + bpop_command $rd $pop blist 1 + assert_equal {blist a} [$rd read] + if {$pop == "BLPOP"} { + bpop_command $rd BRPOP blist 1 + } else { + bpop_command $rd BLMPOP_RIGHT blist 1 + } + assert_equal {blist d} [$rd read] + + bpop_command $rd $pop blist 1 + assert_equal {blist b} [$rd read] + if {$pop == "BLPOP"} { + bpop_command $rd BRPOP blist 1 + } else { + bpop_command $rd BLMPOP_RIGHT blist 1 + } + assert_equal {blist c} [$rd read] + + assert_equal 1 [r llen blist] + $rd close + } + + test "$pop: multiple existing lists - $type" { + set rd [redis_deferring_client] + create_$type blist1{t} "a $large c" + create_$type blist2{t} "d $large f" + + bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 + assert_equal {blist1{t} a} [$rd read] + if {$pop == "BLPOP"} { + bpop_command_two_key $rd BRPOP blist1{t} blist2{t} 1 + } else { + bpop_command_two_key $rd BLMPOP_RIGHT blist1{t} blist2{t} 1 + } + assert_equal {blist1{t} c} [$rd read] + assert_equal 1 [r llen blist1{t}] + assert_equal 3 [r llen blist2{t}] + + bpop_command_two_key $rd $pop blist2{t} blist1{t} 1 + assert_equal {blist2{t} d} [$rd read] + if {$pop == "BLPOP"} { + bpop_command_two_key $rd BRPOP blist2{t} blist1{t} 1 + } else { + bpop_command_two_key $rd BLMPOP_RIGHT blist2{t} blist1{t} 1 + } + assert_equal {blist2{t} f} [$rd read] + assert_equal 1 [r llen blist1{t}] + assert_equal 1 [r llen blist2{t}] + $rd close + } + + 
test "$pop: second list has an entry - $type" { + set rd [redis_deferring_client] + r del blist1{t} + create_$type blist2{t} "d $large f" + + bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 + assert_equal {blist2{t} d} [$rd read] + if {$pop == "BLPOP"} { + bpop_command_two_key $rd BRPOP blist1{t} blist2{t} 1 + } else { + bpop_command_two_key $rd BLMPOP_RIGHT blist1{t} blist2{t} 1 + } + assert_equal {blist2{t} f} [$rd read] + assert_equal 0 [r llen blist1{t}] + assert_equal 1 [r llen blist2{t}] + $rd close + } + } + + test "BRPOPLPUSH - $type" { + r del target{t} + r rpush target{t} bar + + set rd [redis_deferring_client] + create_$type blist{t} "a b $large c d" + + $rd brpoplpush blist{t} target{t} 1 + assert_equal d [$rd read] + + assert_equal d [r lpop target{t}] + assert_equal "a b $large c" [r lrange blist{t} 0 -1] + $rd close + } + + foreach wherefrom {left right} { + foreach whereto {left right} { + test "BLMOVE $wherefrom $whereto - $type" { + r del target{t} + r rpush target{t} bar + + set rd [redis_deferring_client] + create_$type blist{t} "a b $large c d" + + $rd blmove blist{t} target{t} $wherefrom $whereto 1 + set poppedelement [$rd read] + + if {$wherefrom eq "right"} { + assert_equal d $poppedelement + assert_equal "a b $large c" [r lrange blist{t} 0 -1] + } else { + assert_equal a $poppedelement + assert_equal "b $large c d" [r lrange blist{t} 0 -1] + } + + if {$whereto eq "right"} { + assert_equal $poppedelement [r rpop target{t}] + } else { + assert_equal $poppedelement [r lpop target{t}] + } + $rd close + } + } + } + } + +foreach {pop} {BLPOP BLMPOP_LEFT} { + test "$pop, LPUSH + DEL should not awake blocked client" { + set rd [redis_deferring_client] + r del list + + bpop_command $rd $pop list 0 + wait_for_blocked_client + + r multi + r lpush list a + r del list + r exec + r del list + r lpush list b + assert_equal {list b} [$rd read] + $rd close + } + + test "$pop, LPUSH + DEL + SET should not awake blocked client" { + set rd 
[redis_deferring_client] + r del list + + bpop_command $rd $pop list 0 + wait_for_blocked_client + + r multi + r lpush list a + r del list + r set list foo + r exec + r del list + r lpush list b + assert_equal {list b} [$rd read] + $rd close + } +} + + test "BLPOP with same key multiple times should work (issue #801)" { + set rd [redis_deferring_client] + r del list1{t} list2{t} + + # Data arriving after the BLPOP. + $rd blpop list1{t} list2{t} list2{t} list1{t} 0 + wait_for_blocked_client + r lpush list1{t} a + assert_equal [$rd read] {list1{t} a} + $rd blpop list1{t} list2{t} list2{t} list1{t} 0 + wait_for_blocked_client + r lpush list2{t} b + assert_equal [$rd read] {list2{t} b} + + # Data already there. + r lpush list1{t} a + r lpush list2{t} b + $rd blpop list1{t} list2{t} list2{t} list1{t} 0 + assert_equal [$rd read] {list1{t} a} + $rd blpop list1{t} list2{t} list2{t} list1{t} 0 + assert_equal [$rd read] {list2{t} b} + $rd close + } + +foreach {pop} {BLPOP BLMPOP_LEFT} { + test "MULTI/EXEC is isolated from the point of view of $pop" { + set rd [redis_deferring_client] + r del list + + bpop_command $rd $pop list 0 + wait_for_blocked_client + + r multi + r lpush list a + r lpush list b + r lpush list c + r exec + assert_equal {list c} [$rd read] + $rd close + } + + test "$pop with variadic LPUSH" { + set rd [redis_deferring_client] + r del blist + bpop_command $rd $pop blist 0 + wait_for_blocked_client + assert_equal 2 [r lpush blist foo bar] + assert_equal {blist bar} [$rd read] + assert_equal foo [lindex [r lrange blist 0 -1] 0] + $rd close + } +} + + test "BRPOPLPUSH with zero timeout should block indefinitely" { + set rd [redis_deferring_client] + r del blist{t} target{t} + r rpush target{t} bar + $rd brpoplpush blist{t} target{t} 0 + wait_for_blocked_clients_count 1 + r rpush blist{t} foo + assert_equal foo [$rd read] + assert_equal {foo bar} [r lrange target{t} 0 -1] + $rd close + } + + foreach wherefrom {left right} { + foreach whereto {left right} { + 
test "BLMOVE $wherefrom $whereto with zero timeout should block indefinitely" { + set rd [redis_deferring_client] + r del blist{t} target{t} + r rpush target{t} bar + $rd blmove blist{t} target{t} $wherefrom $whereto 0 + wait_for_blocked_clients_count 1 + r rpush blist{t} foo + assert_equal foo [$rd read] + if {$whereto eq "right"} { + assert_equal {bar foo} [r lrange target{t} 0 -1] + } else { + assert_equal {foo bar} [r lrange target{t} 0 -1] + } + $rd close + } + } + } + + foreach wherefrom {left right} { + foreach whereto {left right} { + test "BLMOVE ($wherefrom, $whereto) with a client BLPOPing the target list" { + set rd [redis_deferring_client] + set rd2 [redis_deferring_client] + r del blist{t} target{t} + $rd2 blpop target{t} 0 + wait_for_blocked_clients_count 1 + $rd blmove blist{t} target{t} $wherefrom $whereto 0 + wait_for_blocked_clients_count 2 + r rpush blist{t} foo + assert_equal foo [$rd read] + assert_equal {target{t} foo} [$rd2 read] + assert_equal 0 [r exists target{t}] + $rd close + $rd2 close + } + } + } + + test "BRPOPLPUSH with wrong source type" { + set rd [redis_deferring_client] + r del blist{t} target{t} + r set blist{t} nolist + $rd brpoplpush blist{t} target{t} 1 + assert_error "WRONGTYPE*" {$rd read} + $rd close + } + + test "BRPOPLPUSH with wrong destination type" { + set rd [redis_deferring_client] + r del blist{t} target{t} + r set target{t} nolist + r lpush blist{t} foo + $rd brpoplpush blist{t} target{t} 1 + assert_error "WRONGTYPE*" {$rd read} + $rd close + + set rd [redis_deferring_client] + r del blist{t} target{t} + r set target{t} nolist + $rd brpoplpush blist{t} target{t} 0 + wait_for_blocked_clients_count 1 + r rpush blist{t} foo + assert_error "WRONGTYPE*" {$rd read} + assert_equal {foo} [r lrange blist{t} 0 -1] + $rd close + } + + test "BRPOPLPUSH maintains order of elements after failure" { + set rd [redis_deferring_client] + r del blist{t} target{t} + r set target{t} nolist + $rd brpoplpush blist{t} target{t} 0 + 
wait_for_blocked_client + r rpush blist{t} a b c + assert_error "WRONGTYPE*" {$rd read} + $rd close + r lrange blist{t} 0 -1 + } {a b c} + + test "BRPOPLPUSH with multiple blocked clients" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + r del blist{t} target1{t} target2{t} + r set target1{t} nolist + $rd1 brpoplpush blist{t} target1{t} 0 + wait_for_blocked_clients_count 1 + $rd2 brpoplpush blist{t} target2{t} 0 + wait_for_blocked_clients_count 2 + r lpush blist{t} foo + + assert_error "WRONGTYPE*" {$rd1 read} + assert_equal {foo} [$rd2 read] + assert_equal {foo} [r lrange target2{t} 0 -1] + $rd1 close + $rd2 close + } + + test "BLMPOP with multiple blocked clients" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + set rd3 [redis_deferring_client] + set rd4 [redis_deferring_client] + r del blist{t} blist2{t} + + $rd1 blmpop 0 2 blist{t} blist2{t} left count 1 + wait_for_blocked_clients_count 1 + $rd2 blmpop 0 2 blist{t} blist2{t} right count 10 + wait_for_blocked_clients_count 2 + $rd3 blmpop 0 2 blist{t} blist2{t} left count 10 + wait_for_blocked_clients_count 3 + $rd4 blmpop 0 2 blist{t} blist2{t} right count 1 + wait_for_blocked_clients_count 4 + + r multi + r lpush blist{t} a b c d e + r lpush blist2{t} 1 2 3 4 5 + r exec + + assert_equal {blist{t} e} [$rd1 read] + assert_equal {blist{t} {a b c d}} [$rd2 read] + assert_equal {blist2{t} {5 4 3 2 1}} [$rd3 read] + + r lpush blist2{t} 1 2 3 + assert_equal {blist2{t} 1} [$rd4 read] + $rd1 close + $rd2 close + $rd3 close + $rd4 close + } + + test "Linked LMOVEs" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + + r del list1{t} list2{t} list3{t} + + $rd1 blmove list1{t} list2{t} right left 0 + wait_for_blocked_clients_count 1 + $rd2 blmove list2{t} list3{t} left right 0 + wait_for_blocked_clients_count 2 + + r rpush list1{t} foo + + assert_equal {} [r lrange list1{t} 0 -1] + assert_equal {} [r lrange list2{t} 0 -1] + assert_equal {foo} [r 
lrange list3{t} 0 -1] + $rd1 close + $rd2 close + } + + test "Circular BRPOPLPUSH" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + + r del list1{t} list2{t} + + $rd1 brpoplpush list1{t} list2{t} 0 + wait_for_blocked_clients_count 1 + $rd2 brpoplpush list2{t} list1{t} 0 + wait_for_blocked_clients_count 2 + + r rpush list1{t} foo + + assert_equal {foo} [r lrange list1{t} 0 -1] + assert_equal {} [r lrange list2{t} 0 -1] + $rd1 close + $rd2 close + } + + test "Self-referential BRPOPLPUSH" { + set rd [redis_deferring_client] + + r del blist{t} + + $rd brpoplpush blist{t} blist{t} 0 + wait_for_blocked_client + + r rpush blist{t} foo + + assert_equal {foo} [r lrange blist{t} 0 -1] + $rd close + } + + test "BRPOPLPUSH inside a transaction" { + r del xlist{t} target{t} + r lpush xlist{t} foo + r lpush xlist{t} bar + + r multi + r brpoplpush xlist{t} target{t} 0 + r brpoplpush xlist{t} target{t} 0 + r brpoplpush xlist{t} target{t} 0 + r lrange xlist{t} 0 -1 + r lrange target{t} 0 -1 + r exec + } {foo bar {} {} {bar foo}} + + test "PUSH resulting from BRPOPLPUSH affect WATCH" { + set blocked_client [redis_deferring_client] + set watching_client [redis_deferring_client] + r del srclist{t} dstlist{t} somekey{t} + r set somekey{t} somevalue + $blocked_client brpoplpush srclist{t} dstlist{t} 0 + wait_for_blocked_client + $watching_client watch dstlist{t} + $watching_client read + $watching_client multi + $watching_client read + $watching_client get somekey{t} + $watching_client read + r lpush srclist{t} element + $watching_client exec + set res [$watching_client read] + $blocked_client close + $watching_client close + set _ $res + } {} + + test "BRPOPLPUSH does not affect WATCH while still blocked" { + set blocked_client [redis_deferring_client] + set watching_client [redis_deferring_client] + r del srclist{t} dstlist{t} somekey{t} + r set somekey{t} somevalue + $blocked_client brpoplpush srclist{t} dstlist{t} 0 + wait_for_blocked_client + 
$watching_client watch dstlist{t} + $watching_client read + $watching_client multi + $watching_client read + $watching_client get somekey{t} + $watching_client read + $watching_client exec + # Blocked BLPOPLPUSH may create problems, unblock it. + r lpush srclist{t} element + set res [$watching_client read] + $blocked_client close + $watching_client close + set _ $res + } {somevalue} + + test {BRPOPLPUSH timeout} { + set rd [redis_deferring_client] + + $rd brpoplpush foo_list{t} bar_list{t} 1 + wait_for_blocked_clients_count 1 + wait_for_blocked_clients_count 0 500 10 + set res [$rd read] + $rd close + set _ $res + } {} + + test {SWAPDB awakes blocked client} { + r flushall + r select 1 + r rpush k hello + r select 9 + set rd [redis_deferring_client] + $rd brpop k 5 + wait_for_blocked_clients_count 1 + r swapdb 1 9 + $rd read + } {k hello} {singledb:skip} + + test {SWAPDB wants to wake blocked client, but the key already expired} { + set repl [attach_to_replication_stream] + r flushall + r debug set-active-expire 0 + r select 1 + r rpush k hello + r pexpire k 100 + set rd [redis_deferring_client] + $rd deferred 0 + $rd select 9 + set id [$rd client id] + $rd deferred 1 + $rd brpop k 1 + wait_for_blocked_clients_count 1 + after 101 + r swapdb 1 9 + # The SWAPDB command tries to awake the blocked client, but it remains + # blocked because the key is expired. Check that the deferred client is + # still blocked. Then unblock it. + assert_match "*flags=b*" [r client list id $id] + r client unblock $id + assert_equal {} [$rd read] + $rd deferred 0 + # We want to force key deletion to be propagated to the replica + # in order to verify it was expired on the replication stream. 
+ $rd set somekey1 someval1 + $rd exists k + r set somekey2 someval2 + + assert_replication_stream $repl { + {select *} + {flushall} + {select 1} + {rpush k hello} + {pexpireat k *} + {swapdb 1 9} + {select 9} + {set somekey1 someval1} + {del k} + {select 1} + {set somekey2 someval2} + } + close_replication_stream $repl + r debug set-active-expire 1 + # Restore server and client state + r select 9 + } {OK} {singledb:skip needs:debug} + + test {MULTI + LPUSH + EXPIRE + DEBUG SLEEP on blocked client, key already expired} { + set repl [attach_to_replication_stream] + r flushall + r debug set-active-expire 0 + + set rd [redis_deferring_client] + $rd client id + set id [$rd read] + $rd brpop k 0 + wait_for_blocked_clients_count 1 + + r multi + r rpush k hello + r pexpire k 100 + r debug sleep 0.2 + r exec + + # The EXEC command tries to awake the blocked client, but it remains + # blocked because the key is expired. Check that the deferred client is + # still blocked. Then unblock it. + assert_match "*flags=b*" [r client list id $id] + r client unblock $id + assert_equal {} [$rd read] + # We want to force key deletion to be propagated to the replica + # in order to verify it was expired on the replication stream. + $rd exists k + assert_equal {0} [$rd read] + assert_replication_stream $repl { + {select *} + {flushall} + {multi} + {rpush k hello} + {pexpireat k *} + {exec} + {del k} + } + close_replication_stream $repl + # Restore server and client state + r debug set-active-expire 1 + r select 9 + } {OK} {singledb:skip needs:debug} + + test {BLPOP unblock but the key is expired and then block again - reprocessing command} { + r flushall + r debug set-active-expire 0 + set rd [redis_deferring_client] + + set start [clock milliseconds] + $rd blpop mylist 1 + wait_for_blocked_clients_count 1 + + # The exec will try to awake the blocked client, but the key is expired, + # so the client will be blocked again during the command reprocessing. 
+ r multi + r rpush mylist a + r pexpire mylist 100 + r debug sleep 0.2 + r exec + + assert_equal {} [$rd read] + set end [clock milliseconds] + + # Before the fix in #13004, this time would have been 1200+ (i.e. more than 1200ms), + # now it should be 1000, but in order to avoid timing issues, we increase the range a bit. + assert_range [expr $end-$start] 1000 1150 + + r debug set-active-expire 1 + $rd close + } {0} {needs:debug} + +foreach {pop} {BLPOP BLMPOP_LEFT} { + test "$pop when new key is moved into place" { + set rd [redis_deferring_client] + r del foo{t} + + bpop_command $rd $pop foo{t} 0 + wait_for_blocked_client + r lpush bob{t} abc def hij + r rename bob{t} foo{t} + set res [$rd read] + $rd close + set _ $res + } {foo{t} hij} + + test "$pop when result key is created by SORT..STORE" { + set rd [redis_deferring_client] + + # zero out list from previous test without explicit delete + r lpop foo{t} + r lpop foo{t} + r lpop foo{t} + + bpop_command $rd $pop foo{t} 5 + wait_for_blocked_client + r lpush notfoo{t} hello hola aguacate konichiwa zanzibar + r sort notfoo{t} ALPHA store foo{t} + set res [$rd read] + $rd close + set _ $res + } {foo{t} aguacate} +} + + test "BLPOP: timeout value out of range" { + # Timeout is parsed as float and multiplied by 1000, added mstime() + # and stored in long-long which might lead to out-of-range value. 
+ # (Even though given timeout is smaller than LLONG_MAX, the result + # will be bigger) + assert_error "ERR *is out of range*" {r BLPOP blist1 0x7FFFFFFFFFFFFF} + } + + foreach {pop} {BLPOP BRPOP BLMPOP_LEFT BLMPOP_RIGHT} { + test "$pop: with single empty list argument" { + set rd [redis_deferring_client] + r del blist1 + bpop_command $rd $pop blist1 1 + wait_for_blocked_client + r rpush blist1 foo + assert_equal {blist1 foo} [$rd read] + assert_equal 0 [r exists blist1] + $rd close + } + + test "$pop: with negative timeout" { + set rd [redis_deferring_client] + bpop_command $rd $pop blist1 -1 + assert_error "ERR *is negative*" {$rd read} + $rd close + } + + test "$pop: with non-integer timeout" { + set rd [redis_deferring_client] + r del blist1 + bpop_command $rd $pop blist1 0.1 + r rpush blist1 foo + assert_equal {blist1 foo} [$rd read] + assert_equal 0 [r exists blist1] + $rd close + } + + test "$pop: with zero timeout should block indefinitely" { + # To test this, use a timeout of 0 and wait a second. + # The blocking pop should still be waiting for a push. + set rd [redis_deferring_client] + bpop_command $rd $pop blist1 0 + wait_for_blocked_client + r rpush blist1 foo + assert_equal {blist1 foo} [$rd read] + $rd close + } + + test "$pop: with 0.001 timeout should not block indefinitely" { + # Use a timeout of 0.001 and wait for the number of blocked clients to equal 0. + # Validate the empty read from the deferring client. 
+ set rd [redis_deferring_client] + bpop_command $rd $pop blist1 0.001 + wait_for_blocked_clients_count 0 + assert_equal {} [$rd read] + $rd close + } + + test "$pop: second argument is not a list" { + set rd [redis_deferring_client] + r del blist1{t} blist2{t} + r set blist2{t} nolist{t} + bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 + assert_error "WRONGTYPE*" {$rd read} + $rd close + } + + test "$pop: timeout" { + set rd [redis_deferring_client] + r del blist1{t} blist2{t} + bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 + wait_for_blocked_client + assert_equal {} [$rd read] + $rd close + } + + test "$pop: arguments are empty" { + set rd [redis_deferring_client] + r del blist1{t} blist2{t} + + bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 + wait_for_blocked_client + r rpush blist1{t} foo + assert_equal {blist1{t} foo} [$rd read] + assert_equal 0 [r exists blist1{t}] + assert_equal 0 [r exists blist2{t}] + + bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 + wait_for_blocked_client + r rpush blist2{t} foo + assert_equal {blist2{t} foo} [$rd read] + assert_equal 0 [r exists blist1{t}] + assert_equal 0 [r exists blist2{t}] + $rd close + } + } + +foreach {pop} {BLPOP BLMPOP_LEFT} { + test "$pop inside a transaction" { + r del xlist + r lpush xlist foo + r lpush xlist bar + r multi + + bpop_command r $pop xlist 0 + bpop_command r $pop xlist 0 + bpop_command r $pop xlist 0 + r exec + } {{xlist bar} {xlist foo} {}} +} + + test {BLMPOP propagate as pop with count command to replica} { + set rd [redis_deferring_client] + set repl [attach_to_replication_stream] + + # BLMPOP without being blocked. + r lpush mylist{t} a b c + r rpush mylist2{t} 1 2 3 + r blmpop 0 1 mylist{t} left count 1 + r blmpop 0 2 mylist{t} mylist2{t} right count 10 + r blmpop 0 2 mylist{t} mylist2{t} right count 10 + + # BLMPOP that gets blocked. 
+ $rd blmpop 0 1 mylist{t} left count 1 + wait_for_blocked_client + r lpush mylist{t} a + $rd blmpop 0 2 mylist{t} mylist2{t} left count 5 + wait_for_blocked_client + r lpush mylist{t} a b c + $rd blmpop 0 2 mylist{t} mylist2{t} right count 10 + wait_for_blocked_client + r rpush mylist2{t} a b c + + # Released on timeout. + assert_equal {} [r blmpop 0.01 1 mylist{t} left count 10] + r set foo{t} bar ;# something else to propagate after, so we can make sure the above pop didn't. + + $rd close + + assert_replication_stream $repl { + {select *} + {lpush mylist{t} a b c} + {rpush mylist2{t} 1 2 3} + {lpop mylist{t} 1} + {rpop mylist{t} 2} + {rpop mylist2{t} 3} + {lpush mylist{t} a} + {lpop mylist{t} 1} + {lpush mylist{t} a b c} + {lpop mylist{t} 3} + {rpush mylist2{t} a b c} + {rpop mylist2{t} 3} + {set foo{t} bar} + } + close_replication_stream $repl + } {} {needs:repl} + + test {LPUSHX, RPUSHX - generic} { + r del xlist + assert_equal 0 [r lpushx xlist a] + assert_equal 0 [r llen xlist] + assert_equal 0 [r rpushx xlist a] + assert_equal 0 [r llen xlist] + } + + foreach {type large} [array get largevalue] { + test "LPUSHX, RPUSHX - $type" { + create_$type xlist "$large c" + assert_equal 3 [r rpushx xlist d] + assert_equal 4 [r lpushx xlist a] + assert_equal 6 [r rpushx xlist 42 x] + assert_equal 9 [r lpushx xlist y3 y2 y1] + assert_equal "y1 y2 y3 a $large c d 42 x" [r lrange xlist 0 -1] + } + + test "LINSERT - $type" { + create_$type xlist "a $large c d" + assert_equal 5 [r linsert xlist before c zz] "before c" + assert_equal "a $large zz c d" [r lrange xlist 0 10] "lrangeA" + assert_equal 6 [r linsert xlist after c yy] "after c" + assert_equal "a $large zz c yy d" [r lrange xlist 0 10] "lrangeB" + assert_equal 7 [r linsert xlist after d dd] "after d" + assert_equal -1 [r linsert xlist after bad ddd] "after bad" + assert_equal "a $large zz c yy d dd" [r lrange xlist 0 10] "lrangeC" + assert_equal 8 [r linsert xlist before a aa] "before a" + assert_equal -1 [r linsert 
xlist before bad aaa] "before bad" + assert_equal "aa a $large zz c yy d dd" [r lrange xlist 0 10] "lrangeD" + + # check inserting integer encoded value + assert_equal 9 [r linsert xlist before aa 42] "before aa" + assert_equal 42 [r lrange xlist 0 0] "lrangeE" + } + } + + test {LINSERT raise error on bad syntax} { + catch {[r linsert xlist aft3r aa 42]} e + set e + } {*ERR*syntax*error*} + + test {LINSERT against non-list value error} { + r set k1 v1 + assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r linsert k1 after 0 0} + } + + test {LINSERT against non existing key} { + assert_equal 0 [r linsert not-a-key before 0 0] + } + +foreach type {listpack quicklist} { + foreach {num} {250 500} { + if {$type == "quicklist"} { + set origin_config [config_get_set list-max-listpack-size 5] + } else { + set origin_config [config_get_set list-max-listpack-size -1] + } + + proc check_numbered_list_consistency {key} { + set len [r llen $key] + for {set i 0} {$i < $len} {incr i} { + assert_equal $i [r lindex $key $i] + assert_equal [expr $len-1-$i] [r lindex $key [expr (-$i)-1]] + } + } + + proc check_random_access_consistency {key} { + set len [r llen $key] + for {set i 0} {$i < $len} {incr i} { + set rint [expr int(rand()*$len)] + assert_equal $rint [r lindex $key $rint] + assert_equal [expr $len-1-$rint] [r lindex $key [expr (-$rint)-1]] + } + } + + test "LINDEX consistency test - $type" { + r del mylist + for {set i 0} {$i < $num} {incr i} { + r rpush mylist $i + } + assert_encoding $type mylist + check_numbered_list_consistency mylist + } + + test "LINDEX random access - $type" { + assert_encoding $type mylist + check_random_access_consistency mylist + } + + test "Check if list is still ok after a DEBUG RELOAD - $type" { + r debug reload + assert_encoding $type mylist + check_numbered_list_consistency mylist + check_random_access_consistency mylist + } {} {needs:debug} + + config_set list-max-listpack-size $origin_config + } +} + + test 
{LLEN against non-list value error} { + r del mylist + r set mylist foobar + assert_error WRONGTYPE* {r llen mylist} + } + + test {LLEN against non existing key} { + assert_equal 0 [r llen not-a-key] + } + + test {LINDEX against non-list value error} { + assert_error WRONGTYPE* {r lindex mylist 0} + } + + test {LINDEX against non existing key} { + assert_equal "" [r lindex not-a-key 10] + } + + test {LPUSH against non-list value error} { + assert_error WRONGTYPE* {r lpush mylist 0} + } + + test {RPUSH against non-list value error} { + assert_error WRONGTYPE* {r rpush mylist 0} + } + + foreach {type large} [array get largevalue] { + test "RPOPLPUSH base case - $type" { + r del mylist1{t} mylist2{t} + create_$type mylist1{t} "a $large c d" + assert_equal d [r rpoplpush mylist1{t} mylist2{t}] + assert_equal c [r rpoplpush mylist1{t} mylist2{t}] + assert_equal $large [r rpoplpush mylist1{t} mylist2{t}] + assert_equal "a" [r lrange mylist1{t} 0 -1] + assert_equal "$large c d" [r lrange mylist2{t} 0 -1] + assert_encoding listpack mylist1{t} ;# converted to listpack after shrinking + assert_encoding $type mylist2{t} + } + + foreach wherefrom {left right} { + foreach whereto {left right} { + test "LMOVE $wherefrom $whereto base case - $type" { + r del mylist1{t} mylist2{t} + + if {$wherefrom eq "right"} { + create_$type mylist1{t} "c d $large a" + } else { + create_$type mylist1{t} "a $large c d" + } + assert_equal a [r lmove mylist1{t} mylist2{t} $wherefrom $whereto] + assert_equal $large [r lmove mylist1{t} mylist2{t} $wherefrom $whereto] + assert_equal "c d" [r lrange mylist1{t} 0 -1] + if {$whereto eq "right"} { + assert_equal "a $large" [r lrange mylist2{t} 0 -1] + } else { + assert_equal "$large a" [r lrange mylist2{t} 0 -1] + } + assert_encoding $type mylist2{t} + } + } + } + + test "RPOPLPUSH with the same list as src and dst - $type" { + create_$type mylist{t} "a $large c" + assert_equal "a $large c" [r lrange mylist{t} 0 -1] + assert_equal c [r rpoplpush 
mylist{t} mylist{t}] + assert_equal "c a $large" [r lrange mylist{t} 0 -1] + } + + foreach wherefrom {left right} { + foreach whereto {left right} { + test "LMOVE $wherefrom $whereto with the same list as src and dst - $type" { + if {$wherefrom eq "right"} { + create_$type mylist{t} "a $large c" + assert_equal "a $large c" [r lrange mylist{t} 0 -1] + } else { + create_$type mylist{t} "c a $large" + assert_equal "c a $large" [r lrange mylist{t} 0 -1] + } + assert_equal c [r lmove mylist{t} mylist{t} $wherefrom $whereto] + if {$whereto eq "right"} { + assert_equal "a $large c" [r lrange mylist{t} 0 -1] + } else { + assert_equal "c a $large" [r lrange mylist{t} 0 -1] + } + } + } + } + + foreach {othertype otherlarge} [array get largevalue] { + test "RPOPLPUSH with $type source and existing target $othertype" { + create_$type srclist{t} "a b c $large" + create_$othertype dstlist{t} "$otherlarge" + assert_equal $large [r rpoplpush srclist{t} dstlist{t}] + assert_equal c [r rpoplpush srclist{t} dstlist{t}] + assert_equal "a b" [r lrange srclist{t} 0 -1] + assert_equal "c $large $otherlarge" [r lrange dstlist{t} 0 -1] + + # When we rpoplpush'ed a large value, dstlist should be + # converted to the same encoding as srclist. 
+ if {$type eq "quicklist"} { + assert_encoding quicklist dstlist{t} + } + } + + foreach wherefrom {left right} { + foreach whereto {left right} { + test "LMOVE $wherefrom $whereto with $type source and existing target $othertype" { + create_$othertype dstlist{t} "$otherlarge" + + if {$wherefrom eq "right"} { + create_$type srclist{t} "a b c $large" + } else { + create_$type srclist{t} "$large c a b" + } + assert_equal $large [r lmove srclist{t} dstlist{t} $wherefrom $whereto] + assert_equal c [r lmove srclist{t} dstlist{t} $wherefrom $whereto] + assert_equal "a b" [r lrange srclist{t} 0 -1] + + if {$whereto eq "right"} { + assert_equal "$otherlarge $large c" [r lrange dstlist{t} 0 -1] + } else { + assert_equal "c $large $otherlarge" [r lrange dstlist{t} 0 -1] + } + + # When we lmoved a large value, dstlist should be + # converted to the same encoding as srclist. + if {$type eq "quicklist"} { + assert_encoding quicklist dstlist{t} + } + } + } + } + } + } + + test {RPOPLPUSH against non existing key} { + r del srclist{t} dstlist{t} + assert_equal {} [r rpoplpush srclist{t} dstlist{t}] + assert_equal 0 [r exists srclist{t}] + assert_equal 0 [r exists dstlist{t}] + } + + test {RPOPLPUSH against non list src key} { + r del srclist{t} dstlist{t} + r set srclist{t} x + assert_error WRONGTYPE* {r rpoplpush srclist{t} dstlist{t}} + assert_type string srclist{t} + assert_equal 0 [r exists newlist{t}] + } + +foreach {type large} [array get largevalue] { + test "RPOPLPUSH against non list dst key - $type" { + create_$type srclist{t} "a $large c d" + r set dstlist{t} x + assert_error WRONGTYPE* {r rpoplpush srclist{t} dstlist{t}} + assert_type string dstlist{t} + assert_equal "a $large c d" [r lrange srclist{t} 0 -1] + } +} + + test {RPOPLPUSH against non existing src key} { + r del srclist{t} dstlist{t} + assert_equal {} [r rpoplpush srclist{t} dstlist{t}] + } {} + + foreach {type large} [array get largevalue] { + test "Basic LPOP/RPOP/LMPOP - $type" { + create_$type mylist 
"$large 1 2" + assert_equal $large [r lpop mylist] + assert_equal 2 [r rpop mylist] + assert_equal 1 [r lpop mylist] + assert_equal 0 [r llen mylist] + + create_$type mylist "$large 1 2" + assert_equal "mylist $large" [r lmpop 1 mylist left count 1] + assert_equal {mylist {2 1}} [r lmpop 2 mylist mylist right count 2] + } + } + + test {LPOP/RPOP/LMPOP against empty list} { + r del non-existing-list{t} non-existing-list2{t} + + assert_equal {} [r lpop non-existing-list{t}] + assert_equal {} [r rpop non-existing-list2{t}] + + assert_equal {} [r lmpop 1 non-existing-list{t} left count 1] + assert_equal {} [r lmpop 1 non-existing-list{t} left count 10] + assert_equal {} [r lmpop 2 non-existing-list{t} non-existing-list2{t} right count 1] + assert_equal {} [r lmpop 2 non-existing-list{t} non-existing-list2{t} right count 10] + } + + test {LPOP/RPOP/LMPOP NON-BLOCK or BLOCK against non list value} { + r set notalist{t} foo + assert_error WRONGTYPE* {r lpop notalist{t}} + assert_error WRONGTYPE* {r blpop notalist{t} 0} + assert_error WRONGTYPE* {r rpop notalist{t}} + assert_error WRONGTYPE* {r brpop notalist{t} 0} + + r del notalist2{t} + assert_error "WRONGTYPE*" {r lmpop 2 notalist{t} notalist2{t} left count 1} + assert_error "WRONGTYPE*" {r blmpop 0 2 notalist{t} notalist2{t} left count 1} + + r del notalist{t} + r set notalist2{t} nolist + assert_error "WRONGTYPE*" {r lmpop 2 notalist{t} notalist2{t} right count 10} + assert_error "WRONGTYPE*" {r blmpop 0 2 notalist{t} notalist2{t} left count 1} + } + + foreach {num} {250 500} { + test "Mass RPOP/LPOP - $type" { + r del mylist + set sum1 0 + for {set i 0} {$i < $num} {incr i} { + if {$i == [expr $num/2]} { + r lpush mylist $large + } + r lpush mylist $i + incr sum1 $i + } + assert_encoding $type mylist + set sum2 0 + for {set i 0} {$i < [expr $num/2]} {incr i} { + incr sum2 [r lpop mylist] + incr sum2 [r rpop mylist] + } + assert_equal $sum1 $sum2 + } + } + + test {LMPOP with illegal argument} { + assert_error "ERR 
wrong number of arguments for 'lmpop' command" {r lmpop} + assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop 1} + assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop 1 mylist{t}} + + assert_error "ERR numkeys*" {r lmpop 0 mylist{t} LEFT} + assert_error "ERR numkeys*" {r lmpop a mylist{t} LEFT} + assert_error "ERR numkeys*" {r lmpop -1 mylist{t} RIGHT} + + assert_error "ERR syntax error*" {r lmpop 1 mylist{t} bad_where} + assert_error "ERR syntax error*" {r lmpop 1 mylist{t} LEFT bar_arg} + assert_error "ERR syntax error*" {r lmpop 1 mylist{t} RIGHT LEFT} + assert_error "ERR syntax error*" {r lmpop 1 mylist{t} COUNT} + assert_error "ERR syntax error*" {r lmpop 1 mylist{t} LEFT COUNT 1 COUNT 2} + assert_error "ERR syntax error*" {r lmpop 2 mylist{t} mylist2{t} bad_arg} + + assert_error "ERR count*" {r lmpop 1 mylist{t} LEFT COUNT 0} + assert_error "ERR count*" {r lmpop 1 mylist{t} RIGHT COUNT a} + assert_error "ERR count*" {r lmpop 1 mylist{t} LEFT COUNT -1} + assert_error "ERR count*" {r lmpop 2 mylist{t} mylist2{t} RIGHT COUNT -1} + } + +foreach {type large} [array get largevalue] { + test "LMPOP single existing list - $type" { + # Same key multiple times. + create_$type mylist{t} "a b $large d e f" + assert_equal {mylist{t} {a b}} [r lmpop 2 mylist{t} mylist{t} left count 2] + assert_equal {mylist{t} {f e}} [r lmpop 2 mylist{t} mylist{t} right count 2] + assert_equal 2 [r llen mylist{t}] + + # First one exists, second one does not exist. + create_$type mylist{t} "a b $large d e" + r del mylist2{t} + assert_equal {mylist{t} a} [r lmpop 2 mylist{t} mylist2{t} left count 1] + assert_equal 4 [r llen mylist{t}] + assert_equal "mylist{t} {e d $large b}" [r lmpop 2 mylist{t} mylist2{t} right count 10] + assert_equal {} [r lmpop 2 mylist{t} mylist2{t} right count 1] + + # First one does not exist, second one exists. 
+ r del mylist{t} + create_$type mylist2{t} "1 2 $large 4 5" + assert_equal {mylist2{t} 5} [r lmpop 2 mylist{t} mylist2{t} right count 1] + assert_equal 4 [r llen mylist2{t}] + assert_equal "mylist2{t} {1 2 $large 4}" [r lmpop 2 mylist{t} mylist2{t} left count 10] + + assert_equal 0 [r exists mylist{t} mylist2{t}] + } + + test "LMPOP multiple existing lists - $type" { + create_$type mylist{t} "a b $large d e" + create_$type mylist2{t} "1 2 $large 4 5" + + # Pop up from the first key. + assert_equal {mylist{t} {a b}} [r lmpop 2 mylist{t} mylist2{t} left count 2] + assert_equal 3 [r llen mylist{t}] + assert_equal "mylist{t} {e d $large}" [r lmpop 2 mylist{t} mylist2{t} right count 3] + assert_equal 0 [r exists mylist{t}] + + # Pop up from the second key. + assert_equal "mylist2{t} {1 2 $large}" [r lmpop 2 mylist{t} mylist2{t} left count 3] + assert_equal 2 [r llen mylist2{t}] + assert_equal {mylist2{t} {5 4}} [r lmpop 2 mylist{t} mylist2{t} right count 2] + assert_equal 0 [r exists mylist{t}] + + # Pop up all elements. + create_$type mylist{t} "a $large c" + create_$type mylist2{t} "1 $large 3" + assert_equal "mylist{t} {a $large c}" [r lmpop 2 mylist{t} mylist2{t} left count 10] + assert_equal 0 [r llen mylist{t}] + assert_equal "mylist2{t} {3 $large 1}" [r lmpop 2 mylist{t} mylist2{t} right count 10] + assert_equal 0 [r llen mylist2{t}] + assert_equal 0 [r exists mylist{t} mylist2{t}] + } +} + + test {LMPOP propagate as pop with count command to replica} { + set repl [attach_to_replication_stream] + + # left/right propagate as lpop/rpop with count + r lpush mylist{t} a b c + + # Pop elements from one list. + r lmpop 1 mylist{t} left count 1 + r lmpop 1 mylist{t} right count 1 + + # Now the list have only one element + r lmpop 2 mylist{t} mylist2{t} left count 10 + + # No elements so we don't propagate. + r lmpop 2 mylist{t} mylist2{t} left count 10 + + # Pop elements from the second list. 
+ r rpush mylist2{t} 1 2 3 + r lmpop 2 mylist{t} mylist2{t} left count 2 + r lmpop 2 mylist{t} mylist2{t} right count 1 + + # Pop all elements. + r rpush mylist{t} a b c + r rpush mylist2{t} 1 2 3 + r lmpop 2 mylist{t} mylist2{t} left count 10 + r lmpop 2 mylist{t} mylist2{t} right count 10 + + assert_replication_stream $repl { + {select *} + {lpush mylist{t} a b c} + {lpop mylist{t} 1} + {rpop mylist{t} 1} + {lpop mylist{t} 1} + {rpush mylist2{t} 1 2 3} + {lpop mylist2{t} 2} + {rpop mylist2{t} 1} + {rpush mylist{t} a b c} + {rpush mylist2{t} 1 2 3} + {lpop mylist{t} 3} + {rpop mylist2{t} 3} + } + close_replication_stream $repl + } {} {needs:repl} + + foreach {type large} [array get largevalue] { + test "LRANGE basics - $type" { + create_$type mylist "$large 1 2 3 4 5 6 7 8 9" + assert_equal {1 2 3 4 5 6 7 8} [r lrange mylist 1 -2] + assert_equal {7 8 9} [r lrange mylist -3 -1] + assert_equal {4} [r lrange mylist 4 4] + } + + test "LRANGE inverted indexes - $type" { + create_$type mylist "$large 1 2 3 4 5 6 7 8 9" + assert_equal {} [r lrange mylist 6 2] + } + + test "LRANGE out of range indexes including the full list - $type" { + create_$type mylist "$large 1 2 3" + assert_equal "$large 1 2 3" [r lrange mylist -1000 1000] + } + + test "LRANGE out of range negative end index - $type" { + create_$type mylist "$large 1 2 3" + assert_equal $large [r lrange mylist 0 -4] + assert_equal {} [r lrange mylist 0 -5] + } + } + + test {LRANGE against non existing key} { + assert_equal {} [r lrange nosuchkey 0 1] + } + + test {LRANGE with start > end yields an empty array for backward compatibility} { + create_$type mylist "1 $large 3" + assert_equal {} [r lrange mylist 1 0] + assert_equal {} [r lrange mylist -1 -2] + } + + foreach {type large} [array get largevalue] { + proc trim_list {type min max} { + upvar 1 large large + r del mylist + create_$type mylist "1 2 3 4 $large" + r ltrim mylist $min $max + r lrange mylist 0 -1 + } + + test "LTRIM basics - $type" { + assert_equal 
"1" [trim_list $type 0 0] + assert_equal "1 2" [trim_list $type 0 1] + assert_equal "1 2 3" [trim_list $type 0 2] + assert_equal "2 3" [trim_list $type 1 2] + assert_equal "2 3 4 $large" [trim_list $type 1 -1] + assert_equal "2 3 4" [trim_list $type 1 -2] + assert_equal "4 $large" [trim_list $type -2 -1] + assert_equal "$large" [trim_list $type -1 -1] + assert_equal "1 2 3 4 $large" [trim_list $type -5 -1] + assert_equal "1 2 3 4 $large" [trim_list $type -10 10] + assert_equal "1 2 3 4 $large" [trim_list $type 0 5] + assert_equal "1 2 3 4 $large" [trim_list $type 0 10] + } + + test "LTRIM out of range negative end index - $type" { + assert_equal {1} [trim_list $type 0 -5] + assert_equal {} [trim_list $type 0 -6] + } + + test "LSET - $type" { + create_$type mylist "99 98 $large 96 95" + r lset mylist 1 foo + r lset mylist -1 bar + assert_equal "99 foo $large 96 bar" [r lrange mylist 0 -1] + } + + test "LSET out of range index - $type" { + assert_error ERR*range* {r lset mylist 10 foo} + } + } + + test {LSET against non existing key} { + assert_error ERR*key* {r lset nosuchkey 10 foo} + } + + test {LSET against non list value} { + r set nolist foobar + assert_error WRONGTYPE* {r lset nolist 0 foo} + } + + foreach {type e} [array get largevalue] { + test "LREM remove all the occurrences - $type" { + create_$type mylist "$e foo bar foobar foobared zap bar test foo" + assert_equal 2 [r lrem mylist 0 bar] + assert_equal "$e foo foobar foobared zap test foo" [r lrange mylist 0 -1] + } + + test "LREM remove the first occurrence - $type" { + assert_equal 1 [r lrem mylist 1 foo] + assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] + } + + test "LREM remove non existing element - $type" { + assert_equal 0 [r lrem mylist 1 nosuchelement] + assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] + } + + test "LREM starting from tail with negative count - $type" { + create_$type mylist "$e foo bar foobar foobared zap bar test foo foo" + 
assert_equal 1 [r lrem mylist -1 bar] + assert_equal "$e foo bar foobar foobared zap test foo foo" [r lrange mylist 0 -1] + } + + test "LREM starting from tail with negative count (2) - $type" { + assert_equal 2 [r lrem mylist -2 foo] + assert_equal "$e foo bar foobar foobared zap test" [r lrange mylist 0 -1] + } + + test "LREM deleting objects that may be int encoded - $type" { + create_$type myotherlist "$e 1 2 3" + assert_equal 1 [r lrem myotherlist 1 2] + assert_equal 3 [r llen myotherlist] + } + } + + test "Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + + $rd1 brpoplpush a{t} b{t} 0 + $rd1 brpoplpush a{t} b{t} 0 + wait_for_blocked_clients_count 1 + $rd2 brpoplpush b{t} c{t} 0 + wait_for_blocked_clients_count 2 + r lpush a{t} data + $rd1 close + $rd2 close + r ping + } {PONG} + + test "BLPOP/BLMOVE should increase dirty" { + r del lst{t} lst1{t} + set rd [redis_deferring_client] + + set dirty [s rdb_changes_since_last_save] + $rd blpop lst{t} 0 + wait_for_blocked_client + r lpush lst{t} a + assert_equal {lst{t} a} [$rd read] + set dirty2 [s rdb_changes_since_last_save] + assert {$dirty2 == $dirty + 2} + + set dirty [s rdb_changes_since_last_save] + $rd blmove lst{t} lst1{t} left left 0 + wait_for_blocked_client + r lpush lst{t} a + assert_equal {a} [$rd read] + set dirty2 [s rdb_changes_since_last_save] + assert {$dirty2 == $dirty + 2} + + $rd close + } + +foreach {pop} {BLPOP BLMPOP_RIGHT} { + test "client unblock tests" { + r del l + set rd [redis_deferring_client] + $rd client id + set id [$rd read] + + # test default args + bpop_command $rd $pop l 0 + wait_for_blocked_client + r client unblock $id + assert_equal {} [$rd read] + + # test with timeout + bpop_command $rd $pop l 0 + wait_for_blocked_client + r client unblock $id TIMEOUT + assert_equal {} [$rd read] + + # test with error + bpop_command $rd $pop l 0 + wait_for_blocked_client + r client unblock $id 
ERROR + catch {[$rd read]} e + assert_equal $e "UNBLOCKED client unblocked via CLIENT UNBLOCK" + + # test with invalid client id + catch {[r client unblock asd]} e + assert_equal $e "ERR value is not an integer or out of range" + + # test with non blocked client + set myid [r client id] + catch {[r client unblock $myid]} e + assert_equal $e {invalid command name "0"} + + # finally, see the this client and list are still functional + bpop_command $rd $pop l 0 + wait_for_blocked_client + r lpush l foo + assert_equal {l foo} [$rd read] + $rd close + } +} + + foreach {max_lp_size large} "3 $largevalue(listpack) -1 $largevalue(quicklist)" { + test "List listpack -> quicklist encoding conversion" { + set origin_conf [config_get_set list-max-listpack-size $max_lp_size] + + # RPUSH + create_listpack lst "a b c" + r RPUSH lst $large + assert_encoding quicklist lst + + # LINSERT + create_listpack lst "a b c" + r LINSERT lst after b $large + assert_encoding quicklist lst + + # LSET + create_listpack lst "a b c" + r LSET lst 0 $large + assert_encoding quicklist lst + + # LMOVE + create_quicklist lsrc{t} "a b c $large" + create_listpack ldes{t} "d e f" + r LMOVE lsrc{t} ldes{t} right right + assert_encoding quicklist ldes{t} + + r config set list-max-listpack-size $origin_conf + } + } + + test "List quicklist -> listpack encoding conversion" { + set origin_conf [config_get_set list-max-listpack-size 3] + + # RPOP + create_quicklist lst "a b c d" + r RPOP lst 3 + assert_encoding listpack lst + + # LREM + create_quicklist lst "a a a d" + r LREM lst 3 a + assert_encoding listpack lst + + # LTRIM + create_quicklist lst "a b c d" + r LTRIM lst 1 1 + assert_encoding listpack lst + + r config set list-max-listpack-size -1 + + # RPOP + create_quicklist lst "a b c $largevalue(quicklist)" + r RPOP lst 1 + assert_encoding listpack lst + + # LREM + create_quicklist lst "a $largevalue(quicklist)" + r LREM lst 1 $largevalue(quicklist) + assert_encoding listpack lst + + # LTRIM + 
create_quicklist lst "a b $largevalue(quicklist)" + r LTRIM lst 0 1 + assert_encoding listpack lst + + # LSET + create_quicklist lst "$largevalue(quicklist) a b" + r RPOP lst 2 + assert_encoding quicklist lst + r LSET lst -1 c + assert_encoding listpack lst + + r config set list-max-listpack-size $origin_conf + } + + test "List encoding conversion when RDB loading" { + set origin_conf [config_get_set list-max-listpack-size 3] + create_listpack lst "a b c" + + # list is still a listpack after DEBUG RELOAD + r DEBUG RELOAD + assert_encoding listpack lst + + # list is still a quicklist after DEBUG RELOAD + r RPUSH lst d + r DEBUG RELOAD + assert_encoding quicklist lst + + # when a quicklist has only one packed node, it will be + # converted to listpack during rdb loading + r RPOP lst + assert_encoding quicklist lst + r DEBUG RELOAD + assert_encoding listpack lst + + r config set list-max-listpack-size $origin_conf + } {OK} {needs:debug} + + test "List invalid list-max-listpack-size config" { + # ​When list-max-listpack-size is 0 we treat it as 1 and it'll + # still be listpack if there's a single element in the list. + r config set list-max-listpack-size 0 + r DEL lst + r RPUSH lst a + assert_encoding listpack lst + r RPUSH lst b + assert_encoding quicklist lst + + # When list-max-listpack-size < -5 we treat it as -5. 
+ r config set list-max-listpack-size -6 + r DEL lst + r RPUSH lst [string repeat "x" 60000] + assert_encoding listpack lst + # Converted to quicklist when the size of listpack exceed 65536 + r RPUSH lst [string repeat "x" 5536] + assert_encoding quicklist lst + } + + test "List of various encodings" { + r del k + r lpush k 127 ;# ZIP_INT_8B + r lpush k 32767 ;# ZIP_INT_16B + r lpush k 2147483647 ;# ZIP_INT_32B + r lpush k 9223372036854775808 ;# ZIP_INT_64B + r lpush k 0 ;# ZIP_INT_IMM_MIN + r lpush k 12 ;# ZIP_INT_IMM_MAX + r lpush k [string repeat x 31] ;# ZIP_STR_06B + r lpush k [string repeat x 8191] ;# ZIP_STR_14B + r lpush k [string repeat x 65535] ;# ZIP_STR_32B + assert_encoding quicklist k ;# exceeds the size limit of quicklist node + set k [r lrange k 0 -1] + set dump [r dump k] + + # coverage for objectComputeSize + assert_morethan [memory_usage k] 0 + + config_set sanitize-dump-payload no mayfail + r restore kk 0 $dump replace + assert_encoding quicklist kk + set kk [r lrange kk 0 -1] + + # try some forward and backward searches to make sure all encodings + # can be traversed + assert_equal [r lindex kk 5] {9223372036854775808} + assert_equal [r lindex kk -5] {0} + assert_equal [r lpos kk foo rank 1] {} + assert_equal [r lpos kk foo rank -1] {} + + # make sure the values are right + assert_equal $k $kk + assert_equal [lpop k] [string repeat x 65535] + assert_equal [lpop k] [string repeat x 8191] + assert_equal [lpop k] [string repeat x 31] + set _ $k + } {12 0 9223372036854775808 2147483647 32767 127} + + test "List of various encodings - sanitize dump" { + config_set sanitize-dump-payload yes mayfail + r restore kk 0 $dump replace + assert_encoding quicklist kk + set k [r lrange k 0 -1] + set kk [r lrange kk 0 -1] + + # make sure the values are right + assert_equal $k $kk + assert_equal [lpop k] [string repeat x 65535] + assert_equal [lpop k] [string repeat x 8191] + assert_equal [lpop k] [string repeat x 31] + set _ $k + } {12 0 9223372036854775808 
2147483647 32767 127} + + test "Unblock fairness is kept while pipelining" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + + # delete the list in case already exists + r del mylist + + # block a client on the list + $rd1 BLPOP mylist 0 + wait_for_blocked_clients_count 1 + + # pipeline on other client a list push and a blocking pop + # we should expect the fairness to be kept and have $rd1 + # being unblocked + set buf "" + append buf "LPUSH mylist 1\r\n" + append buf "BLPOP mylist 0\r\n" + $rd2 write $buf + $rd2 flush + + # we check that we still have 1 blocked client + # and that the first blocked client has been served + assert_equal [$rd1 read] {mylist 1} + assert_equal [$rd2 read] {1} + wait_for_blocked_clients_count 1 + + # We no unblock the last client and verify it was served last + r LPUSH mylist 2 + wait_for_blocked_clients_count 0 + assert_equal [$rd2 read] {mylist 2} + + $rd1 close + $rd2 close + } + + test "Unblock fairness is kept during nested unblock" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + set rd3 [redis_deferring_client] + + # delete the list in case already exists + r del l1{t} l2{t} l3{t} + + # block a client on the list + $rd1 BRPOPLPUSH l1{t} l3{t} 0 + wait_for_blocked_clients_count 1 + + $rd2 BLPOP l2{t} 0 + wait_for_blocked_clients_count 2 + + $rd3 BLMPOP 0 2 l2{t} l3{t} LEFT COUNT 1 + wait_for_blocked_clients_count 3 + + r multi + r lpush l1{t} 1 + r lpush l2{t} 2 + r exec + + wait_for_blocked_clients_count 0 + + assert_equal [$rd1 read] {1} + assert_equal [$rd2 read] {l2{t} 2} + assert_equal [$rd3 read] {l3{t} 1} + + $rd1 close + $rd2 close + $rd3 close + } + + test "Blocking command accounted only once in commandstats" { + # cleanup first + r del mylist + + # create a test client + set rd [redis_deferring_client] + + # reset the server stats + r config resetstat + + # block a client on the list + $rd BLPOP mylist 0 + wait_for_blocked_clients_count 1 + + # unblock the list + r 
LPUSH mylist 1 + wait_for_blocked_clients_count 0 + + assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdrstat blpop r] + + $rd close + } + + test "Blocking command accounted only once in commandstats after timeout" { + # cleanup first + r del mylist + + # create a test client + set rd [redis_deferring_client] + $rd client id + set id [$rd read] + + # reset the server stats + r config resetstat + + # block a client on the list + $rd BLPOP mylist 0 + wait_for_blocked_clients_count 1 + + # unblock the client on timeout + r client unblock $id timeout + + assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdrstat blpop r] + + $rd close + } + + test {Command being unblocked cause another command to get unblocked execution order test} { + r del src{t} dst{t} key1{t} key2{t} key3{t} + set repl [attach_to_replication_stream] + + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + set rd3 [redis_deferring_client] + + $rd1 blmove src{t} dst{t} left right 0 + wait_for_blocked_clients_count 1 + + $rd2 blmove dst{t} src{t} right left 0 + wait_for_blocked_clients_count 2 + + # Create a pipeline of commands that will be processed in one socket read. + # Insert two set commands before and after lpush to observe the execution order. + set buf "" + append buf "set key1{t} value1\r\n" + append buf "lpush src{t} dummy\r\n" + append buf "set key2{t} value2\r\n" + $rd3 write $buf + $rd3 flush + + wait_for_blocked_clients_count 0 + + r set key3{t} value3 + + # If a command being unblocked causes another command to get unblocked, like a BLMOVE would do, + # then the new unblocked command will get processed right away rather than wait for later. + # If the set command occurs between two lmove commands, the results are not as expected. 
+ assert_replication_stream $repl { + {select *} + {set key1{t} value1} + {lpush src{t} dummy} + {lmove src{t} dst{t} left right} + {lmove dst{t} src{t} right left} + {set key2{t} value2} + {set key3{t} value3} + } + + $rd1 close + $rd2 close + $rd3 close + + close_replication_stream $repl + } {} {needs:repl} + +} ;# stop servers \ No newline at end of file diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl new file mode 100644 index 000000000..f5bf8e4fc --- /dev/null +++ b/tests/unit/type/set.tcl @@ -0,0 +1,1314 @@ +start_server { + tags {"set"} + overrides { + "set-max-intset-entries" 512 + "set-max-listpack-entries" 128 + "set-max-listpack-value" 32 + } +} { + proc create_set {key entries} { + r del $key + foreach entry $entries { r sadd $key $entry } + } + + # Values for initialing sets, per encoding. + array set initelems {listpack {foo} hashtable {foo}} + for {set i 0} {$i < 130} {incr i} { + lappend initelems(hashtable) [format "i%03d" $i] + } + +# foreach type {listpack hashtable} { +# test "SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - $type" { +# create_set myset $initelems($type) +# #assert_encoding $type myset +# assert_equal 1 [r sadd myset bar] +# assert_equal 0 [r sadd myset bar] +# assert_equal [expr [llength $initelems($type)] + 1] [r scard myset] +# assert_equal 1 [r sismember myset foo] +# assert_equal 1 [r sismember myset bar] +# assert_equal 0 [r sismember myset bla] +# assert_equal {1} [r smismember myset foo] +# assert_equal {1 1} [r smismember myset foo bar] +# assert_equal {1 0} [r smismember myset foo bla] +# assert_equal {0 1} [r smismember myset bla foo] +# assert_equal {0} [r smismember myset bla] +# assert_equal "bar $initelems($type)" [lsort [r smembers myset]] +# } +# } + +# test {SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - intset} { +# create_set myset {17} +# #assert_encoding intset myset +# assert_equal 1 [r sadd myset 16] +# assert_equal 0 [r sadd myset 16] +# assert_equal 2 [r scard myset] +# 
assert_equal 1 [r sismember myset 16] +# assert_equal 1 [r sismember myset 17] +# assert_equal 0 [r sismember myset 18] +# assert_equal {1} [r smismember myset 16] +# assert_equal {1 1} [r smismember myset 16 17] +# assert_equal {1 0} [r smismember myset 16 18] +# assert_equal {0 1} [r smismember myset 18 16] +# assert_equal {0} [r smismember myset 18] +# assert_equal {16 17} [lsort [r smembers myset]] +# } + + test {SMISMEMBER SMEMBERS SCARD against non set} { + r lpush mylist foo + assert_error WRONGTYPE* {r smismember mylist bar} + assert_error WRONGTYPE* {r smembers mylist} + assert_error WRONGTYPE* {r scard mylist} + } + + test {SMISMEMBER SMEMBERS SCARD against non existing key} { + assert_equal {0} [r smismember myset1 foo] + assert_equal {0 0} [r smismember myset1 foo bar] + assert_equal {} [r smembers myset1] + assert_equal {0} [r scard myset1] + } + + test {SMISMEMBER requires one or more members} { + r del zmscoretest + r zadd zmscoretest 10 x + r zadd zmscoretest 20 y + + catch {r smismember zmscoretest} e + assert_match {*ERR*wrong*number*arg*} $e + } + + test {SADD against non set} { + r lpush mylist foo + assert_error WRONGTYPE* {r sadd mylist bar} + } + + test "SADD a non-integer against a small intset" { + create_set myset {1 2 3} + assert_encoding intset myset + assert_equal 1 [r sadd myset a] + assert_encoding listpack myset + } + + test "SADD a non-integer against a large intset" { + create_set myset {0} + for {set i 1} {$i < 130} {incr i} {r sadd myset $i} + assert_encoding intset myset + assert_equal 1 [r sadd myset a] + assert_encoding hashtable myset + } + + test "SADD an integer larger than 64 bits" { + create_set myset {213244124402402314402033402} + assert_encoding listpack myset + assert_equal 1 [r sismember myset 213244124402402314402033402] + assert_equal {1} [r smismember myset 213244124402402314402033402] + } + + test "SADD an integer larger than 64 bits to a large intset" { + create_set myset {0} + for {set i 1} {$i < 130} {incr i} 
{r sadd myset $i} + assert_encoding intset myset + r sadd myset 213244124402402314402033402 + assert_encoding hashtable myset + assert_equal 1 [r sismember myset 213244124402402314402033402] + assert_equal {1} [r smismember myset 213244124402402314402033402] + } + +foreach type {single multiple single_multiple} { + test "SADD overflows the maximum allowed integers in an intset - $type" { + r del myset + + if {$type == "single"} { + # All are single sadd commands. + for {set i 0} {$i < 512} {incr i} { r sadd myset $i } + } elseif {$type == "multiple"} { + # One sadd command to add all elements. + set args {} + for {set i 0} {$i < 512} {incr i} { lappend args $i } + r sadd myset {*}$args + } elseif {$type == "single_multiple"} { + # First one sadd adds an element (creates a key) and then one sadd adds all elements. + r sadd myset 1 + set args {} + for {set i 0} {$i < 512} {incr i} { lappend args $i } + r sadd myset {*}$args + } + + assert_encoding intset myset + assert_equal 512 [r scard myset] + assert_equal 1 [r sadd myset 512] + assert_encoding hashtable myset + } + + test "SADD overflows the maximum allowed elements in a listpack - $type" { + r del myset + + if {$type == "single"} { + # All are single sadd commands. + r sadd myset a + for {set i 0} {$i < 127} {incr i} { r sadd myset $i } + } elseif {$type == "multiple"} { + # One sadd command to add all elements. + set args {} + lappend args a + for {set i 0} {$i < 127} {incr i} { lappend args $i } + r sadd myset {*}$args + } elseif {$type == "single_multiple"} { + # First one sadd adds an element (creates a key) and then one sadd adds all elements. 
+ r sadd myset a + set args {} + lappend args a + for {set i 0} {$i < 127} {incr i} { lappend args $i } + r sadd myset {*}$args + } + + assert_encoding listpack myset + assert_equal 128 [r scard myset] + assert_equal 1 [r sadd myset b] + assert_encoding hashtable myset + } +} + + test {Variadic SADD} { + r del myset + assert_equal 3 [r sadd myset a b c] + assert_equal 2 [r sadd myset A a b c B] + assert_equal [lsort {A a b c B}] [lsort [r smembers myset]] + } + + test "Set encoding after DEBUG RELOAD" { + r del myintset + r del myhashset + r del mylargeintset + r del mysmallset + for {set i 0} {$i < 100} {incr i} { r sadd myintset $i } + for {set i 0} {$i < 1280} {incr i} { r sadd mylargeintset $i } + for {set i 0} {$i < 50} {incr i} { r sadd mysmallset [format "i%03d" $i] } + for {set i 0} {$i < 256} {incr i} { r sadd myhashset [format "i%03d" $i] } + assert_encoding intset myintset + assert_encoding hashtable mylargeintset + assert_encoding listpack mysmallset + assert_encoding hashtable myhashset + + r debug reload + assert_encoding intset myintset + assert_encoding hashtable mylargeintset + assert_encoding listpack mysmallset + assert_encoding hashtable myhashset + } {} {needs:debug} + + foreach type {listpack hashtable} { + test {SREM basics - $type} { + create_set myset $initelems($type) + r sadd myset ciao + assert_encoding $type myset + assert_equal 0 [r srem myset qux] + assert_equal 1 [r srem myset ciao] + assert_equal $initelems($type) [lsort [r smembers myset]] + } + } + + test {SREM basics - intset} { + create_set myset {3 4 5} + assert_encoding intset myset + assert_equal 0 [r srem myset 6] + assert_equal 1 [r srem myset 4] + assert_equal {3 5} [lsort [r smembers myset]] + } + + test {SREM with multiple arguments} { + r del myset + r sadd myset a b c d + assert_equal 0 [r srem myset k k k] + assert_equal 2 [r srem myset b d x y] + lsort [r smembers myset] + } {a c} + + test {SREM variadic version with more args needed to destroy the key} { + r del 
myset + r sadd myset 1 2 3 + r srem myset 1 2 3 4 5 6 7 8 + } {3} + + test "SINTERCARD with illegal arguments" { + assert_error "ERR wrong number of arguments for 'sintercard' command" {r sintercard} + assert_error "ERR wrong number of arguments for 'sintercard' command" {r sintercard 1} + + assert_error "ERR numkeys*" {r sintercard 0 myset{t}} + assert_error "ERR numkeys*" {r sintercard a myset{t}} + + assert_error "ERR Number of keys*" {r sintercard 2 myset{t}} + assert_error "ERR Number of keys*" {r sintercard 3 myset{t} myset2{t}} + + assert_error "ERR syntax error*" {r sintercard 1 myset{t} myset2{t}} + assert_error "ERR syntax error*" {r sintercard 1 myset{t} bar_arg} + assert_error "ERR syntax error*" {r sintercard 1 myset{t} LIMIT} + + assert_error "ERR LIMIT*" {r sintercard 1 myset{t} LIMIT -1} + assert_error "ERR LIMIT*" {r sintercard 1 myset{t} LIMIT a} + } + + test "SINTERCARD against non-set should throw error" { + r del set{t} + r sadd set{t} a b c + r set key1{t} x + + assert_error "WRONGTYPE*" {r sintercard 1 key1{t}} + assert_error "WRONGTYPE*" {r sintercard 2 set{t} key1{t}} + assert_error "WRONGTYPE*" {r sintercard 2 key1{t} noset{t}} + } + + test "SINTERCARD against non-existing key" { + assert_equal 0 [r sintercard 1 non-existing-key] + assert_equal 0 [r sintercard 1 non-existing-key limit 0] + assert_equal 0 [r sintercard 1 non-existing-key limit 10] + } + + foreach {type} {regular intset} { + # Create sets setN{t} where N = 1..5 + if {$type eq "regular"} { + set smallenc listpack + set bigenc hashtable + } else { + set smallenc intset + set bigenc intset + } + # Sets 1, 2 and 4 are big; sets 3 and 5 are small. 
+ array set encoding "1 $bigenc 2 $bigenc 3 $smallenc 4 $bigenc 5 $smallenc" + + for {set i 1} {$i <= 5} {incr i} { + r del [format "set%d{t}" $i] + } + for {set i 0} {$i < 200} {incr i} { + r sadd set1{t} $i + r sadd set2{t} [expr $i+195] + } + foreach i {199 195 1000 2000} { + r sadd set3{t} $i + } + for {set i 5} {$i < 200} {incr i} { + r sadd set4{t} $i + } + r sadd set5{t} 0 + + # To make sure the sets are encoded as the type we are testing -- also + # when the VM is enabled and the values may be swapped in and out + # while the tests are running -- an extra element is added to every + # set that determines its encoding. + set large 200 + if {$type eq "regular"} { + set large foo + } + + for {set i 1} {$i <= 5} {incr i} { + r sadd [format "set%d{t}" $i] $large + } + + test "Generated sets must be encoded correctly - $type" { + for {set i 1} {$i <= 5} {incr i} { + assert_encoding $encoding($i) [format "set%d{t}" $i] + } + } + + test "SINTER with two sets - $type" { + assert_equal [list 195 196 197 198 199 $large] [lsort [r sinter set1{t} set2{t}]] + } + + test "SINTERCARD with two sets - $type" { + assert_equal 6 [r sintercard 2 set1{t} set2{t}] + assert_equal 6 [r sintercard 2 set1{t} set2{t} limit 0] + assert_equal 3 [r sintercard 2 set1{t} set2{t} limit 3] + assert_equal 6 [r sintercard 2 set1{t} set2{t} limit 10] + } + + test "SINTERSTORE with two sets - $type" { + r sinterstore setres{t} set1{t} set2{t} + assert_encoding $smallenc setres{t} + assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres{t}]] + } + + test "SINTERSTORE with two sets, after a DEBUG RELOAD - $type" { + r debug reload + r sinterstore setres{t} set1{t} set2{t} + assert_encoding $smallenc setres{t} + assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres{t}]] + } {} {needs:debug} + + test "SUNION with two sets - $type" { + set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"] + assert_equal $expected [lsort [r sunion set1{t} set2{t}]] 
+ } + + test "SUNIONSTORE with two sets - $type" { + r sunionstore setres{t} set1{t} set2{t} + assert_encoding $bigenc setres{t} + set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"] + assert_equal $expected [lsort [r smembers setres{t}]] + } + + test "SINTER against three sets - $type" { + assert_equal [list 195 199 $large] [lsort [r sinter set1{t} set2{t} set3{t}]] + } + + test "SINTERCARD against three sets - $type" { + assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t}] + assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t} limit 0] + assert_equal 2 [r sintercard 3 set1{t} set2{t} set3{t} limit 2] + assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t} limit 10] + } + + test "SINTERSTORE with three sets - $type" { + r sinterstore setres{t} set1{t} set2{t} set3{t} + assert_equal [list 195 199 $large] [lsort [r smembers setres{t}]] + } + + test "SUNION with non existing keys - $type" { + set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"] + assert_equal $expected [lsort [r sunion nokey1{t} set1{t} set2{t} nokey2{t}]] + } + + test "SDIFF with two sets - $type" { + assert_equal {0 1 2 3 4} [lsort [r sdiff set1{t} set4{t}]] + } + + test "SDIFF with three sets - $type" { + assert_equal {1 2 3 4} [lsort [r sdiff set1{t} set4{t} set5{t}]] + } + + test "SDIFFSTORE with three sets - $type" { + r sdiffstore setres{t} set1{t} set4{t} set5{t} + # When we start with intsets, we should always end with intsets. 
+ if {$type eq {intset}} { + assert_encoding intset setres{t} + } + assert_equal {1 2 3 4} [lsort [r smembers setres{t}]] + } + + test "SINTER/SUNION/SDIFF with three same sets - $type" { + set expected [lsort "[r smembers set1{t}]"] + assert_equal $expected [lsort [r sinter set1{t} set1{t} set1{t}]] + assert_equal $expected [lsort [r sunion set1{t} set1{t} set1{t}]] + assert_equal {} [lsort [r sdiff set1{t} set1{t} set1{t}]] + } + } + + test "SINTERSTORE with two listpack sets where result is intset" { + r del setres{t} set1{t} set2{t} + r sadd set1{t} a b c 1 3 6 x y z + r sadd set2{t} e f g 1 2 3 u v w + assert_encoding listpack set1{t} + assert_encoding listpack set2{t} + r sinterstore setres{t} set1{t} set2{t} + assert_equal [list 1 3] [lsort [r smembers setres{t}]] + assert_encoding intset setres{t} + } + + test "SINTERSTORE with two hashtable sets where result is intset" { + r del setres{t} set1{t} set2{t} + r sadd set1{t} a b c 444 555 666 + r sadd set2{t} e f g 111 222 333 + set expected {} + for {set i 1} {$i < 130} {incr i} { + r sadd set1{t} $i + r sadd set2{t} $i + lappend expected $i + } + assert_encoding hashtable set1{t} + assert_encoding hashtable set2{t} + r sinterstore setres{t} set1{t} set2{t} + assert_equal [lsort $expected] [lsort [r smembers setres{t}]] + assert_encoding intset setres{t} + } + + test "SUNION hashtable and listpack" { + # This adds code coverage for adding a non-sds string to a hashtable set + # which already contains the string. 
+ r del set1{t} set2{t} + set union {abcdefghijklmnopqrstuvwxyz1234567890 a b c 1 2 3} + create_set set1{t} $union + create_set set2{t} {a b c} + assert_encoding hashtable set1{t} + assert_encoding listpack set2{t} + assert_equal [lsort $union] [lsort [r sunion set1{t} set2{t}]] + } + + test "SDIFF with first set empty" { + r del set1{t} set2{t} set3{t} + r sadd set2{t} 1 2 3 4 + r sadd set3{t} a b c d + r sdiff set1{t} set2{t} set3{t} + } {} + + test "SDIFF with same set two times" { + r del set1 + r sadd set1 a b c 1 2 3 4 5 6 + r sdiff set1 set1 + } {} + + test "SDIFF fuzzing" { + for {set j 0} {$j < 100} {incr j} { + unset -nocomplain s + array set s {} + set args {} + set num_sets [expr {[randomInt 10]+1}] + for {set i 0} {$i < $num_sets} {incr i} { + set num_elements [randomInt 100] + r del set_$i{t} + lappend args set_$i{t} + while {$num_elements} { + set ele [randomValue] + r sadd set_$i{t} $ele + if {$i == 0} { + set s($ele) x + } else { + unset -nocomplain s($ele) + } + incr num_elements -1 + } + } + set result [lsort [r sdiff {*}$args]] + assert_equal $result [lsort [array names s]] + } + } + + test "SDIFF against non-set should throw error" { + # with an empty set + r set key1{t} x + assert_error "WRONGTYPE*" {r sdiff key1{t} noset{t}} + # different order + assert_error "WRONGTYPE*" {r sdiff noset{t} key1{t}} + + # with a legal set + r del set1{t} + r sadd set1{t} a b c + assert_error "WRONGTYPE*" {r sdiff key1{t} set1{t}} + # different order + assert_error "WRONGTYPE*" {r sdiff set1{t} key1{t}} + } + + test "SDIFF should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r sadd set1{t} a b c + r sadd set2{t} b c d + assert_equal {a} [lsort [r sdiff set1{t} set2{t} set3{t}]] + assert_equal {} [lsort [r sdiff set3{t} set2{t} set1{t}]] + } + + test "SDIFFSTORE against non-set should throw error" { + r del set1{t} set2{t} set3{t} key1{t} + r set key1{t} x + + # with en empty dstkey + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} 
key1{t} noset{t}} + assert_equal 0 [r exists set3{t}] + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} noset{t} key1{t}} + assert_equal 0 [r exists set3{t}] + + # with a legal dstkey + r sadd set1{t} a b c + r sadd set2{t} b c d + r sadd set3{t} e + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} key1{t} set1{t} noset{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} set1{t} key1{t} set2{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + } + + test "SDIFFSTORE should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r set setres{t} xxx + assert_equal 0 [r sdiffstore setres{t} foo111{t} bar222{t}] + assert_equal 0 [r exists setres{t}] + + # with a legal dstkey, should delete dstkey + r sadd set3{t} a b c + assert_equal 0 [r sdiffstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + r sadd set1{t} a b c + assert_equal 3 [r sdiffstore set3{t} set1{t} set2{t}] + assert_equal 1 [r exists set3{t}] + assert_equal {a b c} [lsort [r smembers set3{t}]] + + # with a legal dstkey and empty set2, should delete the dstkey + r sadd set3{t} a b c + assert_equal 0 [r sdiffstore set3{t} set2{t} set1{t}] + assert_equal 0 [r exists set3{t}] + } + + test "SINTER against non-set should throw error" { + r set key1{t} x + assert_error "WRONGTYPE*" {r sinter key1{t} noset{t}} + # different order + assert_error "WRONGTYPE*" {r sinter noset{t} key1{t}} + + r sadd set1{t} a b c + assert_error "WRONGTYPE*" {r sinter key1{t} set1{t}} + # different order + assert_error "WRONGTYPE*" {r sinter set1{t} key1{t}} + } + + test "SINTER should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + r sadd set1{t} a b c + r sadd set2{t} b c d + r sinter set1{t} set2{t} set3{t} + } {} + + test "SINTER with same integer elements but different encoding" { + r del set1{t} set2{t} + r sadd set1{t} 1 2 3 + r sadd set2{t} 1 2 3 a + 
r srem set2{t} a + assert_encoding intset set1{t} + assert_encoding listpack set2{t} + lsort [r sinter set1{t} set2{t}] + } {1 2 3} + + test "SINTERSTORE against non-set should throw error" { + r del set1{t} set2{t} set3{t} key1{t} + r set key1{t} x + + # with en empty dstkey + assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} noset{t}} + assert_equal 0 [r exists set3{t}] + assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t}} + assert_equal 0 [r exists set3{t}] + + # with a legal dstkey + r sadd set1{t} a b c + r sadd set2{t} b c d + r sadd set3{t} e + assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} set2{t} noset{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + + assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t} set2{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + } + + test "SINTERSTORE against non existing keys should delete dstkey" { + r del set1{t} set2{t} set3{t} + + r set setres{t} xxx + assert_equal 0 [r sinterstore setres{t} foo111{t} bar222{t}] + assert_equal 0 [r exists setres{t}] + + # with a legal dstkey + r sadd set3{t} a b c + assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + r sadd set1{t} a b c + assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + assert_equal 0 [r sinterstore set3{t} set2{t} set1{t}] + assert_equal 0 [r exists set3{t}] + } + + test "SUNION against non-set should throw error" { + r set key1{t} x + assert_error "WRONGTYPE*" {r sunion key1{t} noset{t}} + # different order + assert_error "WRONGTYPE*" {r sunion noset{t} key1{t}} + + r del set1{t} + r sadd set1{t} a b c + assert_error "WRONGTYPE*" {r sunion key1{t} set1{t}} + # different order + assert_error "WRONGTYPE*" {r sunion set1{t} key1{t}} + } + + test "SUNION should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r sadd set1{t} a b c + r sadd 
set2{t} b c d + assert_equal {a b c d} [lsort [r sunion set1{t} set2{t} set3{t}]] + } + + test "SUNIONSTORE against non-set should throw error" { + r del set1{t} set2{t} set3{t} key1{t} + r set key1{t} x + + # with en empty dstkey + assert_error "WRONGTYPE*" {r sunionstore set3{t} key1{t} noset{t}} + assert_equal 0 [r exists set3{t}] + assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t}} + assert_equal 0 [r exists set3{t}] + + # with a legal dstkey + r sadd set1{t} a b c + r sadd set2{t} b c d + r sadd set3{t} e + assert_error "WRONGTYPE*" {r sunionstore set3{t} key1{t} key2{t} noset{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + + assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t} key2{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + } + + test "SUNIONSTORE should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r set setres{t} xxx + assert_equal 0 [r sunionstore setres{t} foo111{t} bar222{t}] + assert_equal 0 [r exists setres{t}] + + # set1 set2 both empty, should delete the dstkey + r sadd set3{t} a b c + assert_equal 0 [r sunionstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + r sadd set1{t} a b c + r sadd set3{t} e f + assert_equal 3 [r sunionstore set3{t} set1{t} set2{t}] + assert_equal 1 [r exists set3{t}] + assert_equal {a b c} [lsort [r smembers set3{t}]] + + r sadd set3{t} d + assert_equal 3 [r sunionstore set3{t} set2{t} set1{t}] + assert_equal 1 [r exists set3{t}] + assert_equal {a b c} [lsort [r smembers set3{t}]] + } + + test "SUNIONSTORE against non existing keys should delete dstkey" { + r set setres{t} xxx + assert_equal 0 [r sunionstore setres{t} foo111{t} bar222{t}] + assert_equal 0 [r exists setres{t}] + } + + foreach {type contents} {listpack {a b c} intset {1 2 3}} { + test "SPOP basics - $type" { + create_set myset $contents + assert_encoding $type myset + assert_equal $contents [lsort 
[list [r spop myset] [r spop myset] [r spop myset]]] + assert_equal 0 [r scard myset] + } + + test "SPOP with =1 - $type" { + create_set myset $contents + assert_encoding $type myset + assert_equal $contents [lsort [list [r spop myset 1] [r spop myset 1] [r spop myset 1]]] + assert_equal 0 [r scard myset] + } + + test "SRANDMEMBER - $type" { + create_set myset $contents + unset -nocomplain myset + array set myset {} + for {set i 0} {$i < 100} {incr i} { + set myset([r srandmember myset]) 1 + } + assert_equal $contents [lsort [array names myset]] + } + } + + test "SPOP integer from listpack set" { + create_set myset {a 1 2 3 4 5 6 7} + assert_encoding listpack myset + set a [r spop myset] + set b [r spop myset] + assert {[string is digit $a] || [string is digit $b]} + } + + foreach {type contents} { + listpack {a b c d e f g h i j k l m n o p q r s t u v w x y z} + intset {1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 3 4 5 6 7 8 9} + hashtable {ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 b c d e f g h i j k l m n o p q r s t u v w x y z} + } { + test "SPOP with - $type" { + create_set myset $contents + assert_encoding $type myset + assert_equal $contents [lsort [concat [r spop myset 11] [r spop myset 9] [r spop myset 0] [r spop myset 4] [r spop myset 1] [r spop myset 0] [r spop myset 1] [r spop myset 0]]] + assert_equal 0 [r scard myset] + } + } + + # As seen in intsetRandomMembers + test "SPOP using integers, testing Knuth's and Floyd's algorithm" { + create_set myset {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + assert_encoding intset myset + assert_equal 20 [r scard myset] + r spop myset 1 + assert_equal 19 [r scard myset] + r spop myset 2 + assert_equal 17 [r scard myset] + r spop myset 3 + assert_equal 14 [r scard myset] + r spop myset 10 + assert_equal 4 [r scard myset] + r spop myset 10 + assert_equal 0 [r scard myset] + r spop myset 1 + assert_equal 0 [r scard myset] + } {} + + test "SPOP using integers with Knuth's algorithm" { + r spop 
nonexisting_key 100 + } {} + + foreach {type content} { + intset {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + listpack {a 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + } { + test "SPOP new implementation: code path #1 $type" { + create_set myset $content + assert_encoding $type myset + set res [r spop myset 30] + assert {[lsort $content] eq [lsort $res]} + assert_equal {0} [r exists myset] + } + + test "SPOP new implementation: code path #2 $type" { + create_set myset $content + assert_encoding $type myset + set res [r spop myset 2] + assert {[llength $res] == 2} + assert {[r scard myset] == 18} + set union [concat [r smembers myset] $res] + assert {[lsort $union] eq [lsort $content]} + } + + test "SPOP new implementation: code path #3 $type" { + create_set myset $content + assert_encoding $type myset + set res [r spop myset 18] + assert {[llength $res] == 18} + assert {[r scard myset] == 2} + set union [concat [r smembers myset] $res] + assert {[lsort $union] eq [lsort $content]} + } + } + + test "SPOP new implementation: code path #1 propagate as DEL or UNLINK" { + r del myset1{t} myset2{t} + r sadd myset1{t} 1 2 3 4 5 + r sadd myset2{t} 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 + + set repl [attach_to_replication_stream] + + r config set lazyfree-lazy-server-del no + r spop myset1{t} [r scard myset1{t}] + r config set lazyfree-lazy-server-del yes + r spop myset2{t} [r scard myset2{t}] + assert_equal {0} [r exists myset1{t} myset2{t}] + + # Verify the propagate of DEL and UNLINK. 
+ assert_replication_stream $repl { + {select *} + {del myset1{t}} + {unlink myset2{t}} + } + + close_replication_stream $repl + } {} {needs:repl} + + test "SRANDMEMBER count of 0 is handled correctly" { + r srandmember myset 0 + } {} + + test "SRANDMEMBER with against non existing key" { + r srandmember nonexisting_key 100 + } {} + + test "SRANDMEMBER count overflow" { + r sadd myset a + assert_error {*value is out of range*} {r srandmember myset -9223372036854775808} + } {} + + # Make sure we can distinguish between an empty array and a null response + r readraw 1 + + test "SRANDMEMBER count of 0 is handled correctly - emptyarray" { + r srandmember myset 0 + } {*0} + + test "SRANDMEMBER with against non existing key - emptyarray" { + r srandmember nonexisting_key 100 + } {*0} + + r readraw 0 + + foreach {type contents} { + listpack { + 1 5 10 50 125 50000 33959417 4775547 65434162 + 12098459 427716 483706 2726473884 72615637475 + MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA + SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN + SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH + KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA + BRENDA AMY ANNA REBECCA VIRGINIA KATHLEEN + } + intset { + 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 + 30 31 32 33 34 35 36 37 38 39 + 40 41 42 43 44 45 46 47 48 49 + } + hashtable { + ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 + 1 5 10 50 125 50000 33959417 4775547 65434162 + 12098459 427716 483706 2726473884 72615637475 + MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA + SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN + SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH + KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA + BRENDA AMY ANNA REBECCA VIRGINIA + } + } { + test "SRANDMEMBER with - $type" { + create_set myset $contents + assert_encoding $type myset + unset -nocomplain myset + array set myset {} + foreach ele [r smembers myset] { + set myset($ele) 1 + } + 
assert_equal [lsort $contents] [lsort [array names myset]] + + # Make sure that a count of 0 is handled correctly. + assert_equal [r srandmember myset 0] {} + + # We'll stress different parts of the code, see the implementation + # of SRANDMEMBER for more information, but basically there are + # four different code paths. + # + # PATH 1: Use negative count. + # + # 1) Check that it returns repeated elements. + set res [r srandmember myset -100] + assert_equal [llength $res] 100 + + # 2) Check that all the elements actually belong to the + # original set. + foreach ele $res { + assert {[info exists myset($ele)]} + } + + # 3) Check that eventually all the elements are returned. + unset -nocomplain auxset + set iterations 1000 + while {$iterations != 0} { + incr iterations -1 + set res [r srandmember myset -10] + foreach ele $res { + set auxset($ele) 1 + } + if {[lsort [array names myset]] eq + [lsort [array names auxset]]} { + break; + } + } + assert {$iterations != 0} + + # PATH 2: positive count (unique behavior) with requested size + # equal or greater than set size. + foreach size {50 100} { + set res [r srandmember myset $size] + assert_equal [llength $res] 50 + assert_equal [lsort $res] [lsort [array names myset]] + } + + # PATH 3: Ask almost as elements as there are in the set. + # In this case the implementation will duplicate the original + # set and will remove random elements up to the requested size. + # + # PATH 4: Ask a number of elements definitely smaller than + # the set size. + # + # We can test both the code paths just changing the size but + # using the same code. + + foreach size {45 5} { + set res [r srandmember myset $size] + assert_equal [llength $res] $size + + # 1) Check that all the elements actually belong to the + # original set. + foreach ele $res { + assert {[info exists myset($ele)]} + } + + # 2) Check that eventually all the elements are returned. 
+ unset -nocomplain auxset + set iterations 1000 + while {$iterations != 0} { + incr iterations -1 + set res [r srandmember myset $size] + foreach ele $res { + set auxset($ele) 1 + } + if {[lsort [array names myset]] eq + [lsort [array names auxset]]} { + break; + } + } + assert {$iterations != 0} + } + } + } + + foreach {type contents} { + listpack { + 1 5 10 50 125 + MARY PATRICIA LINDA BARBARA ELIZABETH + } + intset { + 0 1 2 3 4 5 6 7 8 9 + } + hashtable { + ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 + 1 5 10 50 125 + MARY PATRICIA LINDA BARBARA + } + } { + test "SRANDMEMBER histogram distribution - $type" { + create_set myset $contents + assert_encoding $type myset + unset -nocomplain myset + array set myset {} + foreach ele [r smembers myset] { + set myset($ele) 1 + } + + # Use negative count (PATH 1). + # df = 9, 40 means 0.00001 probability + set res [r srandmember myset -1000] + assert_lessthan [chi_square_value $res] 40 + + # Use positive count (both PATH 3 and PATH 4). + foreach size {8 2} { + unset -nocomplain allkey + set iterations [expr {1000 / $size}] + while {$iterations != 0} { + incr iterations -1 + set res [r srandmember myset $size] + foreach ele $res { + lappend allkey $ele + } + } + # df = 9, 40 means 0.00001 probability + assert_lessthan [chi_square_value $allkey] 40 + } + } + } + + proc is_rehashing {myset} { + set htstats [r debug HTSTATS-KEY $myset] + return [string match {*rehashing target*} $htstats] + } + + proc rem_hash_set_top_N {myset n} { + set cursor 0 + set members {} + set enough 0 + while 1 { + set res [r sscan $myset $cursor] + set cursor [lindex $res 0] + set k [lindex $res 1] + foreach m $k { + lappend members $m + if {[llength $members] >= $n} { + set enough 1 + break + } + } + if {$enough || $cursor == 0} { + break + } + } + r srem $myset {*}$members + } + + proc verify_rehashing_completed_key {myset table_size keys} { + set htstats [r debug HTSTATS-KEY $myset] + assert {![string match {*rehashing target*} $htstats]} + return 
{[string match {*table size: $table_size*number of elements: $keys*} $htstats]} + } + + test "SRANDMEMBER with a dict containing long chain" { + set origin_save [config_get_set save ""] + set origin_max_lp [config_get_set set-max-listpack-entries 0] + set origin_save_delay [config_get_set rdb-key-save-delay 2147483647] + + # 1) Create a hash set with 100000 members. + set members {} + for {set i 0} {$i < 100000} {incr i} { + lappend members [format "m:%d" $i] + } + create_set myset $members + + # 2) Wait for the hash set rehashing to finish. + while {[is_rehashing myset]} { + r srandmember myset 100 + } + + # 3) Turn off the rehashing of this set, and remove the members to 500. + r bgsave + rem_hash_set_top_N myset [expr {[r scard myset] - 500}] + assert_equal [r scard myset] 500 + + # 4) Kill RDB child process to restart rehashing. + set pid1 [get_child_pid 0] + catch {exec kill -9 $pid1} + waitForBgsave r + + # 5) Let the set hash to start rehashing + r spop myset 1 + assert [is_rehashing myset] + + # 6) Verify that when rdb saving is in progress, rehashing will still be performed (because + # the ratio is extreme) by waiting for it to finish during an active bgsave. + r bgsave + + while {[is_rehashing myset]} { + r srandmember myset 1 + } + if {$::verbose} { + puts [r debug HTSTATS-KEY myset full] + } + + set pid1 [get_child_pid 0] + catch {exec kill -9 $pid1} + waitForBgsave r + + # 7) Check that eventually, SRANDMEMBER returns all elements. + array set allmyset {} + foreach ele [r smembers myset] { + set allmyset($ele) 1 + } + unset -nocomplain auxset + set iterations 1000 + while {$iterations != 0} { + incr iterations -1 + set res [r srandmember myset -10] + foreach ele $res { + set auxset($ele) 1 + } + if {[lsort [array names allmyset]] eq + [lsort [array names auxset]]} { + break; + } + } + assert {$iterations != 0} + + # 8) Remove the members to 30 in order to calculate the value of Chi-Square Distribution, + # otherwise we would need more iterations. 
+ rem_hash_set_top_N myset [expr {[r scard myset] - 30}] + assert_equal [r scard myset] 30 + + # Hash set rehashing would be completed while removing members from the `myset` + # We also check the size and members in the hash table. + verify_rehashing_completed_key myset 64 30 + + # Now that we have a hash set with only one long chain bucket. + set htstats [r debug HTSTATS-KEY myset full] + assert {[regexp {different slots: ([0-9]+)} $htstats - different_slots]} + assert {[regexp {max chain length: ([0-9]+)} $htstats - max_chain_length]} + assert {$different_slots == 1 && $max_chain_length == 30} + + # 9) Use positive count (PATH 4) to get 10 elements (out of 30) each time. + unset -nocomplain allkey + set iterations 1000 + while {$iterations != 0} { + incr iterations -1 + set res [r srandmember myset 10] + foreach ele $res { + lappend allkey $ele + } + } + # validate even distribution of random sampling (df = 29, 73 means 0.00001 probability) + assert_lessthan [chi_square_value $allkey] 73 + + r config set save $origin_save + r config set set-max-listpack-entries $origin_max_lp + r config set rdb-key-save-delay $origin_save_delay + } {OK} {needs:debug slow} + + proc setup_move {} { + r del myset3{t} myset4{t} + create_set myset1{t} {1 a b} + create_set myset2{t} {2 3 4} + assert_encoding listpack myset1{t} + assert_encoding intset myset2{t} + } + + test "SMOVE basics - from regular set to intset" { + # move a non-integer element to an intset should convert encoding + setup_move + assert_equal 1 [r smove myset1{t} myset2{t} a] + assert_equal {1 b} [lsort [r smembers myset1{t}]] + assert_equal {2 3 4 a} [lsort [r smembers myset2{t}]] + assert_encoding listpack myset2{t} + + # move an integer element should not convert the encoding + setup_move + assert_equal 1 [r smove myset1{t} myset2{t} 1] + assert_equal {a b} [lsort [r smembers myset1{t}]] + assert_equal {1 2 3 4} [lsort [r smembers myset2{t}]] + assert_encoding intset myset2{t} + } + + test "SMOVE basics - from 
intset to regular set" { + setup_move + assert_equal 1 [r smove myset2{t} myset1{t} 2] + assert_equal {1 2 a b} [lsort [r smembers myset1{t}]] + assert_equal {3 4} [lsort [r smembers myset2{t}]] + } + + test "SMOVE non existing key" { + setup_move + assert_equal 0 [r smove myset1{t} myset2{t} foo] + assert_equal 0 [r smove myset1{t} myset1{t} foo] + assert_equal {1 a b} [lsort [r smembers myset1{t}]] + assert_equal {2 3 4} [lsort [r smembers myset2{t}]] + } + + test "SMOVE non existing src set" { + setup_move + assert_equal 0 [r smove noset{t} myset2{t} foo] + assert_equal {2 3 4} [lsort [r smembers myset2{t}]] + } + + test "SMOVE from regular set to non existing destination set" { + setup_move + assert_equal 1 [r smove myset1{t} myset3{t} a] + assert_equal {1 b} [lsort [r smembers myset1{t}]] + assert_equal {a} [lsort [r smembers myset3{t}]] + assert_encoding listpack myset3{t} + } + + test "SMOVE from intset to non existing destination set" { + setup_move + assert_equal 1 [r smove myset2{t} myset3{t} 2] + assert_equal {3 4} [lsort [r smembers myset2{t}]] + assert_equal {2} [lsort [r smembers myset3{t}]] + assert_encoding intset myset3{t} + } + + test "SMOVE wrong src key type" { + r set x{t} 10 + assert_error "WRONGTYPE*" {r smove x{t} myset2{t} foo} + } + + test "SMOVE wrong dst key type" { + r set x{t} 10 + assert_error "WRONGTYPE*" {r smove myset2{t} x{t} foo} + } + + test "SMOVE with identical source and destination" { + r del set{t} + r sadd set{t} a b c + r smove set{t} set{t} b + lsort [r smembers set{t}] + } {a b c} + + test "SMOVE only notify dstset when the addition is successful" { + r del srcset{t} + r del dstset{t} + + r sadd srcset{t} a b + r sadd dstset{t} a + + r watch dstset{t} + + r multi + r sadd dstset{t} c + + set r2 [redis_client] + $r2 smove srcset{t} dstset{t} a + + # The dstset is actually unchanged, multi should success + r exec + set res [r scard dstset{t}] + assert_equal $res 2 + $r2 close + } + + tags {slow} { + test {intsets 
implementation stress testing} { + for {set j 0} {$j < 20} {incr j} { + unset -nocomplain s + array set s {} + r del s + set len [randomInt 1024] + for {set i 0} {$i < $len} {incr i} { + randpath { + set data [randomInt 65536] + } { + set data [randomInt 4294967296] + } { + set data [randomInt 18446744073709551616] + } + set s($data) {} + r sadd s $data + } + assert_equal [lsort [r smembers s]] [lsort [array names s]] + set len [array size s] + for {set i 0} {$i < $len} {incr i} { + set e [r spop s] + if {![info exists s($e)]} { + puts "Can't find '$e' on local array" + puts "Local array: [lsort [r smembers s]]" + puts "Remote array: [lsort [array names s]]" + error "exception" + } + array unset s $e + } + assert_equal [r scard s] 0 + assert_equal [array size s] 0 + } + } + } +} + +run_solo {set-large-memory} { +start_server [list overrides [list save ""] ] { + +# test if the server supports such large configs (avoid 32 bit builds) +catch { + r config set proto-max-bulk-len 10000000000 ;#10gb + r config set client-query-buffer-limit 10000000000 ;#10gb +} +if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} { + + set str_length 4400000000 ;#~4.4GB + + test {SADD, SCARD, SISMEMBER - large data} { + r flushdb + r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n" + assert_equal 1 [write_big_bulk $str_length "aaa"] + r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n" + assert_equal 1 [write_big_bulk $str_length "bbb"] + r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n" + assert_equal 0 [write_big_bulk $str_length "aaa"] + assert_encoding hashtable myset + set s0 [s used_memory] + assert {$s0 > [expr $str_length * 2]} + assert_equal 2 [r scard myset] + + r write "*3\r\n\$9\r\nSISMEMBER\r\n\$5\r\nmyset\r\n" + assert_equal 1 [write_big_bulk $str_length "aaa"] + r write "*3\r\n\$9\r\nSISMEMBER\r\n\$5\r\nmyset\r\n" + assert_equal 0 [write_big_bulk $str_length "ccc"] + r write "*3\r\n\$4\r\nSREM\r\n\$5\r\nmyset\r\n" + assert_equal 1 [write_big_bulk $str_length "bbb"] + 
assert_equal [read_big_bulk {r spop myset} yes "aaa"] $str_length + } {} {large-memory} + + # restore defaults + r config set proto-max-bulk-len 536870912 + r config set client-query-buffer-limit 1073741824 + +} ;# skip 32bit builds +} +} ;# run_solo \ No newline at end of file diff --git a/tests/unit/type/string.tcl b/tests/unit/type/string.tcl new file mode 100644 index 000000000..49615e8aa --- /dev/null +++ b/tests/unit/type/string.tcl @@ -0,0 +1,674 @@ +start_server {tags {"string"}} { + test {SET and GET an item} { + r set x foobar + r get x + } {foobar} + + test {SET and GET an empty item} { + r set x {} + r get x + } {} + + test {Very big payload in GET/SET} { + set buf [string repeat "abcd" 1000000] + r set foo $buf + r get foo + } [string repeat "abcd" 1000000] + + tags {"slow"} { + test {Very big payload random access} { + set err {} + array set payload {} + for {set j 0} {$j < 100} {incr j} { + set size [expr 1+[randomInt 100000]] + set buf [string repeat "pl-$j" $size] + set payload($j) $buf + r set bigpayload_$j $buf + } + for {set j 0} {$j < 1000} {incr j} { + set index [randomInt 100] + set buf [r get bigpayload_$index] + if {$buf != $payload($index)} { + set err "Values differ: I set '$payload($index)' but I read back '$buf'" + break + } + } + unset payload + set _ $err + } {} + + test {SET 10000 numeric keys and access all them in reverse order} { + r flushdb + set err {} + for {set x 0} {$x < 10000} {incr x} { + r set $x $x + } + set sum 0 + for {set x 9999} {$x >= 0} {incr x -1} { + set val [r get $x] + if {$val ne $x} { + set err "Element at position $x is $val instead of $x" + break + } + } + set _ $err + } {} + + test {DBSIZE should be 10000 now} { + r dbsize + } {10000} + } + + test "SETNX target key missing" { + r del novar + assert_equal 1 [r setnx novar foobared] + assert_equal "foobared" [r get novar] + } + + test "SETNX target key exists" { + r set novar foobared + assert_equal 0 [r setnx novar blabla] + assert_equal "foobared" [r get 
novar] + } + + test "SETNX against not-expired volatile key" { + r set x 10 + r expire x 10000 + assert_equal 0 [r setnx x 20] + assert_equal 10 [r get x] + } + + test "SETNX against expired volatile key" { + # Make it very unlikely for the key this test uses to be expired by the + # active expiry cycle. This is tightly coupled to the implementation of + # active expiry and dbAdd() but currently the only way to test that + # SETNX expires a key when it should have been. + for {set x 0} {$x < 9999} {incr x} { + r setex key-$x 3600 value + } + + # This will be one of 10000 expiring keys. A cycle is executed every + # 100ms, sampling 10 keys for being expired or not. This key will be + # expired for at most 1s when we wait 2s, resulting in a total sample + # of 100 keys. The probability of the success of this test being a + # false positive is therefore approx. 1%. + r set x 10 + r expire x 1 + + # Wait for the key to expire + after 2000 + + assert_equal 1 [r setnx x 20] + assert_equal 20 [r get x] + } + + test "GETEX EX option" { + r del foo + r set foo bar + r getex foo ex 10 + assert_range [r ttl foo] 5 10 + } + + test "GETEX PX option" { + r del foo + r set foo bar + r getex foo px 10000 + assert_range [r pttl foo] 5000 10000 + } + + test "GETEX EXAT option" { + r del foo + r set foo bar + r getex foo exat [expr [clock seconds] + 10] + assert_range [r ttl foo] 5 10 + } + + test "GETEX PXAT option" { + r del foo + r set foo bar + r getex foo pxat [expr [clock milliseconds] + 10000] + assert_range [r pttl foo] 5000 10000 + } + + test "GETEX PERSIST option" { + r del foo + r set foo bar ex 10 + assert_range [r ttl foo] 5 10 + r getex foo persist + assert_equal -1 [r ttl foo] + } + + test "GETEX no option" { + r del foo + r set foo bar + r getex foo + assert_equal bar [r getex foo] + } + + test "GETEX syntax errors" { + set ex {} + catch {r getex foo non-existent-option} ex + set ex + } {*syntax*} + + test "GETEX and GET expired key or not exist" { + r del foo + r set 
foo bar px 1 + after 2 + assert_equal {} [r getex foo] + assert_equal {} [r get foo] + } + + test "GETEX no arguments" { + set ex {} + catch {r getex} ex + set ex + } {*wrong number of arguments for 'getex' command} + + test "GETDEL command" { + r del foo + r set foo bar + assert_equal bar [r getdel foo ] + assert_equal {} [r getdel foo ] + } + + test {GETDEL propagate as DEL command to replica} { + set repl [attach_to_replication_stream] + r set foo bar + r getdel foo + assert_replication_stream $repl { + {select *} + {set foo bar} + {del foo} + } + close_replication_stream $repl + } {} {needs:repl} + + test {GETEX without argument does not propagate to replica} { + set repl [attach_to_replication_stream] + r set foo bar + r getex foo + r del foo + assert_replication_stream $repl { + {select *} + {set foo bar} + {del foo} + } + close_replication_stream $repl + } {} {needs:repl} + + test {MGET} { + r flushdb + r set foo{t} BAR + r set bar{t} FOO + r mget foo{t} bar{t} + } {BAR FOO} + + test {MGET against non existing key} { + r mget foo{t} baazz{t} bar{t} + } {BAR {} FOO} + + test {MGET against non-string key} { + r sadd myset{t} ciao + r sadd myset{t} bau + r mget foo{t} baazz{t} bar{t} myset{t} + } {BAR {} FOO {}} + + test {GETSET (set new value)} { + r del foo + list [r getset foo xyz] [r get foo] + } {{} xyz} + + test {GETSET (replace old value)} { + r set foo bar + list [r getset foo xyz] [r get foo] + } {bar xyz} + + test {MSET base case} { + r mset x{t} 10 y{t} "foo bar" z{t} "x x x x x x x\n\n\r\n" + r mget x{t} y{t} z{t} + } [list 10 {foo bar} "x x x x x x x\n\n\r\n"] + + test {MSET/MSETNX wrong number of args} { + assert_error {*wrong number of arguments for 'mset' command} {r mset x{t} 10 y{t} "foo bar" z{t}} + assert_error {*wrong number of arguments for 'msetnx' command} {r msetnx x{t} 20 y{t} "foo bar" z{t}} + } + + test {MSET with already existing - same key twice} { + r set x{t} x + list [r mset x{t} xxx x{t} yyy] [r get x{t}] + } {OK yyy} + + test 
{MSETNX with already existent key} { + list [r msetnx x1{t} xxx y2{t} yyy x{t} 20] [r exists x1{t}] [r exists y2{t}] + } {0 0 0} + + test {MSETNX with not existing keys} { + list [r msetnx x1{t} xxx y2{t} yyy] [r get x1{t}] [r get y2{t}] + } {1 xxx yyy} + + test {MSETNX with not existing keys - same key twice} { + r del x1{t} + list [r msetnx x1{t} xxx x1{t} yyy] [r get x1{t}] + } {1 yyy} + + test {MSETNX with already existing keys - same key twice} { + list [r msetnx x1{t} xxx x1{t} zzz] [r get x1{t}] + } {0 yyy} + + test "STRLEN against non-existing key" { + assert_equal 0 [r strlen notakey] + } + + test "STRLEN against integer-encoded value" { + r set myinteger -555 + assert_equal 4 [r strlen myinteger] + } + + test "STRLEN against plain string" { + r set mystring "foozzz0123456789 baz" + assert_equal 20 [r strlen mystring] + } + + test "SETBIT against non-existing key" { + r del mykey + assert_equal 0 [r setbit mykey 1 1] + assert_equal [binary format B* 01000000] [r get mykey] + } + + test "SETBIT against string-encoded key" { + # Ascii "@" is integer 64 = 01 00 00 00 + r set mykey "@" + + assert_equal 0 [r setbit mykey 2 1] + assert_equal [binary format B* 01100000] [r get mykey] + assert_equal 1 [r setbit mykey 1 0] + assert_equal [binary format B* 00100000] [r get mykey] + } + + test "SETBIT against integer-encoded key" { + # Ascii "1" is integer 49 = 00 11 00 01 + r set mykey 1 + assert_encoding int mykey + + assert_equal 0 [r setbit mykey 6 1] + assert_equal [binary format B* 00110011] [r get mykey] + assert_equal 1 [r setbit mykey 2 0] + assert_equal [binary format B* 00010011] [r get mykey] + } + + test "SETBIT against key with wrong type" { + r del mykey + r lpush mykey "foo" + assert_error "WRONGTYPE*" {r setbit mykey 0 1} + } + + test "SETBIT with out of range bit offset" { + r del mykey + assert_error "*out of range*" {r setbit mykey [expr 4*1024*1024*1024] 1} + assert_error "*out of range*" {r setbit mykey -1 1} + } + + test "SETBIT with non-bit 
argument" { + r del mykey + assert_error "*out of range*" {r setbit mykey 0 -1} + assert_error "*out of range*" {r setbit mykey 0 2} + assert_error "*out of range*" {r setbit mykey 0 10} + assert_error "*out of range*" {r setbit mykey 0 20} + } + + test "SETBIT fuzzing" { + set str "" + set len [expr 256*8] + r del mykey + + for {set i 0} {$i < 2000} {incr i} { + set bitnum [randomInt $len] + set bitval [randomInt 2] + set fmt [format "%%-%ds%%d%%-s" $bitnum] + set head [string range $str 0 $bitnum-1] + set tail [string range $str $bitnum+1 end] + set str [string map {" " 0} [format $fmt $head $bitval $tail]] + + r setbit mykey $bitnum $bitval + assert_equal [binary format B* $str] [r get mykey] + } + } + + test "GETBIT against non-existing key" { + r del mykey + assert_equal 0 [r getbit mykey 0] + } + + test "GETBIT against string-encoded key" { + # Single byte with 2nd and 3rd bit set + r set mykey "`" + + # In-range + assert_equal 0 [r getbit mykey 0] + assert_equal 1 [r getbit mykey 1] + assert_equal 1 [r getbit mykey 2] + assert_equal 0 [r getbit mykey 3] + + # Out-range + assert_equal 0 [r getbit mykey 8] + assert_equal 0 [r getbit mykey 100] + assert_equal 0 [r getbit mykey 10000] + } + + test "GETBIT against integer-encoded key" { + r set mykey 1 + assert_encoding int mykey + + # Ascii "1" is integer 49 = 00 11 00 01 + assert_equal 0 [r getbit mykey 0] + assert_equal 0 [r getbit mykey 1] + assert_equal 1 [r getbit mykey 2] + assert_equal 1 [r getbit mykey 3] + + # Out-range + assert_equal 0 [r getbit mykey 8] + assert_equal 0 [r getbit mykey 100] + assert_equal 0 [r getbit mykey 10000] + } + + test "SETRANGE against non-existing key" { + r del mykey + assert_equal 3 [r setrange mykey 0 foo] + assert_equal "foo" [r get mykey] + + r del mykey + assert_equal 0 [r setrange mykey 0 ""] + assert_equal 0 [r exists mykey] + + r del mykey + assert_equal 4 [r setrange mykey 1 foo] + assert_equal "\000foo" [r get mykey] + } + + test "SETRANGE against string-encoded 
key" { + r set mykey "foo" + assert_equal 3 [r setrange mykey 0 b] + assert_equal "boo" [r get mykey] + + r set mykey "foo" + assert_equal 3 [r setrange mykey 0 ""] + assert_equal "foo" [r get mykey] + + r set mykey "foo" + assert_equal 3 [r setrange mykey 1 b] + assert_equal "fbo" [r get mykey] + + r set mykey "foo" + assert_equal 7 [r setrange mykey 4 bar] + assert_equal "foo\000bar" [r get mykey] + } + + test "SETRANGE against integer-encoded key" { + r set mykey 1234 + assert_encoding int mykey + assert_equal 4 [r setrange mykey 0 2] + assert_encoding raw mykey + assert_equal 2234 [r get mykey] + + # Shouldn't change encoding when nothing is set + r set mykey 1234 + assert_encoding int mykey + assert_equal 4 [r setrange mykey 0 ""] + assert_encoding int mykey + assert_equal 1234 [r get mykey] + + r set mykey 1234 + assert_encoding int mykey + assert_equal 4 [r setrange mykey 1 3] + assert_encoding raw mykey + assert_equal 1334 [r get mykey] + + r set mykey 1234 + assert_encoding int mykey + assert_equal 6 [r setrange mykey 5 2] + assert_encoding raw mykey + assert_equal "1234\0002" [r get mykey] + } + + test "SETRANGE against key with wrong type" { + r del mykey + r lpush mykey "foo" + assert_error "WRONGTYPE*" {r setrange mykey 0 bar} + } + + test "SETRANGE with out of range offset" { + r del mykey + assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} + + r set mykey "hello" + assert_error "*out of range*" {r setrange mykey -1 world} + assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} + } + + test "GETRANGE against non-existing key" { + r del mykey + assert_equal "" [r getrange mykey 0 -1] + } + + test "GETRANGE against wrong key type" { + r lpush lkey1 "list" + assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r getrange lkey1 0 -1} + } + + test "GETRANGE against string value" { + r set mykey "Hello World" + assert_equal "Hell" [r getrange mykey 0 3] + 
assert_equal "Hello World" [r getrange mykey 0 -1] + assert_equal "orld" [r getrange mykey -4 -1] + assert_equal "" [r getrange mykey 5 3] + assert_equal " World" [r getrange mykey 5 5000] + assert_equal "Hello World" [r getrange mykey -5000 10000] + } + + test "GETRANGE against integer-encoded value" { + r set mykey 1234 + assert_equal "123" [r getrange mykey 0 2] + assert_equal "1234" [r getrange mykey 0 -1] + assert_equal "234" [r getrange mykey -3 -1] + assert_equal "" [r getrange mykey 5 3] + assert_equal "4" [r getrange mykey 3 5000] + assert_equal "1234" [r getrange mykey -5000 10000] + } + + test "GETRANGE fuzzing" { + for {set i 0} {$i < 1000} {incr i} { + r set bin [set bin [randstring 0 1024 binary]] + set _start [set start [randomInt 1500]] + set _end [set end [randomInt 1500]] + if {$_start < 0} {set _start "end-[abs($_start)-1]"} + if {$_end < 0} {set _end "end-[abs($_end)-1]"} + assert_equal [string range $bin $_start $_end] [r getrange bin $start $end] + } + } + + test "Coverage: SUBSTR" { + r set key abcde + assert_equal "a" [r substr key 0 0] + assert_equal "abcd" [r substr key 0 3] + assert_equal "bcde" [r substr key -4 -1] + assert_equal "" [r substr key -1 -3] + assert_equal "" [r substr key 7 8] + assert_equal "" [r substr nokey 0 1] + } + +if {[string match {*jemalloc*} [s mem_allocator]]} { + test {trim on SET with big value} { + # set a big value to trigger increasing the query buf + r set key [string repeat A 100000] + # set a smaller value but > PROTO_MBULK_BIG_ARG (32*1024) Redis will try to save the query buf itself on the DB. + r set key [string repeat A 33000] + # asset the value was trimmed + assert {[r memory usage key] < 42000}; # 42K to count for Jemalloc's additional memory overhead. 
+ } +} ;# if jemalloc + + test {Extended SET can detect syntax errors} { + set e {} + catch {r set foo bar non-existing-option} e + set e + } {*syntax*} + + test {Extended SET NX option} { + r del foo + set v1 [r set foo 1 nx] + set v2 [r set foo 2 nx] + list $v1 $v2 [r get foo] + } {OK {} 1} + + test {Extended SET XX option} { + r del foo + set v1 [r set foo 1 xx] + r set foo bar + set v2 [r set foo 2 xx] + list $v1 $v2 [r get foo] + } {{} OK 2} + + test {Extended SET GET option} { + r del foo + r set foo bar + set old_value [r set foo bar2 GET] + set new_value [r get foo] + list $old_value $new_value + } {bar bar2} + + test {Extended SET GET option with no previous value} { + r del foo + set old_value [r set foo bar GET] + set new_value [r get foo] + list $old_value $new_value + } {{} bar} + + test {Extended SET GET option with XX} { + r del foo + r set foo bar + set old_value [r set foo baz GET XX] + set new_value [r get foo] + list $old_value $new_value + } {bar baz} + + test {Extended SET GET option with XX and no previous value} { + r del foo + set old_value [r set foo bar GET XX] + set new_value [r get foo] + list $old_value $new_value + } {{} {}} + + test {Extended SET GET option with NX} { + r del foo + set old_value [r set foo bar GET NX] + set new_value [r get foo] + list $old_value $new_value + } {{} bar} + + test {Extended SET GET option with NX and previous value} { + r del foo + r set foo bar + set old_value [r set foo baz GET NX] + set new_value [r get foo] + list $old_value $new_value + } {bar bar} + + test {Extended SET GET with incorrect type should result in wrong type error} { + r del foo + r rpush foo waffle + catch {r set foo bar GET} err1 + assert_equal "waffle" [r rpop foo] + set err1 + } {*WRONGTYPE*} + + test {Extended SET EX option} { + r del foo + r set foo bar ex 10 + set ttl [r ttl foo] + assert {$ttl <= 10 && $ttl > 5} + } + + test {Extended SET PX option} { + r del foo + r set foo bar px 10000 + set ttl [r ttl foo] + assert {$ttl <= 
10 && $ttl > 5} + } + + test "Extended SET EXAT option" { + r del foo + r set foo bar exat [expr [clock seconds] + 10] + assert_range [r ttl foo] 5 10 + } + + test "Extended SET PXAT option" { + r del foo + r set foo bar pxat [expr [clock milliseconds] + 10000] + assert_range [r ttl foo] 5 10 + } + test {Extended SET using multiple options at once} { + r set foo val + assert {[r set foo bar xx px 10000] eq {OK}} + set ttl [r ttl foo] + assert {$ttl <= 10 && $ttl > 5} + } + + test {GETRANGE with huge ranges, Github issue #1844} { + r set foo bar + r getrange foo 0 4294967297 + } {bar} + + set rna1 {CACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTTCGTCCGGGTGTG} + set rna2 {ATTAAAGGTTTATACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTT} + set rnalcs {ACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTT} + + test {LCS basic} { + r set virus1{t} $rna1 + r set virus2{t} $rna2 + r LCS virus1{t} virus2{t} + } $rnalcs + + test {LCS len} { + r set virus1{t} $rna1 + r set virus2{t} $rna2 + r LCS virus1{t} virus2{t} LEN + } [string length $rnalcs] + + test {LCS indexes} { + dict get [r LCS virus1{t} virus2{t} IDX] matches + } {{{238 238} {239 239}} {{236 236} {238 238}} {{229 230} {236 237}} {{224 224} {235 235}} {{1 222} {13 234}}} + + test {LCS indexes with match len} { + dict get [r LCS virus1{t} virus2{t} IDX WITHMATCHLEN] matches + } {{{238 238} {239 239} 1} {{236 236} {238 238} 1} {{229 230} {236 237} 2} {{224 224} {235 235} 1} {{1 222} {13 234} 
222}} + + test {LCS indexes with match len and minimum match len} { + dict get [r LCS virus1{t} virus2{t} IDX WITHMATCHLEN MINMATCHLEN 5] matches + } {{{1 222} {13 234} 222}} + + test {SETRANGE with huge offset} { + foreach value {9223372036854775807 2147483647} { + catch {[r setrange K $value A]} res + # expecting a different error on 32 and 64 bit systems + if {![string match "*string exceeds maximum allowed size*" $res] && ![string match "*out of range*" $res]} { + assert_equal $res "expecting an error" + } + } + } + + test {APPEND modifies the encoding from int to raw} { + r del foo + r set foo 1 + assert_encoding "int" foo + r append foo 2 + + set res {} + lappend res [r get foo] + assert_encoding "raw" foo + + r set bar 12 + assert_encoding "int" bar + lappend res [r get bar] + } {12 12} +} \ No newline at end of file diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl new file mode 100644 index 000000000..f23dfeb65 --- /dev/null +++ b/tests/unit/type/zset.tcl @@ -0,0 +1,2735 @@ +start_server {tags {"zset"}} { + proc create_zset {key items} { + r del $key + foreach {score entry} $items { + r zadd $key $score $entry + } + } + + # A helper function to verify either ZPOP* or ZMPOP* response. + proc verify_pop_response {pop res zpop_expected_response zmpop_expected_response} { + if {[string match "*ZM*" $pop]} { + assert_equal $res $zmpop_expected_response + } else { + assert_equal $res $zpop_expected_response + } + } + + # A helper function to verify either ZPOP* or ZMPOP* response when given one input key. 
+    proc verify_zpop_response {rd pop key count zpop_expected_response zmpop_expected_response} {
+        if {[string match "ZM*" $pop]} {
+            # ZMPOP-style: the command token carries the direction, e.g. "ZMPOP_MIN";
+            # split it into the actual command name and the MIN|MAX argument.
+            lassign [split $pop "_"] pop where
+
+            # ZMPOP numkeys key MIN|MAX [COUNT n] — single input key, so numkeys is 1.
+            # count == 0 means "no COUNT clause", not "COUNT 0".
+            if {$count == 0} {
+                set res [$rd $pop 1 $key $where]
+            } else {
+                set res [$rd $pop 1 $key $where COUNT $count]
+            }
+        } else {
+            # ZPOPMIN/ZPOPMAX form: key plus an optional count argument.
+            if {$count == 0} {
+                set res [$rd $pop $key]
+            } else {
+                set res [$rd $pop $key $count]
+            }
+        }
+        verify_pop_response $pop $res $zpop_expected_response $zmpop_expected_response
+    }
+
+    # A helper function to verify either BZPOP* or BZMPOP* response when given one input key.
+    proc verify_bzpop_response {rd pop key timeout count bzpop_expected_response bzmpop_expected_response} {
+        if {[string match "BZM*" $pop]} {
+            # BZMPOP-style: split "BZMPOP_MIN"/"BZMPOP_MAX" into command and direction.
+            lassign [split $pop "_"] pop where
+
+            if {$count == 0} {
+                $rd $pop $timeout 1 $key $where
+            } else {
+                $rd $pop $timeout 1 $key $where COUNT $count
+            }
+        } else {
+            $rd $pop $key $timeout
+        }
+        # The blocking command was issued without consuming its reply; [$rd read]
+        # fetches it here before checking (presumably $rd is a deferring client — TODO confirm).
+        verify_pop_response $pop [$rd read] $bzpop_expected_response $bzmpop_expected_response
+    }
+
+    # A helper function to verify either BZPOP* or BZMPOP* response when given two input keys.
+    proc verify_bzpop_two_key_response {rd pop key key2 timeout count bzpop_expected_response bzmpop_expected_response} {
+        if {[string match "BZM*" $pop]} {
+            lassign [split $pop "_"] pop where
+
+            # Two input keys, so numkeys is 2; both keys precede MIN|MAX.
+            if {$count == 0} {
+                $rd $pop $timeout 2 $key $key2 $where
+            } else {
+                $rd $pop $timeout 2 $key $key2 $where COUNT $count
+            }
+        } else {
+            $rd $pop $key $key2 $timeout
+        }
+        verify_pop_response $pop [$rd read] $bzpop_expected_response $bzmpop_expected_response
+    }
+
+    # A helper function to execute either BZPOP* or BZMPOP* with one input key.
+    proc bzpop_command {rd pop key timeout} {
+        if {[string match "BZM*" $pop]} {
+            lassign [split $pop "_"] pop where
+            # Always COUNT 1 here so the BZMPOP form pops a single member,
+            # mirroring what the plain BZPOPMIN/BZPOPMAX branch does.
+            $rd $pop $timeout 1 $key $where COUNT 1
+        } else {
+            $rd $pop $key $timeout
+        }
+    }
+
+    # A helper function to verify nil response in readraw based on RESP version.
+ proc verify_nil_response {resp nil_response} { + if {$resp == 2} { + assert_equal $nil_response {*-1} + } elseif {$resp == 3} { + assert_equal $nil_response {_} + } + } + + # A helper function to verify zset score response in readraw base on RESP version. + proc verify_score_response {rd resp score} { + if {$resp == 2} { + assert_equal [$rd read] {$1} + assert_equal [$rd read] $score + } elseif {$resp == 3} { + assert_equal [$rd read] ",$score" + } + } + + proc basics {encoding} { +# set original_max_entries [lindex [r config get zset-max-ziplist-entries] 1] +# set original_max_value [lindex [r config get zset-max-ziplist-value] 1] +# if {$encoding == "listpack"} { +# r config set zset-max-ziplist-entries 128 +# r config set zset-max-ziplist-value 64 +# } elseif {$encoding == "skiplist"} { +# r config set zset-max-ziplist-entries 0 +# r config set zset-max-ziplist-value 0 +# } else { +# puts "Unknown sorted set encoding" +# exit +# } + + test "Check encoding - $encoding" { + r del ztmp + r zadd ztmp 10 x + #assert_encoding $encoding ztmp + } + + test "ZSET basic ZADD and score update - $encoding" { + r del ztmp + r zadd ztmp 10 x + r zadd ztmp 20 y + r zadd ztmp 30 z + assert_equal {x y z} [r zrange ztmp 0 -1] + + r zadd ztmp 1 y + assert_equal {y x z} [r zrange ztmp 0 -1] + } + + test "ZSET element can't be set to NaN with ZADD - $encoding" { + assert_error "*not*float*" {r zadd myzset nan abc} + } + + test "ZSET element can't be set to NaN with ZINCRBY - $encoding" { + assert_error "*not*float*" {r zincrby myzset nan abc} + } + + test "ZADD with options syntax error with incomplete pair - $encoding" { + r del ztmp + catch {r zadd ztmp xx 10 x 20} err + set err + } {ERR*} + + test "ZADD XX option without key - $encoding" { + r del ztmp + assert {[r zadd ztmp xx 10 x] == 0} + assert {[r type ztmp] eq {none}} + } + + test "ZADD XX existing key - $encoding" { + r del ztmp + r zadd ztmp 10 x + assert {[r zadd ztmp xx 20 y] == 0} + assert {[r zcard ztmp] == 1} + } + 
+ test "ZADD XX returns the number of elements actually added - $encoding" { + r del ztmp + r zadd ztmp 10 x + set retval [r zadd ztmp 10 x 20 y 30 z] + assert {$retval == 2} + } + + test "ZADD XX updates existing elements score - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + r zadd ztmp xx 5 foo 11 x 21 y 40 zap + assert {[r zcard ztmp] == 3} + assert {[r zscore ztmp x] == 11} + assert {[r zscore ztmp y] == 21} + } + + test "ZADD GT updates existing elements when new scores are greater - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp gt ch 5 foo 11 x 21 y 29 z] == 3} + assert {[r zcard ztmp] == 4} + assert {[r zscore ztmp x] == 11} + assert {[r zscore ztmp y] == 21} + assert {[r zscore ztmp z] == 30} + } + + test "ZADD LT updates existing elements when new scores are lower - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp lt ch 5 foo 11 x 21 y 29 z] == 2} + assert {[r zcard ztmp] == 4} + assert {[r zscore ztmp x] == 10} + assert {[r zscore ztmp y] == 20} + assert {[r zscore ztmp z] == 29} + } + + test "ZADD GT XX updates existing elements when new scores are greater and skips new elements - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp gt xx ch 5 foo 11 x 21 y 29 z] == 2} + assert {[r zcard ztmp] == 3} + assert {[r zscore ztmp x] == 11} + assert {[r zscore ztmp y] == 21} + assert {[r zscore ztmp z] == 30} + } + + test "ZADD LT XX updates existing elements when new scores are lower and skips new elements - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp lt xx ch 5 foo 11 x 21 y 29 z] == 1} + assert {[r zcard ztmp] == 3} + assert {[r zscore ztmp x] == 10} + assert {[r zscore ztmp y] == 20} + assert {[r zscore ztmp z] == 29} + } + + test "ZADD XX and NX are not compatible - $encoding" { + r del ztmp + catch {r zadd ztmp xx nx 10 x} err + set err + } {ERR*} + + test "ZADD NX with non existing key - $encoding" { + r del ztmp + r zadd 
ztmp nx 10 x 20 y 30 z + assert {[r zcard ztmp] == 3} + } + + test "ZADD NX only add new elements without updating old ones - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp nx 11 x 21 y 100 a 200 b] == 2} + assert {[r zscore ztmp x] == 10} + assert {[r zscore ztmp y] == 20} + assert {[r zscore ztmp a] == 100} + assert {[r zscore ztmp b] == 200} + } + + test "ZADD GT and NX are not compatible - $encoding" { + r del ztmp + catch {r zadd ztmp gt nx 10 x} err + set err + } {ERR*} + + test "ZADD LT and NX are not compatible - $encoding" { + r del ztmp + catch {r zadd ztmp lt nx 10 x} err + set err + } {ERR*} + + test "ZADD LT and GT are not compatible - $encoding" { + r del ztmp + catch {r zadd ztmp lt gt 10 x} err + set err + } {ERR*} + + test "ZADD INCR LT/GT replies with nill if score not updated - $encoding" { + r del ztmp + r zadd ztmp 28 x + assert {[r zadd ztmp lt incr 1 x] eq {}} + assert {[r zscore ztmp x] == 28} + assert {[r zadd ztmp gt incr -1 x] eq {}} + assert {[r zscore ztmp x] == 28} + } + + test "ZADD INCR LT/GT with inf - $encoding" { + r del ztmp + r zadd ztmp +inf x -inf y + + assert {[r zadd ztmp lt incr 1 x] eq {}} + assert {[r zscore ztmp x] == inf} + assert {[r zadd ztmp gt incr -1 x] eq {}} + assert {[r zscore ztmp x] == inf} + assert {[r zadd ztmp lt incr -1 x] eq {}} + assert {[r zscore ztmp x] == inf} + assert {[r zadd ztmp gt incr 1 x] eq {}} + assert {[r zscore ztmp x] == inf} + + assert {[r zadd ztmp lt incr 1 y] eq {}} + assert {[r zscore ztmp y] == -inf} + assert {[r zadd ztmp gt incr -1 y] eq {}} + assert {[r zscore ztmp y] == -inf} + assert {[r zadd ztmp lt incr -1 y] eq {}} + assert {[r zscore ztmp y] == -inf} + assert {[r zadd ztmp gt incr 1 y] eq {}} + assert {[r zscore ztmp y] == -inf} + } + + test "ZADD INCR works like ZINCRBY - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + r zadd ztmp INCR 15 x + assert {[r zscore ztmp x] == 25} + } + + test "ZADD INCR works with a single score-elemenet 
pair - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + catch {r zadd ztmp INCR 15 x 10 y} err + set err + } {ERR*} + + test "ZADD CH option changes return value to all changed elements - $encoding" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp 11 x 21 y 30 z] == 0} + assert {[r zadd ztmp ch 12 x 22 y 30 z] == 2} + } + + test "ZINCRBY calls leading to NaN result in error - $encoding" { + r zincrby myzset +inf abc + assert_error "*NaN*" {r zincrby myzset -inf abc} + } + + test "ZINCRBY against invalid incr value - $encoding" { + r del zincr + r zadd zincr 1 "one" + assert_error "*value is not a valid*" {r zincrby zincr v "one"} + } + + test "ZADD - Variadic version base case - $encoding" { + r del myzset + list [r zadd myzset 10 a 20 b 30 c] [r zrange myzset 0 -1 withscores] + } {3 {a 10 b 20 c 30}} + + test "ZADD - Return value is the number of actually added items - $encoding" { + list [r zadd myzset 5 x 20 b 30 c] [r zrange myzset 0 -1 withscores] + } {1 {x 5 a 10 b 20 c 30}} + + test "ZADD - Variadic version does not add nothing on single parsing err - $encoding" { + r del myzset + catch {r zadd myzset 10 a 20 b 30.badscore c} e + assert_match {*ERR*not*float*} $e + r exists myzset + } {0} + + test "ZADD - Variadic version will raise error on missing arg - $encoding" { + r del myzset + catch {r zadd myzset 10 a 20 b 30 c 40} e + assert_match {*ERR*syntax*} $e + } + + test "ZINCRBY does not work variadic even if shares ZADD implementation - $encoding" { + r del myzset + catch {r zincrby myzset 10 a 20 b 30 c} e + assert_match {*ERR*wrong*number*arg*} $e + } + + test "ZCARD basics - $encoding" { + r del ztmp + r zadd ztmp 10 a 20 b 30 c + assert_equal 3 [r zcard ztmp] + assert_equal 0 [r zcard zdoesntexist] + } + + test "ZREM removes key after last element is removed - $encoding" { + r del ztmp + r zadd ztmp 10 x + r zadd ztmp 20 y + + assert_equal 1 [r exists ztmp] + assert_equal 0 [r zrem ztmp z] + assert_equal 1 [r zrem ztmp y] + 
assert_equal 1 [r zrem ztmp x] + assert_equal 0 [r exists ztmp] + } + + test "ZREM variadic version - $encoding" { + r del ztmp + r zadd ztmp 10 a 20 b 30 c + assert_equal 2 [r zrem ztmp x y a b k] + assert_equal 0 [r zrem ztmp foo bar] + assert_equal 1 [r zrem ztmp c] + r exists ztmp + } {0} + + test "ZREM variadic version -- remove elements after key deletion - $encoding" { + r del ztmp + r zadd ztmp 10 a 20 b 30 c + r zrem ztmp a b c d e f g + } {3} + + test "ZRANGE basics - $encoding" { + r del ztmp + r zadd ztmp 1 a + r zadd ztmp 2 b + r zadd ztmp 3 c + r zadd ztmp 4 d + + assert_equal {a b c d} [r zrange ztmp 0 -1] + assert_equal {a b c} [r zrange ztmp 0 -2] + assert_equal {b c d} [r zrange ztmp 1 -1] + assert_equal {b c} [r zrange ztmp 1 -2] + assert_equal {c d} [r zrange ztmp -2 -1] + assert_equal {c} [r zrange ztmp -2 -2] + + # out of range start index + assert_equal {a b c} [r zrange ztmp -5 2] + assert_equal {a b} [r zrange ztmp -5 1] + assert_equal {} [r zrange ztmp 5 -1] + assert_equal {} [r zrange ztmp 5 -2] + + # out of range end index + assert_equal {a b c d} [r zrange ztmp 0 5] + assert_equal {b c d} [r zrange ztmp 1 5] + assert_equal {} [r zrange ztmp 0 -5] + assert_equal {} [r zrange ztmp 1 -5] + + # withscores + assert_equal {a 1 b 2 c 3 d 4} [r zrange ztmp 0 -1 withscores] + } + + test "ZREVRANGE basics - $encoding" { + r del ztmp + r zadd ztmp 1 a + r zadd ztmp 2 b + r zadd ztmp 3 c + r zadd ztmp 4 d + + assert_equal {d c b a} [r zrevrange ztmp 0 -1] + assert_equal {d c b} [r zrevrange ztmp 0 -2] + assert_equal {c b a} [r zrevrange ztmp 1 -1] + assert_equal {c b} [r zrevrange ztmp 1 -2] + assert_equal {b a} [r zrevrange ztmp -2 -1] + assert_equal {b} [r zrevrange ztmp -2 -2] + + # out of range start index + assert_equal {d c b} [r zrevrange ztmp -5 2] + assert_equal {d c} [r zrevrange ztmp -5 1] + assert_equal {} [r zrevrange ztmp 5 -1] + assert_equal {} [r zrevrange ztmp 5 -2] + + # out of range end index + assert_equal {d c b a} [r zrevrange 
ztmp 0 5] + assert_equal {c b a} [r zrevrange ztmp 1 5] + assert_equal {} [r zrevrange ztmp 0 -5] + assert_equal {} [r zrevrange ztmp 1 -5] + + # withscores + assert_equal {d 4 c 3 b 2 a 1} [r zrevrange ztmp 0 -1 withscores] + } + + test "ZRANK/ZREVRANK basics - $encoding" { + set nullres {$-1} + if {$::force_resp3} { + set nullres {_} + } + r del zranktmp + r zadd zranktmp 10 x + r zadd zranktmp 20 y + r zadd zranktmp 30 z + assert_equal 0 [r zrank zranktmp x] + assert_equal 1 [r zrank zranktmp y] + assert_equal 2 [r zrank zranktmp z] + assert_equal 2 [r zrevrank zranktmp x] + assert_equal 1 [r zrevrank zranktmp y] + assert_equal 0 [r zrevrank zranktmp z] + r readraw 1 + assert_equal $nullres [r zrank zranktmp foo] + assert_equal $nullres [r zrevrank zranktmp foo] + r readraw 0 + + # withscore + set nullres {*-1} + if {$::force_resp3} { + set nullres {_} + } + assert_equal {0 10} [r zrank zranktmp x withscore] + assert_equal {1 20} [r zrank zranktmp y withscore] + assert_equal {2 30} [r zrank zranktmp z withscore] + assert_equal {2 10} [r zrevrank zranktmp x withscore] + assert_equal {1 20} [r zrevrank zranktmp y withscore] + assert_equal {0 30} [r zrevrank zranktmp z withscore] + r readraw 1 + assert_equal $nullres [r zrank zranktmp foo withscore] + assert_equal $nullres [r zrevrank zranktmp foo withscore] + r readraw 0 + } + + test "ZRANK - after deletion - $encoding" { + r zrem zranktmp y + assert_equal 0 [r zrank zranktmp x] + assert_equal 1 [r zrank zranktmp z] + assert_equal {0 10} [r zrank zranktmp x withscore] + assert_equal {1 30} [r zrank zranktmp z withscore] + } + + test "ZINCRBY - can create a new sorted set - $encoding" { + r del zset + r zincrby zset 1 foo + assert_equal {foo} [r zrange zset 0 -1] + assert_equal 1 [r zscore zset foo] + } + + test "ZINCRBY - increment and decrement - $encoding" { + r zincrby zset 2 foo + r zincrby zset 1 bar + assert_equal {bar foo} [r zrange zset 0 -1] + + r zincrby zset 10 bar + r zincrby zset -5 foo + r zincrby 
zset -5 bar + assert_equal {foo bar} [r zrange zset 0 -1] + + assert_equal -2 [r zscore zset foo] + assert_equal 6 [r zscore zset bar] + } + + test "ZINCRBY return value - $encoding" { + r del ztmp + set retval [r zincrby ztmp 1.0 x] + assert {$retval == 1.0} + } + + proc create_default_zset {} { + create_zset zset {-inf a 1 b 2 c 3 d 4 e 5 f +inf g} + } + + proc create_long_zset {key length} { + r del $key + for {set i 0} {$i < $length} {incr i 1} { + r zadd $key $i i$i + } + } + + test "ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics - $encoding" { + create_default_zset + + # inclusive range + assert_equal {a b c} [r zrangebyscore zset -inf 2] + assert_equal {b c d} [r zrangebyscore zset 0 3] + assert_equal {d e f} [r zrangebyscore zset 3 6] + assert_equal {e f g} [r zrangebyscore zset 4 +inf] + assert_equal {c b a} [r zrevrangebyscore zset 2 -inf] + assert_equal {d c b} [r zrevrangebyscore zset 3 0] + assert_equal {f e d} [r zrevrangebyscore zset 6 3] + assert_equal {g f e} [r zrevrangebyscore zset +inf 4] + assert_equal 3 [r zcount zset 0 3] + + # exclusive range + assert_equal {b} [r zrangebyscore zset (-inf (2] + assert_equal {b c} [r zrangebyscore zset (0 (3] + assert_equal {e f} [r zrangebyscore zset (3 (6] + assert_equal {f} [r zrangebyscore zset (4 (+inf] + assert_equal {b} [r zrevrangebyscore zset (2 (-inf] + assert_equal {c b} [r zrevrangebyscore zset (3 (0] + assert_equal {f e} [r zrevrangebyscore zset (6 (3] + assert_equal {f} [r zrevrangebyscore zset (+inf (4] + assert_equal 2 [r zcount zset (0 (3] + + # test empty ranges + r zrem zset a + r zrem zset g + + # inclusive + assert_equal {} [r zrangebyscore zset 4 2] + assert_equal {} [r zrangebyscore zset 6 +inf] + assert_equal {} [r zrangebyscore zset -inf -6] + assert_equal {} [r zrevrangebyscore zset +inf 6] + assert_equal {} [r zrevrangebyscore zset -6 -inf] + + # exclusive + assert_equal {} [r zrangebyscore zset (4 (2] + assert_equal {} [r zrangebyscore zset 2 (2] + assert_equal {} [r zrangebyscore 
zset (2 2] + assert_equal {} [r zrangebyscore zset (6 (+inf] + assert_equal {} [r zrangebyscore zset (-inf (-6] + assert_equal {} [r zrevrangebyscore zset (+inf (6] + assert_equal {} [r zrevrangebyscore zset (-6 (-inf] + + # empty inner range + assert_equal {} [r zrangebyscore zset 2.4 2.6] + assert_equal {} [r zrangebyscore zset (2.4 2.6] + assert_equal {} [r zrangebyscore zset 2.4 (2.6] + assert_equal {} [r zrangebyscore zset (2.4 (2.6] + } + + test "ZRANGEBYSCORE with WITHSCORES - $encoding" { + create_default_zset + assert_equal {b 1 c 2 d 3} [r zrangebyscore zset 0 3 withscores] + assert_equal {d 3 c 2 b 1} [r zrevrangebyscore zset 3 0 withscores] + } + + test "ZRANGEBYSCORE with LIMIT - $encoding" { + create_default_zset + assert_equal {b c} [r zrangebyscore zset 0 10 LIMIT 0 2] + assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 3] + assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 10] + assert_equal {} [r zrangebyscore zset 0 10 LIMIT 20 10] + assert_equal {f e} [r zrevrangebyscore zset 10 0 LIMIT 0 2] + assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 3] + assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 10] + assert_equal {} [r zrevrangebyscore zset 10 0 LIMIT 20 10] + # zrangebyscore uses different logic when offset > ZSKIPLIST_MAX_SEARCH + create_long_zset zset 30 + assert_equal {i12 i13 i14} [r zrangebyscore zset 0 20 LIMIT 12 3] + assert_equal {i14 i15} [r zrangebyscore zset 0 20 LIMIT 14 2] + assert_equal {i19 i20 i21} [r zrangebyscore zset 0 30 LIMIT 19 3] + assert_equal {i29} [r zrangebyscore zset 10 30 LIMIT 19 2] + assert_equal {i17 i16 i15} [r zrevrangebyscore zset 30 10 LIMIT 12 3] + assert_equal {i6 i5} [r zrevrangebyscore zset 20 0 LIMIT 14 2] + assert_equal {i2 i1 i0} [r zrevrangebyscore zset 20 0 LIMIT 18 5] + assert_equal {i0} [r zrevrangebyscore zset 20 0 LIMIT 20 5] + } + + test "ZRANGEBYSCORE with LIMIT and WITHSCORES - $encoding" { + create_default_zset + assert_equal {e 4 f 5} [r zrangebyscore zset 2 5 
LIMIT 2 3 WITHSCORES] + assert_equal {d 3 c 2} [r zrevrangebyscore zset 5 2 LIMIT 2 3 WITHSCORES] + assert_equal {} [r zrangebyscore zset 2 5 LIMIT 12 13 WITHSCORES] + } + + test "ZRANGEBYSCORE with non-value min or max - $encoding" { + assert_error "*not*float*" {r zrangebyscore fooz str 1} + assert_error "*not*float*" {r zrangebyscore fooz 1 str} + assert_error "*not*float*" {r zrangebyscore fooz 1 NaN} + } + + proc create_default_lex_zset {} { + create_zset zset {0 alpha 0 bar 0 cool 0 down + 0 elephant 0 foo 0 great 0 hill + 0 omega} + } + + proc create_long_lex_zset {} { + create_zset zset {0 alpha 0 bar 0 cool 0 down + 0 elephant 0 foo 0 great 0 hill + 0 island 0 jacket 0 key 0 lip + 0 max 0 null 0 omega 0 point + 0 query 0 result 0 sea 0 tree} + } + + test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics - $encoding" { + create_default_lex_zset + + # inclusive range + assert_equal {alpha bar cool} [r zrangebylex zset - \[cool] + assert_equal {bar cool down} [r zrangebylex zset \[bar \[down] + assert_equal {great hill omega} [r zrangebylex zset \[g +] + assert_equal {cool bar alpha} [r zrevrangebylex zset \[cool -] + assert_equal {down cool bar} [r zrevrangebylex zset \[down \[bar] + assert_equal {omega hill great foo elephant down} [r zrevrangebylex zset + \[d] + assert_equal 3 [r zlexcount zset \[ele \[h] + + # exclusive range + assert_equal {alpha bar} [r zrangebylex zset - (cool] + assert_equal {cool} [r zrangebylex zset (bar (down] + assert_equal {hill omega} [r zrangebylex zset (great +] + assert_equal {bar alpha} [r zrevrangebylex zset (cool -] + assert_equal {cool} [r zrevrangebylex zset (down (bar] + assert_equal {omega hill} [r zrevrangebylex zset + (great] + assert_equal 2 [r zlexcount zset (ele (great] + + # inclusive and exclusive + assert_equal {} [r zrangebylex zset (az (b] + assert_equal {} [r zrangebylex zset (z +] + assert_equal {} [r zrangebylex zset - \[aaaa] + assert_equal {} [r zrevrangebylex zset \[elez \[elex] + assert_equal {} [r 
zrevrangebylex zset (hill (omega] + } + + test "ZLEXCOUNT advanced - $encoding" { + create_default_lex_zset + + assert_equal 9 [r zlexcount zset - +] + assert_equal 0 [r zlexcount zset + -] + assert_equal 0 [r zlexcount zset + \[c] + assert_equal 0 [r zlexcount zset \[c -] + assert_equal 8 [r zlexcount zset \[bar +] + assert_equal 5 [r zlexcount zset \[bar \[foo] + assert_equal 4 [r zlexcount zset \[bar (foo] + assert_equal 4 [r zlexcount zset (bar \[foo] + assert_equal 3 [r zlexcount zset (bar (foo] + assert_equal 5 [r zlexcount zset - (foo] + assert_equal 1 [r zlexcount zset (maxstring +] + } + + test "ZRANGEBYLEX with LIMIT - $encoding" { + create_default_lex_zset + assert_equal {alpha bar} [r zrangebylex zset - \[cool LIMIT 0 2] + assert_equal {bar cool} [r zrangebylex zset - \[cool LIMIT 1 2] + assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 0 0] + assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 2 0] + assert_equal {bar} [r zrangebylex zset \[bar \[down LIMIT 0 1] + assert_equal {cool} [r zrangebylex zset \[bar \[down LIMIT 1 1] + assert_equal {bar cool down} [r zrangebylex zset \[bar \[down LIMIT 0 100] + assert_equal {omega hill great foo elephant} [r zrevrangebylex zset + \[d LIMIT 0 5] + assert_equal {omega hill great foo} [r zrevrangebylex zset + \[d LIMIT 0 4] + assert_equal {great foo elephant} [r zrevrangebylex zset + \[d LIMIT 2 3] + # zrangebylex uses different logic when offset > ZSKIPLIST_MAX_SEARCH + create_long_lex_zset + assert_equal {max null} [r zrangebylex zset - \[tree LIMIT 12 2] + assert_equal {point query} [r zrangebylex zset - \[tree LIMIT 15 2] + assert_equal {} [r zrangebylex zset \[max \[tree LIMIT 10 0] + assert_equal {} [r zrangebylex zset \[max \[tree LIMIT 12 0] + assert_equal {max} [r zrangebylex zset \[max \[null LIMIT 0 1] + assert_equal {null} [r zrangebylex zset \[max \[null LIMIT 1 1] + assert_equal {max null omega point} [r zrangebylex zset \[max \[point LIMIT 0 100] + assert_equal {tree sea result query 
point} [r zrevrangebylex zset + \[o LIMIT 0 5] + assert_equal {tree sea result query} [r zrevrangebylex zset + \[o LIMIT 0 4] + assert_equal {omega null max lip} [r zrevrangebylex zset + \[l LIMIT 5 4] + assert_equal {elephant down} [r zrevrangebylex zset + \[a LIMIT 15 2] + assert_equal {bar alpha} [r zrevrangebylex zset + - LIMIT 18 6] + assert_equal {hill great foo} [r zrevrangebylex zset + \[c LIMIT 12 3] + } + + test "ZRANGEBYLEX with invalid lex range specifiers - $encoding" { + assert_error "*not*string*" {r zrangebylex fooz foo bar} + assert_error "*not*string*" {r zrangebylex fooz \[foo bar} + assert_error "*not*string*" {r zrangebylex fooz foo \[bar} + assert_error "*not*string*" {r zrangebylex fooz +x \[bar} + assert_error "*not*string*" {r zrangebylex fooz -x \[bar} + } + + test "ZREMRANGEBYSCORE basics - $encoding" { + proc remrangebyscore {min max} { + create_zset zset {1 a 2 b 3 c 4 d 5 e} + assert_equal 1 [r exists zset] + r zremrangebyscore zset $min $max + } + + # inner range + assert_equal 3 [remrangebyscore 2 4] + assert_equal {a e} [r zrange zset 0 -1] + + # start underflow + assert_equal 1 [remrangebyscore -10 1] + assert_equal {b c d e} [r zrange zset 0 -1] + + # end overflow + assert_equal 1 [remrangebyscore 5 10] + assert_equal {a b c d} [r zrange zset 0 -1] + + # switch min and max + assert_equal 0 [remrangebyscore 4 2] + assert_equal {a b c d e} [r zrange zset 0 -1] + + # -inf to mid + assert_equal 3 [remrangebyscore -inf 3] + assert_equal {d e} [r zrange zset 0 -1] + + # mid to +inf + assert_equal 3 [remrangebyscore 3 +inf] + assert_equal {a b} [r zrange zset 0 -1] + + # -inf to +inf + assert_equal 5 [remrangebyscore -inf +inf] + assert_equal {} [r zrange zset 0 -1] + + # exclusive min + assert_equal 4 [remrangebyscore (1 5] + assert_equal {a} [r zrange zset 0 -1] + assert_equal 3 [remrangebyscore (2 5] + assert_equal {a b} [r zrange zset 0 -1] + + # exclusive max + assert_equal 4 [remrangebyscore 1 (5] + assert_equal {e} [r zrange zset 
0 -1] + assert_equal 3 [remrangebyscore 1 (4] + assert_equal {d e} [r zrange zset 0 -1] + + # exclusive min and max + assert_equal 3 [remrangebyscore (1 (5] + assert_equal {a e} [r zrange zset 0 -1] + + # destroy when empty + assert_equal 5 [remrangebyscore 1 5] + assert_equal 0 [r exists zset] + } + + test "ZREMRANGEBYSCORE with non-value min or max - $encoding" { + assert_error "*not*float*" {r zremrangebyscore fooz str 1} + assert_error "*not*float*" {r zremrangebyscore fooz 1 str} + assert_error "*not*float*" {r zremrangebyscore fooz 1 NaN} + } + + test "ZREMRANGEBYRANK basics - $encoding" { + proc remrangebyrank {min max} { + create_zset zset {1 a 2 b 3 c 4 d 5 e} + assert_equal 1 [r exists zset] + r zremrangebyrank zset $min $max + } + + # inner range + assert_equal 3 [remrangebyrank 1 3] + assert_equal {a e} [r zrange zset 0 -1] + + # start underflow + assert_equal 1 [remrangebyrank -10 0] + assert_equal {b c d e} [r zrange zset 0 -1] + + # start overflow + assert_equal 0 [remrangebyrank 10 -1] + assert_equal {a b c d e} [r zrange zset 0 -1] + + # end underflow + assert_equal 0 [remrangebyrank 0 -10] + assert_equal {a b c d e} [r zrange zset 0 -1] + + # end overflow + assert_equal 5 [remrangebyrank 0 10] + assert_equal {} [r zrange zset 0 -1] + + # destroy when empty + assert_equal 5 [remrangebyrank 0 4] + assert_equal 0 [r exists zset] + } + + test "ZREMRANGEBYLEX basics - $encoding" { + proc remrangebylex {min max} { + create_default_lex_zset + assert_equal 1 [r exists zset] + r zremrangebylex zset $min $max + } + + # inclusive range + assert_equal 3 [remrangebylex - \[cool] + assert_equal {down elephant foo great hill omega} [r zrange zset 0 -1] + assert_equal 3 [remrangebylex \[bar \[down] + assert_equal {alpha elephant foo great hill omega} [r zrange zset 0 -1] + assert_equal 3 [remrangebylex \[g +] + assert_equal {alpha bar cool down elephant foo} [r zrange zset 0 -1] + assert_equal 6 [r zcard zset] + + # exclusive range + assert_equal 2 [remrangebylex 
- (cool] + assert_equal {cool down elephant foo great hill omega} [r zrange zset 0 -1] + assert_equal 1 [remrangebylex (bar (down] + assert_equal {alpha bar down elephant foo great hill omega} [r zrange zset 0 -1] + assert_equal 2 [remrangebylex (great +] + assert_equal {alpha bar cool down elephant foo great} [r zrange zset 0 -1] + assert_equal 7 [r zcard zset] + + # inclusive and exclusive + assert_equal 0 [remrangebylex (az (b] + assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1] + assert_equal 0 [remrangebylex (z +] + assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1] + assert_equal 0 [remrangebylex - \[aaaa] + assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1] + assert_equal 9 [r zcard zset] + + # destroy when empty + assert_equal 9 [remrangebylex - +] + assert_equal 0 [r zcard zset] + assert_equal 0 [r exists zset] + } + + test "ZUNIONSTORE against non-existing key doesn't set destination - $encoding" { + r del zseta{t} + assert_equal 0 [r zunionstore dst_key{t} 1 zseta{t}] + assert_equal 0 [r exists dst_key{t}] + } + + test "ZUNION/ZINTER/ZINTERCARD/ZDIFF against non-existing key - $encoding" { + r del zseta + assert_equal {} [r zunion 1 zseta] + assert_equal {} [r zinter 1 zseta] + assert_equal 0 [r zintercard 1 zseta] + assert_equal 0 [r zintercard 1 zseta limit 0] + assert_equal {} [r zdiff 1 zseta] + } + + test "ZUNIONSTORE with empty set - $encoding" { + r del zseta{t} zsetb{t} + r zadd zseta{t} 1 a + r zadd zseta{t} 2 b + r zunionstore zsetc{t} 2 zseta{t} zsetb{t} + r zrange zsetc{t} 0 -1 withscores + } {a 1 b 2} + + test "ZUNION/ZINTER/ZINTERCARD/ZDIFF with empty set - $encoding" { + r del zseta{t} zsetb{t} + r zadd zseta{t} 1 a + r zadd zseta{t} 2 b + assert_equal {a 1 b 2} [r zunion 2 zseta{t} zsetb{t} withscores] + assert_equal {} [r zinter 2 zseta{t} zsetb{t} withscores] + assert_equal 0 [r zintercard 2 zseta{t} zsetb{t}] + assert_equal 0 [r 
zintercard 2 zseta{t} zsetb{t} limit 0] + assert_equal {a 1 b 2} [r zdiff 2 zseta{t} zsetb{t} withscores] + } + + test "ZUNIONSTORE basics - $encoding" { + r del zseta{t} zsetb{t} zsetc{t} + r zadd zseta{t} 1 a + r zadd zseta{t} 2 b + r zadd zseta{t} 3 c + r zadd zsetb{t} 1 b + r zadd zsetb{t} 2 c + r zadd zsetb{t} 3 d + + assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t}] + assert_equal {a 1 b 3 d 3 c 5} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZUNION/ZINTER/ZINTERCARD/ZDIFF with integer members - $encoding" { + r del zsetd{t} zsetf{t} + r zadd zsetd{t} 1 1 + r zadd zsetd{t} 2 2 + r zadd zsetd{t} 3 3 + r zadd zsetf{t} 1 1 + r zadd zsetf{t} 3 3 + r zadd zsetf{t} 4 4 + + assert_equal {1 2 2 2 4 4 3 6} [r zunion 2 zsetd{t} zsetf{t} withscores] + assert_equal {1 2 3 6} [r zinter 2 zsetd{t} zsetf{t} withscores] + assert_equal 2 [r zintercard 2 zsetd{t} zsetf{t}] + assert_equal 2 [r zintercard 2 zsetd{t} zsetf{t} limit 0] + assert_equal {2 2} [r zdiff 2 zsetd{t} zsetf{t} withscores] + } + + test "ZUNIONSTORE with weights - $encoding" { + assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} weights 2 3] + assert_equal {a 2 b 7 d 9 c 12} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZUNION with weights - $encoding" { + assert_equal {a 2 b 7 d 9 c 12} [r zunion 2 zseta{t} zsetb{t} weights 2 3 withscores] + assert_equal {b 7 c 12} [r zinter 2 zseta{t} zsetb{t} weights 2 3 withscores] + } + + test "ZUNIONSTORE with a regular set and weights - $encoding" { + r del seta{t} + r sadd seta{t} a + r sadd seta{t} b + r sadd seta{t} c + + assert_equal 4 [r zunionstore zsetc{t} 2 seta{t} zsetb{t} weights 2 3] + assert_equal {a 2 b 5 c 8 d 9} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZUNIONSTORE with AGGREGATE MIN - $encoding" { + assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} aggregate min] + assert_equal {a 1 b 1 c 2 d 3} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZUNION/ZINTER with AGGREGATE MIN - $encoding" { + assert_equal {a 
1 b 1 c 2 d 3} [r zunion 2 zseta{t} zsetb{t} aggregate min withscores] + assert_equal {b 1 c 2} [r zinter 2 zseta{t} zsetb{t} aggregate min withscores] + } + + test "ZUNIONSTORE with AGGREGATE MAX - $encoding" { + assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} aggregate max] + assert_equal {a 1 b 2 c 3 d 3} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZUNION/ZINTER with AGGREGATE MAX - $encoding" { + assert_equal {a 1 b 2 c 3 d 3} [r zunion 2 zseta{t} zsetb{t} aggregate max withscores] + assert_equal {b 2 c 3} [r zinter 2 zseta{t} zsetb{t} aggregate max withscores] + } + + test "ZINTERSTORE basics - $encoding" { + assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t}] + assert_equal {b 3 c 5} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZINTER basics - $encoding" { + assert_equal {b 3 c 5} [r zinter 2 zseta{t} zsetb{t} withscores] + } + + test "ZINTERCARD with illegal arguments" { + assert_error "ERR syntax error*" {r zintercard 1 zseta{t} zseta{t}} + assert_error "ERR syntax error*" {r zintercard 1 zseta{t} bar_arg} + assert_error "ERR syntax error*" {r zintercard 1 zseta{t} LIMIT} + + assert_error "ERR LIMIT*" {r zintercard 1 myset{t} LIMIT -1} + assert_error "ERR LIMIT*" {r zintercard 1 myset{t} LIMIT a} + } + + test "ZINTERCARD basics - $encoding" { + assert_equal 2 [r zintercard 2 zseta{t} zsetb{t}] + assert_equal 2 [r zintercard 2 zseta{t} zsetb{t} limit 0] + assert_equal 1 [r zintercard 2 zseta{t} zsetb{t} limit 1] + assert_equal 2 [r zintercard 2 zseta{t} zsetb{t} limit 10] + } + + test "ZINTER RESP3 - $encoding" { + r hello 3 + assert_equal {{b 3.0} {c 5.0}} [r zinter 2 zseta{t} zsetb{t} withscores] + r hello 2 + } + + test "ZINTERSTORE with weights - $encoding" { + assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} weights 2 3] + assert_equal {b 7 c 12} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZINTER with weights - $encoding" { + assert_equal {b 7 c 12} [r zinter 2 zseta{t} zsetb{t} weights 2 3 withscores] + } + 
+ test "ZINTERSTORE with a regular set and weights - $encoding" { + r del seta{t} + r sadd seta{t} a + r sadd seta{t} b + r sadd seta{t} c + assert_equal 2 [r zinterstore zsetc{t} 2 seta{t} zsetb{t} weights 2 3] + assert_equal {b 5 c 8} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZINTERSTORE with AGGREGATE MIN - $encoding" { + assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} aggregate min] + assert_equal {b 1 c 2} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZINTERSTORE with AGGREGATE MAX - $encoding" { + assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} aggregate max] + assert_equal {b 2 c 3} [r zrange zsetc{t} 0 -1 withscores] + } + + foreach cmd {ZUNIONSTORE ZINTERSTORE} { + test "$cmd with +inf/-inf scores - $encoding" { + r del zsetinf1{t} zsetinf2{t} + + r zadd zsetinf1{t} +inf key + r zadd zsetinf2{t} +inf key + r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} + assert_equal inf [r zscore zsetinf3{t} key] + + r zadd zsetinf1{t} -inf key + r zadd zsetinf2{t} +inf key + r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} + assert_equal 0 [r zscore zsetinf3{t} key] + + r zadd zsetinf1{t} +inf key + r zadd zsetinf2{t} -inf key + r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} + assert_equal 0 [r zscore zsetinf3{t} key] + + r zadd zsetinf1{t} -inf key + r zadd zsetinf2{t} -inf key + r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} + assert_equal -inf [r zscore zsetinf3{t} key] + } + + test "$cmd with NaN weights - $encoding" { + r del zsetinf1{t} zsetinf2{t} + + r zadd zsetinf1{t} 1.0 key + r zadd zsetinf2{t} 1.0 key + assert_error "*weight*not*float*" { + r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} weights nan nan + } + } + } + + test "ZDIFFSTORE basics - $encoding" { + assert_equal 1 [r zdiffstore zsetc{t} 2 zseta{t} zsetb{t}] + assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZDIFF basics - $encoding" { + assert_equal {a 1} [r zdiff 2 zseta{t} zsetb{t} withscores] + } + + test "ZDIFFSTORE with a regular set - $encoding" { + r 
del seta{t} + r sadd seta{t} a + r sadd seta{t} b + r sadd seta{t} c + assert_equal 1 [r zdiffstore zsetc{t} 2 seta{t} zsetb{t}] + assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZDIFF subtracting set from itself - $encoding" { + assert_equal 0 [r zdiffstore zsetc{t} 2 zseta{t} zseta{t}] + assert_equal {} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZDIFF algorithm 1 - $encoding" { + r del zseta{t} zsetb{t} zsetc{t} + r zadd zseta{t} 1 a + r zadd zseta{t} 2 b + r zadd zseta{t} 3 c + r zadd zsetb{t} 1 b + r zadd zsetb{t} 2 c + r zadd zsetb{t} 3 d + assert_equal 1 [r zdiffstore zsetc{t} 2 zseta{t} zsetb{t}] + assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores] + } + + test "ZDIFF algorithm 2 - $encoding" { + r del zseta{t} zsetb{t} zsetc{t} zsetd{t} zsete{t} + r zadd zseta{t} 1 a + r zadd zseta{t} 2 b + r zadd zseta{t} 3 c + r zadd zseta{t} 5 e + r zadd zsetb{t} 1 b + r zadd zsetc{t} 1 c + r zadd zsetd{t} 1 d + assert_equal 2 [r zdiffstore zsete{t} 4 zseta{t} zsetb{t} zsetc{t} zsetd{t}] + assert_equal {a 1 e 5} [r zrange zsete{t} 0 -1 withscores] + } + + test "ZDIFF fuzzing - $encoding" { + for {set j 0} {$j < 100} {incr j} { + unset -nocomplain s + array set s {} + set args {} + set num_sets [expr {[randomInt 10]+1}] + for {set i 0} {$i < $num_sets} {incr i} { + set num_elements [randomInt 100] + r del zset_$i{t} + lappend args zset_$i{t} + while {$num_elements} { + set ele [randomValue] + r zadd zset_$i{t} [randomInt 100] $ele + if {$i == 0} { + set s($ele) x + } else { + unset -nocomplain s($ele) + } + incr num_elements -1 + } + } + set result [lsort [r zdiff [llength $args] {*}$args]] + assert_equal $result [lsort [array names s]] + } + } + + foreach {pop} {ZPOPMIN ZPOPMAX} { + test "$pop with the count 0 returns an empty array" { + r del zset + r zadd zset 1 a 2 b 3 c + assert_equal {} [r $pop zset 0] + + # Make sure we can distinguish between an empty array and a null response + r readraw 1 + assert_equal {*0} [r $pop zset 0] + r 
readraw 0 + + assert_equal 3 [r zcard zset] + } + + test "$pop with negative count" { + r set zset foo + assert_error "ERR *must be positive" {r $pop zset -1} + + r del zset + assert_error "ERR *must be positive" {r $pop zset -2} + + r zadd zset 1 a 2 b 3 c + assert_error "ERR *must be positive" {r $pop zset -3} + } + } + + foreach {popmin popmax} {ZPOPMIN ZPOPMAX ZMPOP_MIN ZMPOP_MAX} { + test "Basic $popmin/$popmax with a single key - $encoding" { + r del zset + verify_zpop_response r $popmin zset 0 {} {} + + create_zset zset {-1 a 1 b 2 c 3 d 4 e} + verify_zpop_response r $popmin zset 0 {a -1} {zset {{a -1}}} + verify_zpop_response r $popmin zset 0 {b 1} {zset {{b 1}}} + verify_zpop_response r $popmax zset 0 {e 4} {zset {{e 4}}} + verify_zpop_response r $popmax zset 0 {d 3} {zset {{d 3}}} + verify_zpop_response r $popmin zset 0 {c 2} {zset {{c 2}}} + assert_equal 0 [r exists zset] + } + + test "$popmin/$popmax with count - $encoding" { + r del z1 + verify_zpop_response r $popmin z1 2 {} {} + + create_zset z1 {0 a 1 b 2 c 3 d} + verify_zpop_response r $popmin z1 2 {a 0 b 1} {z1 {{a 0} {b 1}}} + verify_zpop_response r $popmax z1 2 {d 3 c 2} {z1 {{d 3} {c 2}}} + } + } + + foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} { + test "$popmin/$popmax with a single existing sorted set - $encoding" { + set rd [redis_deferring_client] + create_zset zset {0 a 1 b 2 c 3 d} + + verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}} + verify_bzpop_response $rd $popmax zset 5 0 {zset d 3} {zset {{d 3}}} + verify_bzpop_response $rd $popmin zset 5 0 {zset b 1} {zset {{b 1}}} + verify_bzpop_response $rd $popmax zset 5 0 {zset c 2} {zset {{c 2}}} + assert_equal 0 [r exists zset] + $rd close + } + + test "$popmin/$popmax with multiple existing sorted sets - $encoding" { + set rd [redis_deferring_client] + create_zset z1{t} {0 a 1 b 2 c} + create_zset z2{t} {3 d 4 e 5 f} + + verify_bzpop_two_key_response $rd $popmin z1{t} z2{t} 5 0 {z1{t} a 0} {z1{t} {{a 
0}}} + verify_bzpop_two_key_response $rd $popmax z1{t} z2{t} 5 0 {z1{t} c 2} {z1{t} {{c 2}}} + assert_equal 1 [r zcard z1{t}] + assert_equal 3 [r zcard z2{t}] + + verify_bzpop_two_key_response $rd $popmax z2{t} z1{t} 5 0 {z2{t} f 5} {z2{t} {{f 5}}} + verify_bzpop_two_key_response $rd $popmin z2{t} z1{t} 5 0 {z2{t} d 3} {z2{t} {{d 3}}} + assert_equal 1 [r zcard z1{t}] + assert_equal 1 [r zcard z2{t}] + $rd close + } + + test "$popmin/$popmax second sorted set has members - $encoding" { + set rd [redis_deferring_client] + r del z1{t} + create_zset z2{t} {3 d 4 e 5 f} + + verify_bzpop_two_key_response $rd $popmax z1{t} z2{t} 5 0 {z2{t} f 5} {z2{t} {{f 5}}} + verify_bzpop_two_key_response $rd $popmin z1{t} z2{t} 5 0 {z2{t} d 3} {z2{t} {{d 3}}} + assert_equal 0 [r zcard z1{t}] + assert_equal 1 [r zcard z2{t}] + $rd close + } + } + + foreach {popmin popmax} {ZPOPMIN ZPOPMAX ZMPOP_MIN ZMPOP_MAX} { + test "Basic $popmin/$popmax - $encoding RESP3" { + r hello 3 + create_zset z1 {0 a 1 b 2 c 3 d} + verify_zpop_response r $popmin z1 0 {a 0.0} {z1 {{a 0.0}}} + verify_zpop_response r $popmax z1 0 {d 3.0} {z1 {{d 3.0}}} + r hello 2 + } + + test "$popmin/$popmax with count - $encoding RESP3" { + r hello 3 + create_zset z1 {0 a 1 b 2 c 3 d} + verify_zpop_response r $popmin z1 2 {{a 0.0} {b 1.0}} {z1 {{a 0.0} {b 1.0}}} + verify_zpop_response r $popmax z1 2 {{d 3.0} {c 2.0}} {z1 {{d 3.0} {c 2.0}}} + r hello 2 + } + } + + foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} { + test "$popmin/$popmax - $encoding RESP3" { + r hello 3 + set rd [redis_deferring_client] + create_zset zset {0 a 1 b 2 c 3 d} + + verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}} + verify_bzpop_response $rd $popmax zset 5 0 {zset d 3} {zset {{d 3}}} + verify_bzpop_response $rd $popmin zset 5 0 {zset b 1} {zset {{b 1}}} + verify_bzpop_response $rd $popmax zset 5 0 {zset c 2} {zset {{c 2}}} + + assert_equal 0 [r exists zset] + r hello 2 + $rd close + } + } + + r config set 
zset-max-ziplist-entries $original_max_entries + r config set zset-max-ziplist-value $original_max_value + } + + basics listpack + basics skiplist + + test "ZPOP/ZMPOP against wrong type" { + r set foo{t} bar + assert_error "*WRONGTYPE*" {r zpopmin foo{t}} + assert_error "*WRONGTYPE*" {r zpopmin foo{t} 0} + assert_error "*WRONGTYPE*" {r zpopmax foo{t}} + assert_error "*WRONGTYPE*" {r zpopmax foo{t} 0} + assert_error "*WRONGTYPE*" {r zpopmin foo{t} 2} + + assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} min} + assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} max} + assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} max count 200} + + r del foo{t} + r set foo2{t} bar + assert_error "*WRONGTYPE*" {r zmpop 2 foo{t} foo2{t} min} + assert_error "*WRONGTYPE*" {r zmpop 2 foo2{t} foo1{t} max count 1} + } + + test "ZMPOP with illegal argument" { + assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop} + assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop 1} + assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop 1 myzset{t}} + + assert_error "ERR numkeys*" {r zmpop 0 myzset{t} MIN} + assert_error "ERR numkeys*" {r zmpop a myzset{t} MIN} + assert_error "ERR numkeys*" {r zmpop -1 myzset{t} MAX} + + assert_error "ERR syntax error*" {r zmpop 1 myzset{t} bad_where} + assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MIN bar_arg} + assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MAX MIN} + assert_error "ERR syntax error*" {r zmpop 1 myzset{t} COUNT} + assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MAX COUNT 1 COUNT 2} + assert_error "ERR syntax error*" {r zmpop 2 myzset{t} myzset2{t} bad_arg} + + assert_error "ERR count*" {r zmpop 1 myzset{t} MIN COUNT 0} + assert_error "ERR count*" {r zmpop 1 myzset{t} MAX COUNT a} + assert_error "ERR count*" {r zmpop 1 myzset{t} MIN COUNT -1} + assert_error "ERR count*" {r zmpop 2 myzset{t} myzset2{t} MAX COUNT -1} + } + + test "ZMPOP propagate as pop with count command to replica" 
{ + set repl [attach_to_replication_stream] + + # ZMPOP min/max propagate as ZPOPMIN/ZPOPMAX with count + r zadd myzset{t} 1 one 2 two 3 three + + # Pop elements from one zset. + r zmpop 1 myzset{t} min + r zmpop 1 myzset{t} max count 1 + + # Now the zset have only one element + r zmpop 2 myzset{t} myzset2{t} min count 10 + + # No elements so we don't propagate. + r zmpop 2 myzset{t} myzset2{t} max count 10 + + # Pop elements from the second zset. + r zadd myzset2{t} 1 one 2 two 3 three + r zmpop 2 myzset{t} myzset2{t} min count 2 + r zmpop 2 myzset{t} myzset2{t} max count 1 + + # Pop all elements. + r zadd myzset{t} 1 one 2 two 3 three + r zadd myzset2{t} 4 four 5 five 6 six + r zmpop 2 myzset{t} myzset2{t} min count 10 + r zmpop 2 myzset{t} myzset2{t} max count 10 + + assert_replication_stream $repl { + {select *} + {zadd myzset{t} 1 one 2 two 3 three} + {zpopmin myzset{t} 1} + {zpopmax myzset{t} 1} + {zpopmin myzset{t} 1} + {zadd myzset2{t} 1 one 2 two 3 three} + {zpopmin myzset2{t} 2} + {zpopmax myzset2{t} 1} + {zadd myzset{t} 1 one 2 two 3 three} + {zadd myzset2{t} 4 four 5 five 6 six} + {zpopmin myzset{t} 3} + {zpopmax myzset2{t} 3} + } + close_replication_stream $repl + } {} {needs:repl} + + foreach resp {3 2} { + set rd [redis_deferring_client] + + if {[lsearch $::denytags "resp3"] >= 0} { + if {$resp == 3} {continue} + } elseif {$::force_resp3} { + if {$resp == 2} {continue} + } + r hello $resp + $rd hello $resp + $rd read + + test "ZPOPMIN/ZPOPMAX readraw in RESP$resp" { + r del zset{t} + create_zset zset2{t} {1 a 2 b 3 c 4 d 5 e} + + r readraw 1 + + # ZPOP against non existing key. + assert_equal {*0} [r zpopmin zset{t}] + assert_equal {*0} [r zpopmin zset{t} 1] + + # ZPOP without COUNT option. + assert_equal {*2} [r zpopmin zset2{t}] + assert_equal [r read] {$1} + assert_equal [r read] {a} + verify_score_response r $resp 1 + + # ZPOP with COUNT option. 
+ if {$resp == 2} { + assert_equal {*2} [r zpopmax zset2{t} 1] + assert_equal [r read] {$1} + assert_equal [r read] {e} + } elseif {$resp == 3} { + assert_equal {*1} [r zpopmax zset2{t} 1] + assert_equal [r read] {*2} + assert_equal [r read] {$1} + assert_equal [r read] {e} + } + verify_score_response r $resp 5 + + r readraw 0 + } + + test "BZPOPMIN/BZPOPMAX readraw in RESP$resp" { + r del zset{t} + create_zset zset2{t} {1 a 2 b 3 c 4 d 5 e} + + $rd readraw 1 + + # BZPOP released on timeout. + $rd bzpopmin zset{t} 0.01 + verify_nil_response $resp [$rd read] + $rd bzpopmax zset{t} 0.01 + verify_nil_response $resp [$rd read] + + # BZPOP non-blocking path. + $rd bzpopmin zset1{t} zset2{t} 0.1 + assert_equal [$rd read] {*3} + assert_equal [$rd read] {$8} + assert_equal [$rd read] {zset2{t}} + assert_equal [$rd read] {$1} + assert_equal [$rd read] {a} + verify_score_response $rd $resp 1 + + # BZPOP blocking path. + $rd bzpopmin zset{t} 5 + wait_for_blocked_client + r zadd zset{t} 1 a + assert_equal [$rd read] {*3} + assert_equal [$rd read] {$7} + assert_equal [$rd read] {zset{t}} + assert_equal [$rd read] {$1} + assert_equal [$rd read] {a} + verify_score_response $rd $resp 1 + + $rd readraw 0 + } + + test "ZMPOP readraw in RESP$resp" { + r del zset{t} zset2{t} + create_zset zset3{t} {1 a} + create_zset zset4{t} {1 a 2 b 3 c 4 d 5 e} + + r readraw 1 + + # ZMPOP against non existing key. + verify_nil_response $resp [r zmpop 1 zset{t} min] + verify_nil_response $resp [r zmpop 1 zset{t} max count 1] + verify_nil_response $resp [r zmpop 2 zset{t} zset2{t} min] + verify_nil_response $resp [r zmpop 2 zset{t} zset2{t} max count 1] + + # ZMPOP with one input key. + assert_equal {*2} [r zmpop 1 zset3{t} max] + assert_equal [r read] {$8} + assert_equal [r read] {zset3{t}} + assert_equal [r read] {*1} + assert_equal [r read] {*2} + assert_equal [r read] {$1} + assert_equal [r read] {a} + verify_score_response r $resp 1 + + # ZMPOP with COUNT option. 
+ assert_equal {*2} [r zmpop 2 zset3{t} zset4{t} min count 2] + assert_equal [r read] {$8} + assert_equal [r read] {zset4{t}} + assert_equal [r read] {*2} + assert_equal [r read] {*2} + assert_equal [r read] {$1} + assert_equal [r read] {a} + verify_score_response r $resp 1 + assert_equal [r read] {*2} + assert_equal [r read] {$1} + assert_equal [r read] {b} + verify_score_response r $resp 2 + + r readraw 0 + } + + test "BZMPOP readraw in RESP$resp" { + r del zset{t} zset2{t} + create_zset zset3{t} {1 a 2 b 3 c 4 d 5 e} + + $rd readraw 1 + + # BZMPOP released on timeout. + $rd bzmpop 0.01 1 zset{t} min + verify_nil_response $resp [$rd read] + $rd bzmpop 0.01 2 zset{t} zset2{t} max + verify_nil_response $resp [$rd read] + + # BZMPOP non-blocking path. + $rd bzmpop 0.1 2 zset3{t} zset4{t} min + + assert_equal [$rd read] {*2} + assert_equal [$rd read] {$8} + assert_equal [$rd read] {zset3{t}} + assert_equal [$rd read] {*1} + assert_equal [$rd read] {*2} + assert_equal [$rd read] {$1} + assert_equal [$rd read] {a} + verify_score_response $rd $resp 1 + + # BZMPOP blocking path with COUNT option. 
+ $rd bzmpop 5 2 zset{t} zset2{t} max count 2 + wait_for_blocked_client + r zadd zset2{t} 1 a 2 b 3 c + + assert_equal [$rd read] {*2} + assert_equal [$rd read] {$8} + assert_equal [$rd read] {zset2{t}} + assert_equal [$rd read] {*2} + assert_equal [$rd read] {*2} + assert_equal [$rd read] {$1} + assert_equal [$rd read] {c} + verify_score_response $rd $resp 3 + assert_equal [$rd read] {*2} + assert_equal [$rd read] {$1} + assert_equal [$rd read] {b} + verify_score_response $rd $resp 2 + + } + + $rd close + r hello 2 + } + + test {ZINTERSTORE regression with two sets, intset+hashtable} { + r del seta{t} setb{t} setc{t} + r sadd set1{t} a + r sadd set2{t} 10 + r zinterstore set3{t} 2 set1{t} set2{t} + } {0} + + test {ZUNIONSTORE regression, should not create NaN in scores} { + r zadd z{t} -inf neginf + r zunionstore out{t} 1 z{t} weights 0 + r zrange out{t} 0 -1 withscores + } {neginf 0} + + test {ZINTERSTORE #516 regression, mixed sets and ziplist zsets} { + r sadd one{t} 100 101 102 103 + r sadd two{t} 100 200 201 202 + r zadd three{t} 1 500 1 501 1 502 1 503 1 100 + r zinterstore to_here{t} 3 one{t} two{t} three{t} WEIGHTS 0 0 1 + r zrange to_here{t} 0 -1 + } {100} + + test {ZUNIONSTORE result is sorted} { + # Create two sets with common and not common elements, perform + # the UNION, check that elements are still sorted. 
+ r del one{t} two{t} dest{t} + set cmd1 [list r zadd one{t}] + set cmd2 [list r zadd two{t}] + for {set j 0} {$j < 1000} {incr j} { + lappend cmd1 [expr rand()] [randomInt 1000] + lappend cmd2 [expr rand()] [randomInt 1000] + } + {*}$cmd1 + {*}$cmd2 + assert {[r zcard one{t}] > 100} + assert {[r zcard two{t}] > 100} + r zunionstore dest{t} 2 one{t} two{t} + set oldscore 0 + foreach {ele score} [r zrange dest{t} 0 -1 withscores] { + assert {$score >= $oldscore} + set oldscore $score + } + } + + test "ZUNIONSTORE/ZINTERSTORE/ZDIFFSTORE error if using WITHSCORES " { + assert_error "*ERR*syntax*" {r zunionstore foo{t} 2 zsetd{t} zsetf{t} withscores} + assert_error "*ERR*syntax*" {r zinterstore foo{t} 2 zsetd{t} zsetf{t} withscores} + assert_error "*ERR*syntax*" {r zdiffstore foo{t} 2 zsetd{t} zsetf{t} withscores} + } + + test {ZMSCORE retrieve} { + r del zmscoretest + r zadd zmscoretest 10 x + r zadd zmscoretest 20 y + + r zmscore zmscoretest x y + } {10 20} + + test {ZMSCORE retrieve from empty set} { + r del zmscoretest + + r zmscore zmscoretest x y + } {{} {}} + + test {ZMSCORE retrieve with missing member} { + r del zmscoretest + r zadd zmscoretest 10 x + + r zmscore zmscoretest x y + } {10 {}} + + test {ZMSCORE retrieve single member} { + r del zmscoretest + r zadd zmscoretest 10 x + r zadd zmscoretest 20 y + + r zmscore zmscoretest x + } {10} + + test {ZMSCORE retrieve requires one or more members} { + r del zmscoretest + r zadd zmscoretest 10 x + r zadd zmscoretest 20 y + + catch {r zmscore zmscoretest} e + assert_match {*ERR*wrong*number*arg*} $e + } + + test "ZSET commands don't accept the empty strings as valid score" { + assert_error "*not*float*" {r zadd myzset "" abc} + } + + test "zunionInterDiffGenericCommand at least 1 input key" { + assert_error {*at least 1 input key * 'zunion' command} {r zunion 0 key{t}} + assert_error {*at least 1 input key * 'zunionstore' command} {r zunionstore dst_key{t} 0 key{t}} + assert_error {*at least 1 input key * 
'zinter' command} {r zinter 0 key{t}} + assert_error {*at least 1 input key * 'zinterstore' command} {r zinterstore dst_key{t} 0 key{t}} + assert_error {*at least 1 input key * 'zdiff' command} {r zdiff 0 key{t}} + assert_error {*at least 1 input key * 'zdiffstore' command} {r zdiffstore dst_key{t} 0 key{t}} + assert_error {*at least 1 input key * 'zintercard' command} {r zintercard 0 key{t}} + } + + proc stressers {encoding} { + set original_max_entries [lindex [r config get zset-max-ziplist-entries] 1] + set original_max_value [lindex [r config get zset-max-ziplist-value] 1] + if {$encoding == "listpack"} { + # Little extra to allow proper fuzzing in the sorting stresser + r config set zset-max-ziplist-entries 256 + r config set zset-max-ziplist-value 64 + set elements 128 + } elseif {$encoding == "skiplist"} { + r config set zset-max-ziplist-entries 0 + r config set zset-max-ziplist-value 0 + if {$::accurate} {set elements 1000} else {set elements 100} + } else { + puts "Unknown sorted set encoding" + exit + } + + test "ZSCORE - $encoding" { + r del zscoretest + set aux {} + for {set i 0} {$i < $elements} {incr i} { + set score [expr rand()] + lappend aux $score + r zadd zscoretest $score $i + } + + assert_encoding $encoding zscoretest + for {set i 0} {$i < $elements} {incr i} { + # If an IEEE 754 double-precision number is converted to a decimal string with at + # least 17 significant digits (reply of zscore), and then converted back to double-precision representation, + # the final result replied via zscore command must match the original number present on the $aux list. + # Given Tcl is mostly very relaxed about types (everything is a string) we need to use expr to convert a string to float. 
+ assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]] + } + } + + test "ZMSCORE - $encoding" { + r del zscoretest + set aux {} + for {set i 0} {$i < $elements} {incr i} { + set score [expr rand()] + lappend aux $score + r zadd zscoretest $score $i + } + + assert_encoding $encoding zscoretest + for {set i 0} {$i < $elements} {incr i} { + # Check above notes on IEEE 754 double-precision comparison + assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]] + } + } + + test "ZSCORE after a DEBUG RELOAD - $encoding" { + r del zscoretest + set aux {} + for {set i 0} {$i < $elements} {incr i} { + set score [expr rand()] + lappend aux $score + r zadd zscoretest $score $i + } + + r debug reload + assert_encoding $encoding zscoretest + for {set i 0} {$i < $elements} {incr i} { + # Check above notes on IEEE 754 double-precision comparison + assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]] + } + } {} {needs:debug} + + test "ZSET sorting stresser - $encoding" { + set delta 0 + for {set test 0} {$test < 2} {incr test} { + unset -nocomplain auxarray + array set auxarray {} + set auxlist {} + r del myzset + for {set i 0} {$i < $elements} {incr i} { + if {$test == 0} { + set score [expr rand()] + } else { + set score [expr int(rand()*10)] + } + set auxarray($i) $score + r zadd myzset $score $i + # Random update + if {[expr rand()] < .2} { + set j [expr int(rand()*1000)] + if {$test == 0} { + set score [expr rand()] + } else { + set score [expr int(rand()*10)] + } + set auxarray($j) $score + r zadd myzset $score $j + } + } + foreach {item score} [array get auxarray] { + lappend auxlist [list $score $item] + } + set sorted [lsort -command zlistAlikeSort $auxlist] + set auxlist {} + foreach x $sorted { + lappend auxlist [lindex $x 1] + } + + assert_encoding $encoding myzset + set fromredis [r zrange myzset 0 -1] + set delta 0 + for {set i 0} {$i < [llength $fromredis]} {incr i} { + if {[lindex $fromredis $i] != [lindex $auxlist $i]} { + 
incr delta + } + } + } + assert_equal 0 $delta + } + + test "ZRANGEBYSCORE fuzzy test, 100 ranges in $elements element sorted set - $encoding" { + set err {} + r del zset + for {set i 0} {$i < $elements} {incr i} { + r zadd zset [expr rand()] $i + } + + assert_encoding $encoding zset + for {set i 0} {$i < 100} {incr i} { + set min [expr rand()] + set max [expr rand()] + if {$min > $max} { + set aux $min + set min $max + set max $aux + } + set low [r zrangebyscore zset -inf $min] + set ok [r zrangebyscore zset $min $max] + set high [r zrangebyscore zset $max +inf] + set lowx [r zrangebyscore zset -inf ($min] + set okx [r zrangebyscore zset ($min ($max] + set highx [r zrangebyscore zset ($max +inf] + + if {[r zcount zset -inf $min] != [llength $low]} { + append err "Error, len does not match zcount\n" + } + if {[r zcount zset $min $max] != [llength $ok]} { + append err "Error, len does not match zcount\n" + } + if {[r zcount zset $max +inf] != [llength $high]} { + append err "Error, len does not match zcount\n" + } + if {[r zcount zset -inf ($min] != [llength $lowx]} { + append err "Error, len does not match zcount\n" + } + if {[r zcount zset ($min ($max] != [llength $okx]} { + append err "Error, len does not match zcount\n" + } + if {[r zcount zset ($max +inf] != [llength $highx]} { + append err "Error, len does not match zcount\n" + } + + foreach x $low { + set score [r zscore zset $x] + if {$score > $min} { + append err "Error, score for $x is $score > $min\n" + } + } + foreach x $lowx { + set score [r zscore zset $x] + if {$score >= $min} { + append err "Error, score for $x is $score >= $min\n" + } + } + foreach x $ok { + set score [r zscore zset $x] + if {$score < $min || $score > $max} { + append err "Error, score for $x is $score outside $min-$max range\n" + } + } + foreach x $okx { + set score [r zscore zset $x] + if {$score <= $min || $score >= $max} { + append err "Error, score for $x is $score outside $min-$max open range\n" + } + } + foreach x $high { + 
set score [r zscore zset $x] + if {$score < $max} { + append err "Error, score for $x is $score < $max\n" + } + } + foreach x $highx { + set score [r zscore zset $x] + if {$score <= $max} { + append err "Error, score for $x is $score <= $max\n" + } + } + } + assert_equal {} $err + } + + test "ZRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { + set lexset {} + r del zset + for {set j 0} {$j < $elements} {incr j} { + set e [randstring 0 30 alpha] + lappend lexset $e + r zadd zset 0 $e + } + set lexset [lsort -unique $lexset] + for {set j 0} {$j < 100} {incr j} { + set min [randstring 0 30 alpha] + set max [randstring 0 30 alpha] + set mininc [randomInt 2] + set maxinc [randomInt 2] + if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} + if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} + set rev [randomInt 2] + if {$rev} { + set cmd zrevrangebylex + } else { + set cmd zrangebylex + } + + # Make sure data is the same in both sides + assert {[r zrange zset 0 -1] eq $lexset} + + # Get the Redis output + set output [r $cmd zset $cmin $cmax] + if {$rev} { + set outlen [r zlexcount zset $cmax $cmin] + } else { + set outlen [r zlexcount zset $cmin $cmax] + } + + # Compute the same output via Tcl + set o {} + set copy $lexset + if {(!$rev && [string compare $min $max] > 0) || + ($rev && [string compare $max $min] > 0)} { + # Empty output when ranges are inverted. + } else { + if {$rev} { + # Invert the Tcl array using Redis itself. 
+ set copy [r zrevrange zset 0 -1] + # Invert min / max as well + lassign [list $min $max $mininc $maxinc] \ + max min maxinc mininc + } + foreach e $copy { + set mincmp [string compare $e $min] + set maxcmp [string compare $e $max] + if { + ($mininc && $mincmp >= 0 || !$mininc && $mincmp > 0) + && + ($maxinc && $maxcmp <= 0 || !$maxinc && $maxcmp < 0) + } { + lappend o $e + } + } + } + assert {$o eq $output} + assert {$outlen eq [llength $output]} + } + } + + test "ZREMRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { + set lexset {} + r del zset{t} zsetcopy{t} + for {set j 0} {$j < $elements} {incr j} { + set e [randstring 0 30 alpha] + lappend lexset $e + r zadd zset{t} 0 $e + } + set lexset [lsort -unique $lexset] + for {set j 0} {$j < 100} {incr j} { + # Copy... + r zunionstore zsetcopy{t} 1 zset{t} + set lexsetcopy $lexset + + set min [randstring 0 30 alpha] + set max [randstring 0 30 alpha] + set mininc [randomInt 2] + set maxinc [randomInt 2] + if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} + if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} + + # Make sure data is the same in both sides + assert {[r zrange zset{t} 0 -1] eq $lexset} + + # Get the range we are going to remove + set torem [r zrangebylex zset{t} $cmin $cmax] + set toremlen [r zlexcount zset{t} $cmin $cmax] + r zremrangebylex zsetcopy{t} $cmin $cmax + set output [r zrange zsetcopy{t} 0 -1] + + # Remove the range with Tcl from the original list + if {$toremlen} { + set first [lsearch -exact $lexsetcopy [lindex $torem 0]] + set last [expr {$first+$toremlen-1}] + set lexsetcopy [lreplace $lexsetcopy $first $last] + } + assert {$lexsetcopy eq $output} + } + } + + test "ZSETs skiplist implementation backlink consistency test - $encoding" { + set diff 0 + for {set j 0} {$j < $elements} {incr j} { + r zadd myzset [expr rand()] "Element-$j" + r zrem myzset "Element-[expr int(rand()*$elements)]" + } + + assert_encoding $encoding myzset + set l1 [r zrange 
myzset 0 -1] + set l2 [r zrevrange myzset 0 -1] + for {set j 0} {$j < [llength $l1]} {incr j} { + if {[lindex $l1 $j] ne [lindex $l2 end-$j]} { + incr diff + } + } + assert_equal 0 $diff + } + + test "ZSETs ZRANK augmented skip list stress testing - $encoding" { + set err {} + r del myzset + for {set k 0} {$k < 2000} {incr k} { + set i [expr {$k % $elements}] + if {[expr rand()] < .2} { + r zrem myzset $i + } else { + set score [expr rand()] + r zadd myzset $score $i + assert_encoding $encoding myzset + } + + set card [r zcard myzset] + if {$card > 0} { + set index [randomInt $card] + set ele [lindex [r zrange myzset $index $index] 0] + set rank [r zrank myzset $ele] + if {$rank != $index} { + set err "$ele RANK is wrong! ($rank != $index)" + break + } + } + } + assert_equal {} $err + } + + foreach {pop} {BZPOPMIN BZMPOP_MIN} { + test "$pop, ZADD + DEL should not awake blocked client" { + set rd [redis_deferring_client] + r del zset + + bzpop_command $rd $pop zset 0 + wait_for_blocked_client + + r multi + r zadd zset 0 foo + r del zset + r exec + r del zset + r zadd zset 1 bar + + verify_pop_response $pop [$rd read] {zset bar 1} {zset {{bar 1}}} + $rd close + } + + test "$pop, ZADD + DEL + SET should not awake blocked client" { + set rd [redis_deferring_client] + r del zset + + bzpop_command $rd $pop zset 0 + wait_for_blocked_client + + r multi + r zadd zset 0 foo + r del zset + r set zset foo + r exec + r del zset + r zadd zset 1 bar + + verify_pop_response $pop [$rd read] {zset bar 1} {zset {{bar 1}}} + $rd close + } + } + + test {BZPOPMIN unblock but the key is expired and then block again - reprocessing command} { + r flushall + r debug set-active-expire 0 + set rd [redis_deferring_client] + + set start [clock milliseconds] + $rd bzpopmin zset{t} 1 + wait_for_blocked_clients_count 1 + + # The exec will try to awake the blocked client, but the key is expired, + # so the client will be blocked again during the command reprocessing. 
+ r multi + r zadd zset{t} 1 one + r pexpire zset{t} 100 + r debug sleep 0.2 + r exec + + assert_equal {} [$rd read] + set end [clock milliseconds] + + # Before the fix in #13004, this time would have been 1200+ (i.e. more than 1200ms), + # now it should be 1000, but in order to avoid timing issues, we increase the range a bit. + assert_range [expr $end-$start] 1000 1150 + + r debug set-active-expire 1 + $rd close + } {0} {needs:debug} + + test "BZPOPMIN with same key multiple times should work" { + set rd [redis_deferring_client] + r del z1{t} z2{t} + + # Data arriving after the BZPOPMIN. + $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 + wait_for_blocked_client + r zadd z1{t} 0 a + assert_equal [$rd read] {z1{t} a 0} + $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 + wait_for_blocked_client + r zadd z2{t} 1 b + assert_equal [$rd read] {z2{t} b 1} + + # Data already there. + r zadd z1{t} 0 a + r zadd z2{t} 1 b + $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 + assert_equal [$rd read] {z1{t} a 0} + $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 + assert_equal [$rd read] {z2{t} b 1} + $rd close + } + + foreach {pop} {BZPOPMIN BZMPOP_MIN} { + test "MULTI/EXEC is isolated from the point of view of $pop" { + set rd [redis_deferring_client] + r del zset + + bzpop_command $rd $pop zset 0 + wait_for_blocked_client + + r multi + r zadd zset 0 a + r zadd zset 1 b + r zadd zset 2 c + r exec + + verify_pop_response $pop [$rd read] {zset a 0} {zset {{a 0}}} + $rd close + } + + test "$pop with variadic ZADD" { + set rd [redis_deferring_client] + r del zset + if {$::valgrind} {after 100} + bzpop_command $rd $pop zset 0 + wait_for_blocked_client + if {$::valgrind} {after 100} + assert_equal 2 [r zadd zset -1 foo 1 bar] + if {$::valgrind} {after 100} + verify_pop_response $pop [$rd read] {zset foo -1} {zset {{foo -1}}} + assert_equal {bar} [r zrange zset 0 -1] + $rd close + } + + test "$pop with zero timeout should block indefinitely" { + set rd [redis_deferring_client] + r del zset + bzpop_command $rd $pop zset 
0 + wait_for_blocked_client + after 1000 + r zadd zset 0 foo + verify_pop_response $pop [$rd read] {zset foo 0} {zset {{foo 0}}} + $rd close + } + } + + r config set zset-max-ziplist-entries $original_max_entries + r config set zset-max-ziplist-value $original_max_value + } + + tags {"slow"} { + stressers listpack + stressers skiplist + } + + test "BZPOP/BZMPOP against wrong type" { + r set foo{t} bar + assert_error "*WRONGTYPE*" {r bzpopmin foo{t} 1} + assert_error "*WRONGTYPE*" {r bzpopmax foo{t} 1} + + assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} min} + assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} max} + assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} min count 10} + + r del foo{t} + r set foo2{t} bar + assert_error "*WRONGTYPE*" {r bzmpop 1 2 foo{t} foo2{t} min} + assert_error "*WRONGTYPE*" {r bzmpop 1 2 foo2{t} foo{t} max count 1} + } + + test "BZMPOP with illegal argument" { + assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop} + assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop 0 1} + assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop 0 1 myzset{t}} + + assert_error "ERR numkeys*" {r bzmpop 1 0 myzset{t} MIN} + assert_error "ERR numkeys*" {r bzmpop 1 a myzset{t} MIN} + assert_error "ERR numkeys*" {r bzmpop 1 -1 myzset{t} MAX} + + assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} bad_where} + assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MIN bar_arg} + assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MAX MIN} + assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} COUNT} + assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MIN COUNT 1 COUNT 2} + assert_error "ERR syntax error*" {r bzmpop 1 2 myzset{t} myzset2{t} bad_arg} + + assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MIN COUNT 0} + assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MAX COUNT a} + assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MIN COUNT -1} + assert_error "ERR count*" 
{r bzmpop 1 2 myzset{t} myzset2{t} MAX COUNT -1} + } + + test "BZMPOP with multiple blocked clients" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + set rd3 [redis_deferring_client] + set rd4 [redis_deferring_client] + r del myzset{t} myzset2{t} + + $rd1 bzmpop 0 2 myzset{t} myzset2{t} min count 1 + wait_for_blocked_clients_count 1 + $rd2 bzmpop 0 2 myzset{t} myzset2{t} max count 10 + wait_for_blocked_clients_count 2 + $rd3 bzmpop 0 2 myzset{t} myzset2{t} min count 10 + wait_for_blocked_clients_count 3 + $rd4 bzmpop 0 2 myzset{t} myzset2{t} max count 1 + wait_for_blocked_clients_count 4 + + r multi + r zadd myzset{t} 1 a 2 b 3 c 4 d 5 e + r zadd myzset2{t} 1 a 2 b 3 c 4 d 5 e + r exec + + assert_equal {myzset{t} {{a 1}}} [$rd1 read] + assert_equal {myzset{t} {{e 5} {d 4} {c 3} {b 2}}} [$rd2 read] + assert_equal {myzset2{t} {{a 1} {b 2} {c 3} {d 4} {e 5}}} [$rd3 read] + + r zadd myzset2{t} 1 a 2 b 3 c + assert_equal {myzset2{t} {{c 3}}} [$rd4 read] + + r del myzset{t} myzset2{t} + $rd1 close + $rd2 close + $rd3 close + $rd4 close + } + + test "BZMPOP propagate as pop with count command to replica" { + set rd [redis_deferring_client] + set repl [attach_to_replication_stream] + + # BZMPOP without being blocked. + r zadd myzset{t} 1 one 2 two 3 three + r zadd myzset2{t} 4 four 5 five 6 six + r bzmpop 0 1 myzset{t} min + r bzmpop 0 2 myzset{t} myzset2{t} max count 10 + r bzmpop 0 2 myzset{t} myzset2{t} max count 10 + + # BZMPOP that gets blocked. + $rd bzmpop 0 1 myzset{t} min count 1 + wait_for_blocked_client + r zadd myzset{t} 1 one + $rd bzmpop 0 2 myzset{t} myzset2{t} min count 5 + wait_for_blocked_client + r zadd myzset{t} 1 one 2 two 3 three + $rd bzmpop 0 2 myzset{t} myzset2{t} max count 10 + wait_for_blocked_client + r zadd myzset2{t} 4 four 5 five 6 six + + # Released on timeout. + assert_equal {} [r bzmpop 0.01 1 myzset{t} max count 10] + r set foo{t} bar ;# something else to propagate after, so we can make sure the above pop didn't. 
+ + $rd close + + assert_replication_stream $repl { + {select *} + {zadd myzset{t} 1 one 2 two 3 three} + {zadd myzset2{t} 4 four 5 five 6 six} + {zpopmin myzset{t} 1} + {zpopmax myzset{t} 2} + {zpopmax myzset2{t} 3} + {zadd myzset{t} 1 one} + {zpopmin myzset{t} 1} + {zadd myzset{t} 1 one 2 two 3 three} + {zpopmin myzset{t} 3} + {zadd myzset2{t} 4 four 5 five 6 six} + {zpopmax myzset2{t} 3} + {set foo{t} bar} + } + close_replication_stream $repl + } {} {needs:repl} + + test "BZMPOP should not blocks on non key arguments - #10762" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + r del myzset myzset2 myzset3 + + $rd1 bzmpop 0 1 myzset min count 10 + wait_for_blocked_clients_count 1 + $rd2 bzmpop 0 2 myzset2 myzset3 max count 10 + wait_for_blocked_clients_count 2 + + # These non-key keys will not unblock the clients. + r zadd 0 100 timeout_value + r zadd 1 200 numkeys_value + r zadd min 300 min_token + r zadd max 400 max_token + r zadd count 500 count_token + r zadd 10 600 count_value + + r zadd myzset 1 zset + r zadd myzset3 1 zset3 + assert_equal {myzset {{zset 1}}} [$rd1 read] + assert_equal {myzset3 {{zset3 1}}} [$rd2 read] + + $rd1 close + $rd2 close + } {0} {cluster:skip} + + test {ZSET skiplist order consistency when elements are moved} { + set original_max [lindex [r config get zset-max-ziplist-entries] 1] + r config set zset-max-ziplist-entries 0 + for {set times 0} {$times < 10} {incr times} { + r del zset + for {set j 0} {$j < 1000} {incr j} { + r zadd zset [randomInt 50] ele-[randomInt 10] + } + + # Make sure that element ordering is correct + set prev_element {} + set prev_score -1 + foreach {element score} [r zrange zset 0 -1 WITHSCORES] { + # Assert that elements are in increasing ordering + assert { + $prev_score < $score || + ($prev_score == $score && + [string compare $prev_element $element] == -1) + } + set prev_element $element + set prev_score $score + } + } + r config set zset-max-ziplist-entries $original_max + } + + 
test {ZRANGESTORE basic} { + r flushall + r zadd z1{t} 1 a 2 b 3 c 4 d + set res [r zrangestore z2{t} z1{t} 0 -1] + assert_equal $res 4 + r zrange z2{t} 0 -1 withscores + } {a 1 b 2 c 3 d 4} + + test {ZRANGESTORE RESP3} { + r hello 3 + assert_equal [r zrange z2{t} 0 -1 withscores] {{a 1.0} {b 2.0} {c 3.0} {d 4.0}} + r hello 2 + } + + test {ZRANGESTORE range} { + set res [r zrangestore z2{t} z1{t} 1 2] + assert_equal $res 2 + r zrange z2{t} 0 -1 withscores + } {b 2 c 3} + + test {ZRANGESTORE BYLEX} { + set res [r zrangestore z3{t} z1{t} \[b \[c BYLEX] + assert_equal $res 2 + assert_encoding listpack z3{t} + set res [r zrangestore z2{t} z1{t} \[b \[c BYLEX] + assert_equal $res 2 + r zrange z2{t} 0 -1 withscores + } {b 2 c 3} + + test {ZRANGESTORE BYSCORE} { + set res [r zrangestore z4{t} z1{t} 1 2 BYSCORE] + assert_equal $res 2 + assert_encoding listpack z4{t} + set res [r zrangestore z2{t} z1{t} 1 2 BYSCORE] + assert_equal $res 2 + r zrange z2{t} 0 -1 withscores + } {a 1 b 2} + + test {ZRANGESTORE BYSCORE LIMIT} { + set res [r zrangestore z2{t} z1{t} 0 5 BYSCORE LIMIT 0 2] + assert_equal $res 2 + r zrange z2{t} 0 -1 withscores + } {a 1 b 2} + + test {ZRANGESTORE BYSCORE REV LIMIT} { + set res [r zrangestore z2{t} z1{t} 5 0 BYSCORE REV LIMIT 0 2] + assert_equal $res 2 + r zrange z2{t} 0 -1 withscores + } {c 3 d 4} + + test {ZRANGE BYSCORE REV LIMIT} { + r zrange z1{t} 5 0 BYSCORE REV LIMIT 0 2 WITHSCORES + } {d 4 c 3} + + test {ZRANGESTORE - src key missing} { + set res [r zrangestore z2{t} missing{t} 0 -1] + assert_equal $res 0 + r exists z2{t} + } {0} + + test {ZRANGESTORE - src key wrong type} { + r zadd z2{t} 1 a + r set foo{t} bar + assert_error "*WRONGTYPE*" {r zrangestore z2{t} foo{t} 0 -1} + r zrange z2{t} 0 -1 + } {a} + + test {ZRANGESTORE - empty range} { + set res [r zrangestore z2{t} z1{t} 5 6] + assert_equal $res 0 + r exists z2{t} + } {0} + + test {ZRANGESTORE BYLEX - empty range} { + set res [r zrangestore z2{t} z1{t} \[f \[g BYLEX] + assert_equal $res 
0 + r exists z2{t} + } {0} + + test {ZRANGESTORE BYSCORE - empty range} { + set res [r zrangestore z2{t} z1{t} 5 6 BYSCORE] + assert_equal $res 0 + r exists z2{t} + } {0} + + test {ZRANGE BYLEX} { + r zrange z1{t} \[b \[c BYLEX + } {b c} + + test {ZRANGESTORE invalid syntax} { + catch {r zrangestore z2{t} z1{t} 0 -1 limit 1 2} err + assert_match "*syntax*" $err + catch {r zrangestore z2{t} z1{t} 0 -1 WITHSCORES} err + assert_match "*syntax*" $err + } + + test {ZRANGESTORE with zset-max-listpack-entries 0 #10767 case} { + set original_max [lindex [r config get zset-max-listpack-entries] 1] + r config set zset-max-listpack-entries 0 + r del z1{t} z2{t} + r zadd z1{t} 1 a + assert_encoding skiplist z1{t} + assert_equal 1 [r zrangestore z2{t} z1{t} 0 -1] + assert_encoding skiplist z2{t} + r config set zset-max-listpack-entries $original_max + } + + test {ZRANGESTORE with zset-max-listpack-entries 1 dst key should use skiplist encoding} { + set original_max [lindex [r config get zset-max-listpack-entries] 1] + r config set zset-max-listpack-entries 1 + r del z1{t} z2{t} z3{t} + r zadd z1{t} 1 a 2 b + assert_equal 1 [r zrangestore z2{t} z1{t} 0 0] + assert_encoding listpack z2{t} + assert_equal 2 [r zrangestore z3{t} z1{t} 0 1] + assert_encoding skiplist z3{t} + r config set zset-max-listpack-entries $original_max + } + + test {ZRANGE invalid syntax} { + catch {r zrange z1{t} 0 -1 limit 1 2} err + assert_match "*syntax*" $err + catch {r zrange z1{t} 0 -1 BYLEX WITHSCORES} err + assert_match "*syntax*" $err + catch {r zrevrange z1{t} 0 -1 BYSCORE} err + assert_match "*syntax*" $err + catch {r zrangebyscore z1{t} 0 -1 REV} err + assert_match "*syntax*" $err + } + + proc get_keys {l} { + set res {} + foreach {score key} $l { + lappend res $key + } + return $res + } + + # Check whether the zset members belong to the zset + proc check_member {mydict res} { + foreach ele $res { + assert {[dict exists $mydict $ele]} + } + } + + # Check whether the zset members and score belong 
to the zset + proc check_member_and_score {mydict res} { + foreach {key val} $res { + assert_equal $val [dict get $mydict $key] + } + } + + foreach {type contents} "listpack {1 a 2 b 3 c} skiplist {1 a 2 b 3 [randstring 70 90 alpha]}" { + set original_max_value [lindex [r config get zset-max-ziplist-value] 1] + r config set zset-max-ziplist-value 10 + create_zset myzset $contents + assert_encoding $type myzset + + test "ZRANDMEMBER - $type" { + unset -nocomplain myzset + array set myzset {} + for {set i 0} {$i < 100} {incr i} { + set key [r zrandmember myzset] + set myzset($key) 1 + } + assert_equal [lsort [get_keys $contents]] [lsort [array names myzset]] + } + r config set zset-max-ziplist-value $original_max_value + } + + test "ZRANDMEMBER with RESP3" { + r hello 3 + set res [r zrandmember myzset 3 withscores] + assert_equal [llength $res] 3 + assert_equal [llength [lindex $res 1]] 2 + + set res [r zrandmember myzset 3] + assert_equal [llength $res] 3 + assert_equal [llength [lindex $res 1]] 1 + r hello 2 + } + + test "ZRANDMEMBER count of 0 is handled correctly" { + r zrandmember myzset 0 + } {} + + test "ZRANDMEMBER with against non existing key" { + r zrandmember nonexisting_key 100 + } {} + + test "ZRANDMEMBER count overflow" { + r zadd myzset 0 a + assert_error {*value is out of range*} {r zrandmember myzset -9223372036854770000 withscores} + assert_error {*value is out of range*} {r zrandmember myzset -9223372036854775808 withscores} + assert_error {*value is out of range*} {r zrandmember myzset -9223372036854775808} + } {} + + # Make sure we can distinguish between an empty array and a null response + r readraw 1 + + test "ZRANDMEMBER count of 0 is handled correctly - emptyarray" { + r zrandmember myzset 0 + } {*0} + + test "ZRANDMEMBER with against non existing key - emptyarray" { + r zrandmember nonexisting_key 100 + } {*0} + + r readraw 0 + + foreach {type contents} " + skiplist {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 i 10 [randstring 70 90 alpha]} + 
listpack {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 i 10 j} " { + test "ZRANDMEMBER with - $type" { + set original_max_value [lindex [r config get zset-max-ziplist-value] 1] + r config set zset-max-ziplist-value 10 + create_zset myzset $contents + assert_encoding $type myzset + + # create a dict for easy lookup + set mydict [dict create {*}[r zrange myzset 0 -1 withscores]] + + # We'll stress different parts of the code, see the implementation + # of ZRANDMEMBER for more information, but basically there are + # four different code paths. + + # PATH 1: Use negative count. + + # 1) Check that it returns repeated elements with and without values. + # 2) Check that all the elements actually belong to the original zset. + set res [r zrandmember myzset -20] + assert_equal [llength $res] 20 + check_member $mydict $res + + set res [r zrandmember myzset -1001] + assert_equal [llength $res] 1001 + check_member $mydict $res + + # again with WITHSCORES + set res [r zrandmember myzset -20 withscores] + assert_equal [llength $res] 40 + check_member_and_score $mydict $res + + set res [r zrandmember myzset -1001 withscores] + assert_equal [llength $res] 2002 + check_member_and_score $mydict $res + + # Test random uniform distribution + # df = 9, 40 means 0.00001 probability + set res [r zrandmember myzset -1000] + assert_lessthan [chi_square_value $res] 40 + check_member $mydict $res + + # 3) Check that eventually all the elements are returned. 
+ # Use both WITHSCORES and without + unset -nocomplain auxset + set iterations 1000 + while {$iterations != 0} { + incr iterations -1 + if {[expr {$iterations % 2}] == 0} { + set res [r zrandmember myzset -3 withscores] + foreach {key val} $res { + dict append auxset $key $val + } + } else { + set res [r zrandmember myzset -3] + foreach key $res { + dict append auxset $key + } + } + if {[lsort [dict keys $mydict]] eq + [lsort [dict keys $auxset]]} { + break; + } + } + assert {$iterations != 0} + + # PATH 2: positive count (unique behavior) with requested size + # equal or greater than set size. + foreach size {10 20} { + set res [r zrandmember myzset $size] + assert_equal [llength $res] 10 + assert_equal [lsort $res] [lsort [dict keys $mydict]] + check_member $mydict $res + + # again with WITHSCORES + set res [r zrandmember myzset $size withscores] + assert_equal [llength $res] 20 + assert_equal [lsort $res] [lsort $mydict] + check_member_and_score $mydict $res + } + + # PATH 3: Ask almost as elements as there are in the set. + # In this case the implementation will duplicate the original + # set and will remove random elements up to the requested size. + # + # PATH 4: Ask a number of elements definitely smaller than + # the set size. + # + # We can test both the code paths just changing the size but + # using the same code. + foreach size {1 2 8} { + # 1) Check that all the elements actually belong to the + # original set. + set res [r zrandmember myzset $size] + assert_equal [llength $res] $size + check_member $mydict $res + + # again with WITHSCORES + set res [r zrandmember myzset $size withscores] + assert_equal [llength $res] [expr {$size * 2}] + check_member_and_score $mydict $res + + # 2) Check that eventually all the elements are returned. 
+ # Use both WITHSCORES and without + unset -nocomplain auxset + unset -nocomplain allkey + set iterations [expr {1000 / $size}] + set all_ele_return false + while {$iterations != 0} { + incr iterations -1 + if {[expr {$iterations % 2}] == 0} { + set res [r zrandmember myzset $size withscores] + foreach {key value} $res { + dict append auxset $key $value + lappend allkey $key + } + } else { + set res [r zrandmember myzset $size] + foreach key $res { + dict append auxset $key + lappend allkey $key + } + } + if {[lsort [dict keys $mydict]] eq + [lsort [dict keys $auxset]]} { + set all_ele_return true + } + } + assert_equal $all_ele_return true + # df = 9, 40 means 0.00001 probability + assert_lessthan [chi_square_value $allkey] 40 + } + } + r config set zset-max-ziplist-value $original_max_value + } + + test {zset score double range} { + set dblmax 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.00000000000000000 + r del zz + r zadd zz $dblmax dblmax + assert_encoding listpack zz + r zscore zz dblmax + } {1.7976931348623157e+308} + + test {zunionInterDiffGenericCommand acts on SET and ZSET} { + r del set_small{t} set_big{t} zset_small{t} zset_big{t} zset_dest{t} + + foreach set_type {intset listpack hashtable} { + # Restore all default configurations before each round of testing. 
+ r config set set-max-intset-entries 512 + r config set set-max-listpack-entries 128 + r config set zset-max-listpack-entries 128 + + r del set_small{t} set_big{t} + + if {$set_type == "intset"} { + r sadd set_small{t} 1 2 3 + r sadd set_big{t} 1 2 3 4 5 + assert_encoding intset set_small{t} + assert_encoding intset set_big{t} + } elseif {$set_type == "listpack"} { + # Add an "a" and then remove it, make sure the set is listpack encoding. + r sadd set_small{t} a 1 2 3 + r sadd set_big{t} a 1 2 3 4 5 + r srem set_small{t} a + r srem set_big{t} a + assert_encoding listpack set_small{t} + assert_encoding listpack set_big{t} + } elseif {$set_type == "hashtable"} { + r config set set-max-intset-entries 0 + r config set set-max-listpack-entries 0 + r sadd set_small{t} 1 2 3 + r sadd set_big{t} 1 2 3 4 5 + assert_encoding hashtable set_small{t} + assert_encoding hashtable set_big{t} + } + + foreach zset_type {listpack skiplist} { + r del zset_small{t} zset_big{t} + + if {$zset_type == "listpack"} { + r zadd zset_small{t} 1 1 2 2 3 3 + r zadd zset_big{t} 1 1 2 2 3 3 4 4 5 5 + assert_encoding listpack zset_small{t} + assert_encoding listpack zset_big{t} + } elseif {$zset_type == "skiplist"} { + r config set zset-max-listpack-entries 0 + r zadd zset_small{t} 1 1 2 2 3 3 + r zadd zset_big{t} 1 1 2 2 3 3 4 4 5 5 + assert_encoding skiplist zset_small{t} + assert_encoding skiplist zset_big{t} + } + + # Test one key is big and one key is small separately. + # The reason for this is because we will sort the sets from smallest to largest. + # So set one big key and one small key, then the test can cover more code paths. + foreach {small_or_big set_key zset_key} { + small set_small{t} zset_big{t} + big set_big{t} zset_small{t} + } { + # The result of these commands are not related to the order of the keys. 
+ assert_equal {1 2 3 4 5} [lsort [r zunion 2 $set_key $zset_key]] + assert_equal {5} [r zunionstore zset_dest{t} 2 $set_key $zset_key] + assert_equal {1 2 3} [lsort [r zinter 2 $set_key $zset_key]] + assert_equal {3} [r zinterstore zset_dest{t} 2 $set_key $zset_key] + assert_equal {3} [r zintercard 2 $set_key $zset_key] + + # The result of sdiff is related to the order of the keys. + if {$small_or_big == "small"} { + assert_equal {} [r zdiff 2 $set_key $zset_key] + assert_equal {0} [r zdiffstore zset_dest{t} 2 $set_key $zset_key] + } else { + assert_equal {4 5} [lsort [r zdiff 2 $set_key $zset_key]] + assert_equal {2} [r zdiffstore zset_dest{t} 2 $set_key $zset_key] + } + } + } + } + + r config set set-max-intset-entries 512 + r config set set-max-listpack-entries 128 + r config set zset-max-listpack-entries 128 + } + + foreach type {single multiple single_multiple} { + test "ZADD overflows the maximum allowed elements in a listpack - $type" { + r del myzset + + set max_entries 64 + set original_max [lindex [r config get zset-max-listpack-entries] 1] + r config set zset-max-listpack-entries $max_entries + + if {$type == "single"} { + # All are single zadd commands. + for {set i 0} {$i < $max_entries} {incr i} { r zadd myzset $i $i } + } elseif {$type == "multiple"} { + # One zadd command to add all elements. + set args {} + for {set i 0} {$i < $max_entries * 2} {incr i} { lappend args $i } + r zadd myzset {*}$args + } elseif {$type == "single_multiple"} { + # First one zadd adds an element (creates a key) and then one zadd adds all elements. 
+ r zadd myzset 1 1 + set args {} + for {set i 0} {$i < $max_entries * 2} {incr i} { lappend args $i } + r zadd myzset {*}$args + } + + assert_encoding listpack myzset + assert_equal $max_entries [r zcard myzset] + assert_equal 1 [r zadd myzset 1 b] + assert_encoding skiplist myzset + + r config set zset-max-listpack-entries $original_max + } + } +} \ No newline at end of file diff --git a/tests/zset_test.go b/tests/zset_test.go index 5cdf309f6..eab3e7838 100644 --- a/tests/zset_test.go +++ b/tests/zset_test.go @@ -175,7 +175,7 @@ var _ = Describe("Zset", Ordered, func() { Expect(zRevRange.Val()).To(Equal([]string{"two", "one"})) }) - It("should ZRevRangeByScore", func() { + It("should ZRemRangeByRank", func() { err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() Expect(err).NotTo(HaveOccurred()) err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() @@ -183,20 +183,36 @@ var _ = Describe("Zset", Ordered, func() { err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() Expect(err).NotTo(HaveOccurred()) - vals, err := client.ZRevRangeByScore( - ctx, "zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result() + zRemRangeByRank := client.ZRemRangeByRank(ctx, "zset", 0, 1) + Expect(zRemRangeByRank.Err()).NotTo(HaveOccurred()) + Expect(zRemRangeByRank.Val()).To(Equal(int64(2))) + + vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result() Expect(err).NotTo(HaveOccurred()) - Expect(vals).To(Equal([]string{"three", "two", "one"})) + Expect(vals).To(Equal([]redis.Z{{ + Score: 3, + Member: "three", + }})) + }) - vals, err = client.ZRevRangeByScore( - ctx, "zset", &redis.ZRangeBy{Max: "2", Min: "(1"}).Result() + It("should ZRevRangeByScore", func() { + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, 
Member: "three"}).Err() Expect(err).NotTo(HaveOccurred()) - Expect(vals).To(Equal([]string{"two"})) - vals, err = client.ZRevRangeByScore( - ctx, "zset", &redis.ZRangeBy{Max: "(2", Min: "(1"}).Result() + zRemRangeByRank := client.ZRemRangeByRank(ctx, "zset", 0, 1) + Expect(zRemRangeByRank.Err()).NotTo(HaveOccurred()) + Expect(zRemRangeByRank.Val()).To(Equal(int64(2))) + + vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result() Expect(err).NotTo(HaveOccurred()) - Expect(vals).To(Equal([]string{})) + Expect(vals).To(Equal([]redis.Z{{ + Score: 3, + Member: "three", + }})) }) It("should ZCard", func() { @@ -215,4 +231,250 @@ var _ = Describe("Zset", Ordered, func() { Expect(err).NotTo(HaveOccurred()) Expect(card).To(Equal(int64(2))) }) + + It("should ZRange", func() { + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() + Expect(err).NotTo(HaveOccurred()) + + zRange := client.ZRange(ctx, "zset", 0, -1) + Expect(zRange.Err()).NotTo(HaveOccurred()) + Expect(zRange.Val()).To(Equal([]string{"one", "two", "three"})) + + zRange = client.ZRange(ctx, "zset", 2, 3) + Expect(zRange.Err()).NotTo(HaveOccurred()) + Expect(zRange.Val()).To(Equal([]string{"three"})) + + zRange = client.ZRange(ctx, "zset", -2, -1) + Expect(zRange.Err()).NotTo(HaveOccurred()) + Expect(zRange.Val()).To(Equal([]string{"two", "three"})) + }) + + It("should ZRangeWithScores", func() { + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() + Expect(err).NotTo(HaveOccurred()) + + vals, err := client.ZRangeWithScores(ctx, 
"zset", 0, -1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{{ + Score: 1, + Member: "one", + }, { + Score: 2, + Member: "two", + }, { + Score: 3, + Member: "three", + }})) + + vals, err = client.ZRangeWithScores(ctx, "zset", 2, 3).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{{Score: 3, Member: "three"}})) + + vals, err = client.ZRangeWithScores(ctx, "zset", -2, -1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{{ + Score: 2, + Member: "two", + }, { + Score: 3, + Member: "three", + }})) + }) + + It("should ZRangeByScoreWithScoresMap", func() { + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() + Expect(err).NotTo(HaveOccurred()) + + vals, err := client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{{ + Score: 1, + Member: "one", + }, { + Score: 2, + Member: "two", + }, { + Score: 3, + Member: "three", + }})) + + vals, err = client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ + Min: "1", + Max: "2", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{{ + Score: 1, + Member: "one", + }, { + Score: 2, + Member: "two", + }})) + + vals, err = client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ + Min: "(1", + Max: "2", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "two"}})) + + vals, err = client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ + Min: "(1", + Max: "(2", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{})) + }) + + It("should ZRangeByLex", func() { + err := 
client.ZAdd(ctx, "zsetrangebylex", redis.Z{ + Score: 0, + Member: "a", + }).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zsetrangebylex", redis.Z{ + Score: 0, + Member: "b", + }).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zsetrangebylex", redis.Z{ + Score: 0, + Member: "c", + }).Err() + Expect(err).NotTo(HaveOccurred()) + + zRangeByLex := client.ZRangeByLex(ctx, "zsetrangebylex", &redis.ZRangeBy{ + Min: "-", + Max: "+", + }) + Expect(zRangeByLex.Err()).NotTo(HaveOccurred()) + Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b", "c"})) + + zRangeByLex = client.ZRangeByLex(ctx, "zsetrangebylex", &redis.ZRangeBy{ + Min: "[a", + Max: "[b", + }) + Expect(zRangeByLex.Err()).NotTo(HaveOccurred()) + Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b"})) + + zRangeByLex = client.ZRangeByLex(ctx, "zsetrangebylex", &redis.ZRangeBy{ + Min: "(a", + Max: "[b", + }) + Expect(zRangeByLex.Err()).NotTo(HaveOccurred()) + Expect(zRangeByLex.Val()).To(Equal([]string{"b"})) + + zRangeByLex = client.ZRangeByLex(ctx, "zsetrangebylex", &redis.ZRangeBy{ + Min: "(a", + Max: "(b", + }) + Expect(zRangeByLex.Err()).NotTo(HaveOccurred()) + Expect(zRangeByLex.Val()).To(Equal([]string{})) + }) + + It("should ZRank", func() { + err := client.ZAdd(ctx, "zrank", redis.Z{ + Score: 1, + Member: "one", + }).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zrank", redis.Z{ + Score: 2, + Member: "two", + }).Err() + Expect(err).NotTo(HaveOccurred()) + + rank, err := client.ZRank(ctx, "zrank", "two").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(rank).To(Equal(int64(1))) + }) + + It("should ZRevrank", func() { + err := client.ZAdd(ctx, "zrevrank", redis.Z{ + Score: 1, + Member: "one", + }).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zrevrank", redis.Z{ + Score: 2, + Member: "two", + }).Err() + Expect(err).NotTo(HaveOccurred()) + + revrank, err := client.ZRevRank(ctx, "zrevrank", "one").Result() + 
Expect(err).NotTo(HaveOccurred()) + Expect(revrank).To(Equal(int64(1))) + }) + + It("should ZRem", func() { + err := client.ZAdd(ctx, "zrem", redis.Z{ + Score: 1, + Member: "one", + }).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zrem", redis.Z{ + Score: 2, + Member: "two", + }).Err() + Expect(err).NotTo(HaveOccurred()) + + rem, err := client.ZRem(ctx, "zrem", "one").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(rem).To(Equal(int64(1))) + }) + + It("should ZIncrby", func() { + err := client.ZAdd(ctx, "zincrby", redis.Z{ + Score: 1, + Member: "one", + }).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zincrby", redis.Z{ + Score: 2, + Member: "two", + }).Err() + Expect(err).NotTo(HaveOccurred()) + + rem, err := client.ZIncrBy(ctx, "zincrby", 5, "one").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(rem).To(Equal(float64(6))) + }) + + It("should ZRemRangeByScore", func() { + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() + Expect(err).NotTo(HaveOccurred()) + + zRemRangeByScore := client.ZRemRangeByScore(ctx, "zset", "-inf", "(2") + Expect(zRemRangeByScore.Err()).NotTo(HaveOccurred()) + Expect(zRemRangeByScore.Val()).To(Equal(int64(1))) + + vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]redis.Z{{ + Score: 2, + Member: "two", + }, { + Score: 3, + Member: "three", + }})) + }) })