diff --git a/.coveragerc b/.coveragerc index 9be06b93..7dd54b62 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,2 +1,2 @@ [run] -source = redis +source = valkey diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 7323c143..5861f19b 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,7 +1,7 @@ -Thanks for wanting to report an issue you've found in redis-py. Please delete this text and fill in the template below. +Thanks for wanting to report an issue you've found in valkey-py. Please delete this text and fill in the template below. It is of course not always possible to reduce your code to a small test case, but it's highly appreciated to have as much data as possible. Thank you! -**Version**: What redis-py and what redis version is the issue happening on? +**Version**: What valkey-py and what valkey version is the issue happening on? **Platform**: What platform / version? (For example Python 3.5.1 on Windows 7 / Ubuntu 15.10 / Azure) diff --git a/.github/wordlist.txt b/.github/wordlist.txt index 540e9152..af8728c7 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -139,5 +139,6 @@ txt un unicode url +valkey virtualenv www diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index a3512b46..fa150ca4 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -42,6 +42,6 @@ jobs: - name: upload docs uses: actions/upload-artifact@v4 with: - name: redis-py-docs + name: valkey-py-docs path: | docs/_build/html diff --git a/.github/workflows/install_and_test.sh b/.github/workflows/install_and_test.sh index 33a1edb1..99852513 100755 --- a/.github/workflows/install_and_test.sh +++ b/.github/workflows/install_and_test.sh @@ -38,8 +38,8 @@ cd ${TESTDIR} # install, run tests pip install ${PKG} -# Redis tests +# Valkey tests pytest -m 'not onlycluster' -# RedisCluster tests -CLUSTER_URL="redis://localhost:16379/0" -pytest -m 'not onlynoncluster and not redismod and not ssl' 
--redis-url=${CLUSTER_URL} +# ValkeyCluster tests +CLUSTER_URL="valkey://localhost:16379/0" +pytest -m 'not onlynoncluster and not valkeymod and not ssl' --valkey-url=${CLUSTER_URL} diff --git a/.gitignore b/.gitignore index 3baa3403..77eba107 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ *.pyc -redis.egg-info +valkey.egg-info build/ dist/ dump.rdb diff --git a/.isort.cfg b/.isort.cfg index 039f0337..87db5e44 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -1,5 +1,5 @@ [settings] profile=black multi_line_output=3 -src_paths = ["redis", "tests"] -skip_glob=benchmarks/* \ No newline at end of file +src_paths = ["valkey", "tests"] +skip_glob=benchmarks/* diff --git a/.mypy.ini b/.mypy.ini index 942574e0..0d3b08d4 100644 --- a/.mypy.ini +++ b/.mypy.ini @@ -1,6 +1,6 @@ [mypy] #, docs/examples, tests -files = redis +files = valkey check_untyped_defs = True follow_imports_for_stubs asyncio.= True #disallow_any_decorated = True @@ -19,6 +19,6 @@ warn_unused_ignores = True disallow_any_unimported = True #warn_return_any = True -[mypy-redis.asyncio.lock] +[mypy-valkey.asyncio.lock] # TODO: Remove once locks has been rewritten ignore_errors = True diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..1c530ec7 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,96 @@ +Contributor Covenant Code of Conduct +Our Pledge +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+Our Standards +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, +and learning from the experience +* Focusing on what is best not just for us as individuals, but for the +overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or +advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email +address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +Enforcement Responsibilities +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. +Scope +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+Enforcement +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +this email address: placeholderkv@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. +Enforcement Guidelines +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: +1. Correction +Community Impact: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. +Consequence: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. +2. Warning +Community Impact: A violation through a single incident or series +of actions. +Consequence: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. +3. Temporary Ban +Community Impact: A serious violation of community standards, including +sustained inappropriate behavior. +Consequence: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. +4. 
Permanent Ban +Community Impact: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. +Consequence: A permanent ban from any sort of public interaction within +the community. +Attribution +This Code of Conduct is adapted from the Contributor Covenant, +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. +Community Impact Guidelines were inspired by Mozilla's code of conduct +enforcement ladder. +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d87e6ba1..90370ac2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Introduction -We appreciate your interest in considering contributing to redis-py. +We appreciate your interest in considering contributing to valkey-py. Community contributions mean a lot to us. ## Contributions we need @@ -18,7 +18,7 @@ helpful contributions that mean less work for you. Unsure where to begin contributing? You can start by looking through [help-wanted -issues](https://github.com/andymccurdy/redis-py/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted). +issues](https://github.com/valkey-io/valkey-py/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted). Never contributed to open source before? Here are a couple of friendly tutorials: @@ -30,7 +30,7 @@ tutorials: Here's how to get started with your code contribution: -1. Create your own fork of redis-py +1. Create your own fork of valkey-py 2. Do the changes in your fork 3. *Create a virtualenv and install the development dependencies from the dev_requirements.txt file:* @@ -55,11 +55,11 @@ project, and leaves them running. These can be easily cleaned up with `invoke clean`. 
NOTE: it is assumed that the user running these tests, can execute docker and its various commands. -- A master Redis node -- A Redis replica node -- Three sentinel Redis nodes -- A redis cluster -- An stunnel docker, fronting the master Redis node +- A master Valkey node +- A Valkey replica node +- Three sentinel Valkey nodes +- A valkey cluster +- An stunnel docker, fronting the master Valkey node The replica node, is a replica of the master node, using the [leader-follower replication](https://redis.io/topics/replication) @@ -71,11 +71,11 @@ configuration](https://redis.io/topics/sentinel). ## Testing Call `invoke tests` to run all tests, or `invoke all-tests` to run linters -tests as well. With the 'tests' and 'all-tests' targets, all Redis and -RedisCluster tests will be run. +tests as well. With the 'tests' and 'all-tests' targets, all Valkey and +ValkeyCluster tests will be run. -It is possible to run only Redis client tests (with cluster mode disabled) by -using `invoke standalone-tests`; similarly, RedisCluster tests can be run by using +It is possible to run only Valkey client tests (with cluster mode disabled) by +using `invoke standalone-tests`; similarly, ValkeyCluster tests can be run by using `invoke cluster-tests`. Each run of tests starts and stops the various dockers required. Sometimes @@ -127,7 +127,7 @@ Please try at least versions of Docker. ### Security Vulnerabilities **NOTE**: If you find a security vulnerability, do NOT open an issue. -Email [Redis Open Source ()](mailto:oss@redis.com) instead. +Email [Salvatore Mesoraca ()](mailto:salvatore.mesoraca@aiven.io) instead. In order to determine whether you are dealing with a security issue, ask yourself these two questions: @@ -139,14 +139,14 @@ yourself these two questions: If the answer to either of those two questions are *yes*, then you're probably dealing with a security issue. 
Note that even if you answer *no* to both questions, you may still be dealing with a security -issue, so if you're unsure, just email [us](mailto:oss@redis.com). +issue, so if you're unsure, just email [us](mailto:salvatore.mesoraca@aiven.io). ### Everything Else When filing an issue, make sure to answer these five questions: -1. What version of redis-py are you using? -2. What version of redis are you using? +1. What version of valkey-py are you using? +2. What version of valkey are you using? 3. What did you do? 4. What did you expect to see? 5. What did you see instead? diff --git a/INSTALL b/INSTALL index 951f7dea..447340ca 100644 --- a/INSTALL +++ b/INSTALL @@ -2,5 +2,5 @@ Please use python setup.py install -and report errors to Andy McCurdy (sedrik@gmail.com) +and report errors to Salvatore Mesoraca (salvatore.mesoraca@aiven.io) diff --git a/README.md b/README.md index 2097e87b..e0ea63db 100644 --- a/README.md +++ b/README.md @@ -1,115 +1,86 @@ -# redis-py +# valkey-py -The Python interface to the Redis key-value store. +The Python interface to the Valkey key-value store. 
-[![CI](https://github.com/redis/redis-py/workflows/CI/badge.svg?branch=master)](https://github.com/redis/redis-py/actions?query=workflow%3ACI+branch%3Amaster) -[![docs](https://readthedocs.org/projects/redis/badge/?version=stable&style=flat)](https://redis-py.readthedocs.io/en/stable/) +[![CI](https://github.com/aiven-sal/valkey-py/workflows/CI/badge.svg?branch=master)](https://github.com/aiven-sal/valkey-py/actions?query=workflow%3ACI+branch%3Amaster) +[![docs](https://readthedocs.org/projects/valkey-py/badge/?version=stable&style=flat)](https://valkey-py.readthedocs.io/en/stable/) [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) -[![pypi](https://badge.fury.io/py/redis.svg)](https://pypi.org/project/redis/) -[![pre-release](https://img.shields.io/github/v/release/redis/redis-py?include_prereleases&label=latest-prerelease)](https://github.com/redis/redis-py/releases) -[![codecov](https://codecov.io/gh/redis/redis-py/branch/master/graph/badge.svg?token=yenl5fzxxr)](https://codecov.io/gh/redis/redis-py) +[![pypi](https://badge.fury.io/py/valkey.svg)](https://pypi.org/project/valkey/) +[![pre-release](https://img.shields.io/github/v/release/aiven-sal/valkey-py?include_prereleases&label=latest-prerelease)](https://github.com/aiven-sal/valkey-py/releases) +[![codecov](https://codecov.io/gh/aiven-sal/valkey-py/branch/master/graph/badge.svg?token=yenl5fzxxr)](https://codecov.io/gh/aiven-sal/valkey-py) -[Installation](#installation) | [Usage](#usage) | [Advanced Topics](#advanced-topics) | [Contributing](https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md) +[Installation](#installation) | [Usage](#usage) | [Advanced Topics](#advanced-topics) | [Contributing](https://github.com/aiven-sal/valkey-py/blob/master/CONTRIBUTING.md) --------------------------------------------- -**Note: ** redis-py 5.0 will be the last version of redis-py to support Python 3.7, as it has reached [end of life](https://devguide.python.org/versions/). 
redis-py 5.1 will support Python 3.8+. +**Note: ** valkey-py 5.0 will be the last version of valkey-py to support Python 3.7, as it has reached [end of life](https://devguide.python.org/versions/). valkey-py 5.1 will support Python 3.8+. --------------------------------------------- -## How do I Redis? - -[Learn for free at Redis University](https://university.redis.com/) - -[Build faster with the Redis Launchpad](https://launchpad.redis.com/) - -[Try the Redis Cloud](https://redis.com/try-free/) - -[Dive in developer tutorials](https://developer.redis.com/) - -[Join the Redis community](https://redis.com/community/) - -[Work at Redis](https://redis.com/company/careers/jobs/) - ## Installation -Start a redis via docker: +Start a valkey via docker: ``` bash -docker run -p 6379:6379 -it redis/redis-stack:latest +docker run -p 6379:6379 -it valkey/valkey:latest ``` -To install redis-py, simply: +To install valkey-py, simply: ``` bash -$ pip install redis +$ pip install valkey ``` -For faster performance, install redis with hiredis support, this provides a compiled response parser, and *for most cases* requires zero code changes. -By default, if hiredis >= 1.0 is available, redis-py will attempt to use it for response parsing. +For faster performance, install valkey with hiredis support, this provides a compiled response parser, and *for most cases* requires zero code changes. +By default, if hiredis >= 1.0 is available, valkey-py will attempt to use it for response parsing. ``` bash -$ pip install "redis[hiredis]" +$ pip install "valkey[hiredis]" ``` -Looking for a high-level library to handle object mapping? See [redis-om-python](https://github.com/redis/redis-om-python)! 
- -## Supported Redis Versions - -The most recent version of this library supports redis version [5.0](https://github.com/redis/redis/blob/5.0/00-RELEASENOTES), [6.0](https://github.com/redis/redis/blob/6.0/00-RELEASENOTES), [6.2](https://github.com/redis/redis/blob/6.2/00-RELEASENOTES), [7.0](https://github.com/redis/redis/blob/7.0/00-RELEASENOTES) and [7.2](https://github.com/redis/redis/blob/7.2/00-RELEASENOTES). - -The table below highlights version compatibility of the most-recent library versions and redis versions. - -| Library version | Supported redis versions | -|-----------------|-------------------| -| 3.5.3 | <= 6.2 Family of releases | -| >= 4.5.0 | Version 5.0 to 7.0 | -| >= 5.0.0 | Version 5.0 to current | - - ## Usage ### Basic Example ``` python ->>> import redis ->>> r = redis.Redis(host='localhost', port=6379, db=0) +>>> import valkey +>>> r = valkey.Valkey(host='localhost', port=6379, db=0) >>> r.set('foo', 'bar') True >>> r.get('foo') b'bar' ``` -The above code connects to localhost on port 6379, sets a value in Redis, and retrieves it. All responses are returned as bytes in Python, to receive decoded strings, set *decode_responses=True*. For this, and more connection options, see [these examples](https://redis.readthedocs.io/en/stable/examples.html). +The above code connects to localhost on port 6379, sets a value in Valkey, and retrieves it. All responses are returned as bytes in Python, to receive decoded strings, set *decode_responses=True*. For this, and more connection options, see [these examples](https://valkey-py.readthedocs.io/en/stable/examples.html). 
#### RESP3 Support To enable support for RESP3, ensure you have at least version 5.0 of the client, and change your connection object to include *protocol=3* ``` python ->>> import redis ->>> r = redis.Redis(host='localhost', port=6379, db=0, protocol=3) +>>> import valkey +>>> r = valkey.Valkey(host='localhost', port=6379, db=0, protocol=3) ``` ### Connection Pools -By default, redis-py uses a connection pool to manage connections. Each instance of a Redis class receives its own connection pool. You can however define your own [redis.ConnectionPool](https://redis.readthedocs.io/en/stable/connections.html#connection-pools). +By default, valkey-py uses a connection pool to manage connections. Each instance of a Valkey class receives its own connection pool. You can however define your own [valkey.ConnectionPool](https://valkey-py.readthedocs.io/en/stable/connections.html#connection-pools). ``` python ->>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) ->>> r = redis.Redis(connection_pool=pool) +>>> pool = valkey.ConnectionPool(host='localhost', port=6379, db=0) +>>> r = valkey.Valkey(connection_pool=pool) ``` -Alternatively, you might want to look at [Async connections](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html), or [Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#cluster-client), or even [Async Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#async-cluster-client). +Alternatively, you might want to look at [Async connections](https://valkey-py.readthedocs.io/en/stable/examples/asyncio_examples.html), or [Cluster connections](https://valkey-py.readthedocs.io/en/stable/connections.html#cluster-client), or even [Async Cluster connections](https://valkey-py.readthedocs.io/en/stable/connections.html#async-cluster-client). -### Redis Commands +### Valkey Commands -There is built-in support for all of the [out-of-the-box Redis commands](https://redis.io/commands). 
They are exposed using the raw Redis command names (`HSET`, `HGETALL`, etc.) except where a word (i.e. del) is reserved by the language. The complete set of commands can be found [here](https://github.com/redis/redis-py/tree/master/redis/commands), or [the documentation](https://redis.readthedocs.io/en/stable/commands.html). +There is built-in support for all of the [out-of-the-box Valkey commands](https://valkey.io/commands). They are exposed using the raw Valkey command names (`HSET`, `HGETALL`, etc.) except where a word (i.e. del) is reserved by the language. The complete set of commands can be found [here](https://github.com/aiven-sal/valkey-py/tree/master/valkey/commands), or [the documentation](https://valkey-py.readthedocs.io/en/stable/commands.html). ## Advanced Topics -The [official Redis command documentation](https://redis.io/commands) -does a great job of explaining each command in detail. redis-py attempts +The [official Valkey command documentation](https://valkey.io/commands) +does a great job of explaining each command in detail. valkey-py attempts to adhere to the official command syntax. There are a few exceptions: - **MULTI/EXEC**: These are implemented as part of the Pipeline class. @@ -126,11 +97,11 @@ to adhere to the official command syntax. There are a few exceptions: #151](https://github.com/redis/redis-py/issues/151#issuecomment-1545015) for details). -For more details, please see the documentation on [advanced topics page](https://redis.readthedocs.io/en/stable/advanced_features.html). +For more details, please see the documentation on [advanced topics page](https://valkey-py.readthedocs.io/en/stable/advanced_features.html). ### Pipelines -The following is a basic example of a [Redis pipeline](https://redis.io/docs/manual/pipelining/), a method to optimize round-trip calls, by batching Redis commands, and receiving their results as a list. 
+The following is a basic example of a [Valkey pipeline](https://valkey.io/docs/manual/pipelining/), a method to optimize round-trip calls, by batching Valkey commands, and receiving their results as a list. ``` python @@ -144,10 +115,10 @@ The following is a basic example of a [Redis pipeline](https://redis.io/docs/man ### PubSub -The following example shows how to utilize [Redis Pub/Sub](https://redis.io/docs/manual/pubsub/) to subscribe to specific channels. +The following example shows how to utilize [Valkey Pub/Sub](https://valkey.io/docs/manual/pubsub/) to subscribe to specific channels. ``` python ->>> r = redis.Redis(...) +>>> r = valkey.Valkey(...) >>> p = r.pubsub() >>> p.subscribe('my-first-channel', 'my-second-channel', ...) >>> p.get_message() @@ -159,8 +130,9 @@ The following example shows how to utilize [Redis Pub/Sub](https://redis.io/docs ### Author -redis-py is developed and maintained by [Redis Inc](https://redis.com). It can be found [here]( -https://github.com/redis/redis-py), or downloaded from [pypi](https://pypi.org/project/redis/). +valkey-py can be found [here]( +https://github.com/aiven-sal/valkey-py), or downloaded from [pypi](https://pypi.org/project/valkey/). +It was created as a fork of [redis-py](https://github.com/redis/redis-py). Special thanks to: @@ -169,6 +141,6 @@ Special thanks to: from which some of the socket code is still used. - Alexander Solovyov for ideas on the generic response callback system. -- Paul Hubbard for initial packaging support. +- Paul Hubbard for initial packaging support in redis-py. 
-[![Redis](./docs/logo-redis.png)](https://www.redis.com) +[![Redis](./docs/logo-valkey.png)](https://valkey.io/) diff --git a/benchmarks/base.py b/benchmarks/base.py index f52657f0..713c077d 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -3,7 +3,7 @@ import sys import timeit -import redis +import valkey class Benchmark: @@ -18,8 +18,8 @@ def get_client(self, **kwargs): if self._client is None or kwargs: defaults = {"db": 9} defaults.update(kwargs) - pool = redis.ConnectionPool(**kwargs) - self._client = redis.Redis(connection_pool=pool) + pool = valkey.ConnectionPool(**kwargs) + self._client = valkey.Valkey(connection_pool=pool) return self._client def setup(self, **kwargs): diff --git a/benchmarks/basic_operations.py b/benchmarks/basic_operations.py index c9f58536..b42c35b2 100644 --- a/benchmarks/basic_operations.py +++ b/benchmarks/basic_operations.py @@ -2,7 +2,7 @@ from argparse import ArgumentParser from functools import wraps -import redis +import valkey def parse_args(): @@ -29,7 +29,7 @@ def parse_args(): def run(): args = parse_args() - r = redis.Redis() + r = valkey.Valkey() r.flushall() set_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) set_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) diff --git a/benchmarks/cluster_async.py b/benchmarks/cluster_async.py index 17dd52b5..3b76c87d 100644 --- a/benchmarks/cluster_async.py +++ b/benchmarks/cluster_async.py @@ -2,11 +2,11 @@ import functools import time -import aioredis_cluster -import aredis +import aiovalkey_cluster +import avalkey import uvloop -import redis.asyncio as redispy +import valkey.asyncio as valkeypy def timer(func): @@ -202,7 +202,7 @@ async def run(client, gather): async def main(loop, gather=None): - arc = aredis.StrictRedisCluster( + arc = avalkey.StrictValkeyCluster( host=host, port=port, password=password, @@ -215,23 +215,23 @@ async def main(loop, gather=None): max_idle_time=count, idle_check_interval=count, ) - print(f"{loop} {gather} 
{await warmup(arc)} aredis") + print(f"{loop} {gather} {await warmup(arc)} avalkey") print(await run(arc, gather=gather)) arc.connection_pool.disconnect() - aiorc = await aioredis_cluster.create_redis_cluster( + aiorc = await aiovalkey_cluster.create_valkey_cluster( [(host, port)], password=password, state_reload_interval=count, idle_connection_timeout=count, pool_maxsize=2**31, ) - print(f"{loop} {gather} {await warmup(aiorc)} aioredis-cluster") + print(f"{loop} {gather} {await warmup(aiorc)} aiovalkey-cluster") print(await run(aiorc, gather=gather)) aiorc.close() await aiorc.wait_closed() - async with redispy.RedisCluster( + async with valkeypy.ValkeyCluster( host=host, port=port, password=password, @@ -240,7 +240,7 @@ async def main(loop, gather=None): decode_responses=False, max_connections=2**31, ) as rca: - print(f"{loop} {gather} {await warmup(rca)} redispy") + print(f"{loop} {gather} {await warmup(rca)} valkeypy") print(await run(rca, gather=gather)) diff --git a/benchmarks/cluster_async_pipeline.py b/benchmarks/cluster_async_pipeline.py index af45b445..e874ecff 100644 --- a/benchmarks/cluster_async_pipeline.py +++ b/benchmarks/cluster_async_pipeline.py @@ -2,11 +2,11 @@ import functools import time -import aioredis_cluster -import aredis +import aiovalkey_cluster +import avalkey import uvloop -import redis.asyncio as redispy +import valkey.asyncio as valkeypy def timer(func): @@ -50,7 +50,7 @@ async def run(client): async def main(loop): - arc = aredis.StrictRedisCluster( + arc = avalkey.StrictValkeyCluster( host=host, port=port, password=password, @@ -63,23 +63,23 @@ async def main(loop): max_idle_time=count, idle_check_interval=count, ) - print(f"{loop} {await warmup(arc)} aredis") + print(f"{loop} {await warmup(arc)} avalkey") print(await run(arc)) arc.connection_pool.disconnect() - aiorc = await aioredis_cluster.create_redis_cluster( + aiorc = await aiovalkey_cluster.create_valkey_cluster( [(host, port)], password=password, state_reload_interval=count, 
idle_connection_timeout=count, pool_maxsize=2**31, ) - print(f"{loop} {await warmup(aiorc)} aioredis-cluster") + print(f"{loop} {await warmup(aiorc)} aiovalkey-cluster") print(await run(aiorc)) aiorc.close() await aiorc.wait_closed() - async with redispy.RedisCluster( + async with valkeypy.ValkeyCluster( host=host, port=port, password=password, @@ -88,7 +88,7 @@ async def main(loop): decode_responses=False, max_connections=2**31, ) as rca: - print(f"{loop} {await warmup(rca)} redispy") + print(f"{loop} {await warmup(rca)} valkeypy") print(await run(rca)) diff --git a/benchmarks/command_packer_benchmark.py b/benchmarks/command_packer_benchmark.py index e66dbbcb..b4a67ce1 100644 --- a/benchmarks/command_packer_benchmark.py +++ b/benchmarks/command_packer_benchmark.py @@ -1,11 +1,11 @@ from base import Benchmark -from redis.connection import SYM_CRLF, SYM_DOLLAR, SYM_EMPTY, SYM_STAR, Connection +from valkey.connection import SYM_CRLF, SYM_DOLLAR, SYM_EMPTY, SYM_STAR, Connection class StringJoiningConnection(Connection): def send_packed_command(self, command, check_health=True): - "Send an already packed command to the Redis server" + "Send an already packed command to the Valkey server" if not self._sock: self.connect() try: @@ -22,7 +22,7 @@ def send_packed_command(self, command, check_health=True): raise def pack_command(self, *args): - "Pack a series of arguments into a value Redis command" + "Pack a series of arguments into a value Valkey command" args_output = SYM_EMPTY.join( [ SYM_EMPTY.join( diff --git a/benchmarks/socket_read_size.py b/benchmarks/socket_read_size.py index 544c7331..cc7c943b 100644 --- a/benchmarks/socket_read_size.py +++ b/benchmarks/socket_read_size.py @@ -1,6 +1,6 @@ from base import Benchmark -from redis.connection import PythonParser, _HiredisParser +from valkey.connection import PythonParser, _HiredisParser class SocketReadBenchmark(Benchmark): diff --git a/docker-compose.yml b/docker-compose.yml index 17d4b239..f56401d4 100644 --- 
a/docker-compose.yml +++ b/docker-compose.yml @@ -4,13 +4,12 @@ version: "3.8" services: - redis: - image: redis/redis-stack-server:edge - container_name: redis-standalone + valkey: + image: valkey/valkey:latest + container_name: valkey-standalone ports: - 6379:6379 - environment: - - "REDIS_ARGS=--enable-debug-command yes --enable-module-command yes" + entrypoint: "/usr/local/bin/docker-entrypoint.sh --enable-debug-command yes --enable-module-command yes" profiles: - standalone - sentinel @@ -18,12 +17,11 @@ services: - all replica: - image: redis/redis-stack-server:edge - container_name: redis-replica + image: valkey/valkey:latest + container_name: valkey-replica depends_on: - - redis - environment: - - "REDIS_ARGS=--replicaof redis 6379" + - valkey + entrypoint: "/usr/local/bin/docker-entrypoint.sh --replicaof valkey 6379" ports: - 6380:6379 profiles: @@ -31,7 +29,7 @@ services: - all cluster: - container_name: redis-cluster + container_name: valkey-cluster build: context: . dockerfile: dockers/Dockerfile.cluster @@ -43,7 +41,7 @@ services: - 16383:16383 - 16384:16384 volumes: - - "./dockers/cluster.redis.conf:/redis.conf:ro" + - "./dockers/cluster.valkey.conf:/valkey.conf:ro" profiles: - cluster - all @@ -51,7 +49,7 @@ services: stunnel: image: redisfab/stunnel:latest depends_on: - - redis + - valkey ports: - 6666:6666 profiles: @@ -63,47 +61,43 @@ services: - "./dockers/stunnel/keys:/etc/stunnel/keys:ro" sentinel: - image: redis/redis-stack-server:edge - container_name: redis-sentinel + image: valkey/valkey:latest + container_name: valkey-sentinel depends_on: - - redis - environment: - - "REDIS_ARGS=--port 26379" - entrypoint: "/opt/redis-stack/bin/redis-sentinel /redis.conf --port 26379" + - valkey + entrypoint: "/usr/local/bin/valkey-sentinel /valkey.conf --port 26379" ports: - 26379:26379 volumes: - - "./dockers/sentinel.conf:/redis.conf" + - "./dockers/sentinel.conf:/valkey.conf" profiles: - sentinel - all sentinel2: - image: redis/redis-stack-server:edge 
- container_name: redis-sentinel2 + image: valkey/valkey:latest + container_name: valkey-sentinel2 depends_on: - - redis - environment: - - "REDIS_ARGS=--port 26380" - entrypoint: "/opt/redis-stack/bin/redis-sentinel /redis.conf --port 26380" + - valkey + entrypoint: "/usr/local/bin/valkey-sentinel /valkey.conf --port 26380" ports: - 26380:26380 volumes: - - "./dockers/sentinel.conf:/redis.conf" + - "./dockers/sentinel.conf:/valkey.conf" profiles: - sentinel - all sentinel3: - image: redis/redis-stack-server:edge - container_name: redis-sentinel3 + image: valkey/valkey:latest + container_name: valkey-sentinel3 depends_on: - - redis - entrypoint: "/opt/redis-stack/bin/redis-sentinel /redis.conf --port 26381" + - valkey + entrypoint: "/usr/local/bin/valkey-sentinel /valkey.conf --port 26381" ports: - 26381:26381 volumes: - - "./dockers/sentinel.conf:/redis.conf" + - "./dockers/sentinel.conf:/valkey.conf" profiles: - sentinel - all diff --git a/dockers/Dockerfile.cluster b/dockers/Dockerfile.cluster index 3a0d7341..7d42db5e 100644 --- a/dockers/Dockerfile.cluster +++ b/dockers/Dockerfile.cluster @@ -1,7 +1,6 @@ -FROM redis/redis-stack-server:edge as rss +FROM valkey/valkey:latest as rss COPY dockers/create_cluster.sh /create_cluster.sh -RUN ls -R /opt/redis-stack RUN chmod a+x /create_cluster.sh ENTRYPOINT [ "/create_cluster.sh"] diff --git a/dockers/cluster.redis.conf b/dockers/cluster.redis.conf deleted file mode 100644 index d4de46fb..00000000 --- a/dockers/cluster.redis.conf +++ /dev/null @@ -1,8 +0,0 @@ -protected-mode no -enable-debug-command yes -loadmodule /opt/redis-stack/lib/redisearch.so -loadmodule /opt/redis-stack/lib/redisgraph.so -loadmodule /opt/redis-stack/lib/redistimeseries.so -loadmodule /opt/redis-stack/lib/rejson.so -loadmodule /opt/redis-stack/lib/redisbloom.so -loadmodule /opt/redis-stack/lib/redisgears.so v8-plugin-path /opt/redis-stack/lib/libredisgears_v8_plugin.so diff --git a/dockers/cluster.valkey.conf b/dockers/cluster.valkey.conf new 
file mode 100644 index 00000000..e9f7617d --- /dev/null +++ b/dockers/cluster.valkey.conf @@ -0,0 +1,2 @@ +protected-mode no +enable-debug-command yes diff --git a/dockers/create_cluster.sh b/dockers/create_cluster.sh index da9a0cb6..1c32be77 100644 --- a/dockers/create_cluster.sh +++ b/dockers/create_cluster.sh @@ -17,31 +17,31 @@ echo "ENDING: ${END_PORT}" for PORT in `seq ${START_PORT} ${END_PORT}`; do mkdir -p /nodes/$PORT - if [[ -e /redis.conf ]]; then - cp /redis.conf /nodes/$PORT/redis.conf + if [[ -e /valkey.conf ]]; then + cp /valkey.conf /nodes/$PORT/valkey.conf else - touch /nodes/$PORT/redis.conf + touch /nodes/$PORT/valkey.conf fi - cat << EOF >> /nodes/$PORT/redis.conf + cat << EOF >> /nodes/$PORT/valkey.conf port ${PORT} cluster-enabled yes daemonize yes -logfile /redis.log +logfile /valkey.log dir /nodes/$PORT EOF set -x - /opt/redis-stack/bin/redis-server /nodes/$PORT/redis.conf + /usr/local/bin/valkey-server /nodes/$PORT/valkey.conf sleep 1 if [ $? -ne 0 ]; then - echo "Redis failed to start, exiting." + echo "Valkey failed to start, exiting." 
continue fi echo 127.0.0.1:$PORT >> /nodes/nodemap done -if [ -z "${REDIS_PASSWORD}" ]; then - echo yes | /opt/redis-stack/bin/redis-cli --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1 +if [ -z "${VALKEY_PASSWORD}" ]; then + echo yes | /usr/local/bin/valkey-cli --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1 else - echo yes | opt/redis-stack/bin/redis-cli -a ${REDIS_PASSWORD} --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1 + echo yes | /usr/local/bin/valkey-cli -a "${VALKEY_PASSWORD}" --cluster create `seq -f 127.0.0.1:%g ${START_PORT} ${END_PORT}` --cluster-replicas 1 fi -tail -f /redis.log +tail -f /valkey.log diff --git a/dockers/sentinel.conf b/dockers/sentinel.conf index 75f711e5..b3cc6666 100644 --- a/dockers/sentinel.conf +++ b/dockers/sentinel.conf @@ -1,5 +1,5 @@ sentinel resolve-hostnames yes -sentinel monitor redis-py-test redis 6379 2 -sentinel down-after-milliseconds redis-py-test 5000 -sentinel failover-timeout redis-py-test 60000 -sentinel parallel-syncs redis-py-test 1 \ No newline at end of file +sentinel monitor valkey-py-test valkey 6379 2 +sentinel down-after-milliseconds valkey-py-test 5000 +sentinel failover-timeout valkey-py-test 60000 +sentinel parallel-syncs valkey-py-test 1 diff --git a/dockers/stunnel/conf/redis.conf b/dockers/stunnel/conf/redis.conf index a150d8b0..22ee574c 100644 --- a/dockers/stunnel/conf/redis.conf +++ b/dockers/stunnel/conf/redis.conf @@ -1,6 +1,6 @@ -[redis] +[valkey] accept = 6666 -connect = redis:6379 +connect = valkey:6379 cert = /etc/stunnel/keys/server-cert.pem key = /etc/stunnel/keys/server-key.pem verify = 0 diff --git a/dockers/stunnel/create_certs.sh b/dockers/stunnel/create_certs.sh index 4065562c..fa3e22d1 100755 --- a/dockers/stunnel/create_certs.sh +++ b/dockers/stunnel/create_certs.sh @@ -17,12 +17,12 @@ openssl genrsa -out ca-key.pem 2048 &>/dev/null openssl req
-new -x509 -nodes -days 365000 \ -key ca-key.pem \ -out ca-cert.pem \ - -subj "/CN=redis-py-ca" &>/dev/null + -subj "/CN=valkey-py-ca" &>/dev/null openssl req -newkey rsa:2048 -nodes -days 365000 \ -keyout server-key.pem \ -out server-req.pem \ - -subj "/CN=redis-py-server" &>/dev/null + -subj "/CN=valkey-py-server" &>/dev/null openssl x509 -req -days 365000 -set_serial 01 \ -in server-req.pem \ @@ -33,7 +33,7 @@ openssl x509 -req -days 365000 -set_serial 01 \ openssl req -newkey rsa:2048 -nodes -days 365000 \ -keyout client-key.pem \ -out client-req.pem \ - -subj "/CN=redis-py-client" &>/dev/null + -subj "/CN=valkey-py-client" &>/dev/null openssl x509 -req -days 365000 -set_serial 01 \ -in client-req.pem \ diff --git a/docs/Makefile b/docs/Makefile index c4589862..eb9937f9 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -77,17 +77,17 @@ qthelp: @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/redis-py.qhcp" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/valkey-py.qhcp" @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/redis-py.qhc" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/valkey-py.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/redis-py" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/redis-py" + @echo "# mkdir -p $$HOME/.local/share/devhelp/valkey-py" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/valkey-py" @echo "# devhelp" epub: diff --git a/docs/_static/Valkey-logo.svg b/docs/_static/Valkey-logo.svg new file mode 100644 index 00000000..42b1d8af --- /dev/null +++ b/docs/_static/Valkey-logo.svg @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/docs/_static/redis-cube-red-white-rgb.svg b/docs/_static/redis-cube-red-white-rgb.svg deleted file mode 100644 index 936eb231..00000000 --- a/docs/_static/redis-cube-red-white-rgb.svg +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/advanced_features.rst b/docs/advanced_features.rst index de645bd7..de0f0904 100644 --- a/docs/advanced_features.rst +++ b/docs/advanced_features.rst @@ -4,20 +4,20 @@ Advanced Features A note about threading ---------------------- -Redis client instances can safely be shared between threads. Internally, +Valkey client instances can safely be shared between threads. Internally, connection instances are only retrieved from the connection pool during command execution, and returned to the pool directly after. Command execution never modifies state on the client instance. -However, there is one caveat: the Redis SELECT command. The SELECT +However, there is one caveat: the Valkey SELECT command. The SELECT command allows you to switch the database currently in use by the connection. That database remains selected until another is selected or until the connection is closed. This creates an issue in that connections could be returned to the pool that are connected to a different database. -As a result, redis-py does not implement the SELECT command on client -instances. 
If you use multiple Redis databases within the same +As a result, valkey-py does not implement the SELECT command on client +instances. If you use multiple Valkey databases within the same application, you should create a separate client instance (and possibly a separate connection pool) for each database. @@ -29,7 +29,7 @@ Pipelines Default pipelines ~~~~~~~~~~~~~~~~~ -Pipelines are a subclass of the base Redis class that provide support +Pipelines are a subclass of the base Valkey class that provide support for buffering multiple commands to the server in a single request. They can be used to dramatically increase the performance of groups of commands by reducing the number of back-and-forth TCP packets between @@ -39,7 +39,7 @@ Pipelines are quite simple to use: .. code:: python - >>> r = redis.Redis(...) + >>> r = valkey.Valkey(...) >>> r.set('bing', 'baz') >>> # Use the pipeline() method to create a pipeline instance >>> pipe = r.pipeline() @@ -69,7 +69,7 @@ commands, you can turn off transactions. >>> pipe = r.pipeline(transaction=False) A common issue occurs when requiring atomic transactions but needing to -retrieve values in Redis prior for use within the transaction. For +retrieve values in Valkey prior for use within the transaction. For instance, let's assume that the INCR command didn't exist and we need to build an atomic version of INCR in Python. @@ -148,20 +148,20 @@ like this, which is much easier to read: >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') [True] -Be sure to call pipe.multi() in the callable passed to Redis.transaction +Be sure to call pipe.multi() in the callable passed to Valkey.transaction prior to any write commands. Pipelines in clusters ~~~~~~~~~~~~~~~~~~~~~ -ClusterPipeline is a subclass of RedisCluster that provides support for -Redis pipelines in cluster mode. When calling the execute() command, all +ClusterPipeline is a subclass of ValkeyCluster that provides support for +Valkey pipelines in cluster mode. 
When calling the execute() command, all the commands are grouped by the node on which they will be executed, and are then executed by the respective nodes in parallel. The pipeline instance will wait for all the nodes to respond before returning the result to the caller. Command responses are returned as a list sorted in the same order in which they were sent. Pipelines can be used to -dramatically increase the throughput of Redis Cluster by significantly +dramatically increase the throughput of Valkey Cluster by significantly reducing the number of network round trips between the client and the server. @@ -177,7 +177,7 @@ the server. ... pipe.set('foo1', 'bar1').get('foo1').execute() [True, b'bar1'] -Please note: - RedisCluster pipelines currently only support key-based +Please note: - ValkeyCluster pipelines currently only support key-based commands. - The pipeline gets its ‘read_from_replicas’ value from the cluster’s parameter. Thus, if read from replications is enabled in the cluster instance, the pipeline will also direct read commands to @@ -195,12 +195,12 @@ most cases they are split up into several smaller pipelines. Publish / Subscribe ------------------- -redis-py includes a PubSub object that subscribes to channels and +valkey-py includes a PubSub object that subscribes to channels and listens for new messages. Creating a PubSub object is easy. .. code:: python - >>> r = redis.Redis(...) + >>> r = valkey.Valkey(...) >>> p = r.pubsub() Once a PubSub instance is created, channels and patterns can be @@ -267,7 +267,7 @@ Unsubscribing works just like subscribing. If no arguments are passed to >>> p.get_message() {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'} -redis-py also allows you to register callback functions to handle +valkey-py also allows you to register callback functions to handle published messages. Message handlers take a single argument, the message, which is a dictionary just like the examples above. 
To subscribe to a channel or pattern with a message handler, pass the @@ -333,10 +333,10 @@ existing event loop inside your application. >>> # do something with the message >>> time.sleep(0.001) # be nice to the system :) -Older versions of redis-py only read messages with pubsub.listen(). +Older versions of valkey-py only read messages with pubsub.listen(). listen() is a generator that blocks until a message is available. If your application doesn't need to do anything else but receive and act on -messages received from redis, listen() is an easy way to get up an +messages received from valkey, listen() is an easy way to get up an running. .. code:: python @@ -356,7 +356,7 @@ value in each iteration of the loop. Note: Since we're running in a separate thread, there's no way to handle messages that aren't automatically handled with registered message -handlers. Therefore, redis-py prevents you from calling run_in_thread() +handlers. Therefore, valkey-py prevents you from calling run_in_thread() if you're subscribed to patterns or channels that don't have message handlers attached. @@ -387,7 +387,7 @@ run_in_thread. A PubSub object adheres to the same encoding semantics as the client instance it was created from. Any channel or pattern that's unicode will be encoded using the charset specified on the client before being sent -to Redis. If the client's decode_responses flag is set the False (the +to Valkey. If the client's decode_responses flag is set the False (the default), the 'channel', 'pattern' and 'data' values in message dictionaries will be byte strings (str on Python 2, bytes on Python 3). If the client's decode_responses is True, then the 'channel', 'pattern' @@ -424,16 +424,16 @@ supported: Sharded pubsub ~~~~~~~~~~~~~~ -`Sharded pubsub `_ is a feature introduced with Redis 7.0, and fully supported by redis-py as of 5.0. It helps scale the usage of pub/sub in cluster mode, by having the cluster shard messages to nodes that own a slot for a shard channel. 
Here, the cluster ensures the published shard messages are forwarded to the appropriate nodes. Clients subscribe to a channel by connecting to either the master responsible for the slot, or any of its replicas. +`Sharded pubsub `_ is a feature introduced with Valkey 7.0, and fully supported by valkey-py as of 5.0. It helps scale the usage of pub/sub in cluster mode, by having the cluster shard messages to nodes that own a slot for a shard channel. Here, the cluster ensures the published shard messages are forwarded to the appropriate nodes. Clients subscribe to a channel by connecting to either the master responsible for the slot, or any of its replicas. -This makes use of the `SSUBSCRIBE `_ and `SPUBLISH `_ commands within Redis. +This makes use of the `SSUBSCRIBE `_ and `SPUBLISH `_ commands within Valkey. The following, is a simplified example: .. code:: python - >>> from redis.cluster import RedisCluster, ClusterNode - >>> r = RedisCluster(startup_nodes=[ClusterNode('localhost', 6379), ClusterNode('localhost', 6380)]) + >>> from valkey.cluster import ValkeyCluster, ClusterNode + >>> r = ValkeyCluster(startup_nodes=[ClusterNode('localhost', 6379), ClusterNode('localhost', 6380)]) >>> p = r.pubsub() >>> p.ssubscribe('foo') >>> # assume someone sends a message along the channel via a publish @@ -443,10 +443,10 @@ Similarly, the same process can be used to acquire sharded pubsub messages, that .. 
code:: python - >>> from redis.cluster import RedisCluster, ClusterNode + >>> from valkey.cluster import ValkeyCluster, ClusterNode >>> first_node = ClusterNode['localhost', 6379] >>> second_node = ClusterNode['localhost', 6380] - >>> r = RedisCluster(startup_nodes=[first_node, second_node]) + >>> r = ValkeyCluster(startup_nodes=[first_node, second_node]) >>> p = r.pubsub() >>> p.ssubscribe('foo') >>> # assume someone sends a message along the channel via a publish @@ -456,13 +456,13 @@ Similarly, the same process can be used to acquire sharded pubsub messages, that Monitor ~~~~~~~ -redis-py includes a Monitor object that streams every command processed -by the Redis server. Use listen() on the Monitor object to block until a +valkey-py includes a Monitor object that streams every command processed +by the Valkey server. Use listen() on the Monitor object to block until a command is received. .. code:: python - >>> r = redis.Redis(...) + >>> r = valkey.Valkey(...) >>> with r.monitor() as m: >>> for command in m.listen(): >>> print(command) diff --git a/docs/backoff.rst b/docs/backoff.rst index c5ab01ab..e093ebf0 100644 --- a/docs/backoff.rst +++ b/docs/backoff.rst @@ -3,5 +3,5 @@ Backoff ############# -.. automodule:: redis.backoff +.. automodule:: valkey.backoff :members: \ No newline at end of file diff --git a/docs/clustering.rst b/docs/clustering.rst index f8320e4e..c66b5a6d 100644 --- a/docs/clustering.rst +++ b/docs/clustering.rst @@ -1,16 +1,16 @@ Clustering ========== -redis-py now supports cluster mode and provides a client for `Redis -Cluster `__. +valkey-py now supports cluster mode and provides a client for `Valkey +Cluster `__. The cluster client is based on Grokzen’s -`redis-py-cluster `__, has +`valkey-py-cluster `__, has added bug fixes, and now supersedes that library. Support for these changes is thanks to his contributions. -To learn more about Redis Cluster, see `Redis Cluster -specifications `__. 
+To learn more about Valkey Cluster, see `Valkey Cluster +specifications `__. `Creating clusters <#creating-clusters>`__ \| `Specifying Target Nodes <#specifying-target-nodes>`__ \| `Multi-key @@ -20,7 +20,7 @@ Limitations <#known-pubsub-limitations>`__ Creating clusters ----------------- -Connecting redis-py to a Redis Cluster instance(s) requires at a minimum +Connecting valkey-py to a Valkey Cluster instance(s) requires at a minimum a single node for cluster discovery. There are multiple ways in which a cluster instance can be created: @@ -28,40 +28,40 @@ cluster instance can be created: .. code:: python - >>> from redis.cluster import RedisCluster as Redis - >>> rc = Redis(host='localhost', port=6379) + >>> from valkey.cluster import ValkeyCluster as Valkey + >>> rc = Valkey(host='localhost', port=6379) >>> print(rc.get_nodes()) - [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,redis_connection=Redis>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,redis_connection=Redis>>]] + [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,valkey_connection=Valkey>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,valkey_connection=Valkey>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,valkey_connection=Valkey>>]] -- Using the Redis URL specification: +- Using the Valkey URL specification: .. code:: python - >>> from redis.cluster import RedisCluster as Redis - >>> rc = Redis.from_url("redis://localhost:6379/0") + >>> from valkey.cluster import ValkeyCluster as Valkey + >>> rc = Valkey.from_url("valkey://localhost:6379/0") - Directly, via the ClusterNode class: .. 
code:: python - >>> from redis.cluster import RedisCluster as Redis - >>> from redis.cluster import ClusterNode + >>> from valkey.cluster import ValkeyCluster as Valkey + >>> from valkey.cluster import ClusterNode >>> nodes = [ClusterNode('localhost', 6379), ClusterNode('localhost', 6378)] - >>> rc = Redis(startup_nodes=nodes) + >>> rc = Valkey(startup_nodes=nodes) -When a RedisCluster instance is being created it first attempts to +When a ValkeyCluster instance is being created it first attempts to establish a connection to one of the provided startup nodes. If none of -the startup nodes are reachable, a ‘RedisClusterException’ will be +the startup nodes are reachable, a ‘ValkeyClusterException’ will be thrown. After a connection to the one of the cluster’s nodes is -established, the RedisCluster instance will be initialized with 3 +established, the ValkeyCluster instance will be initialized with 3 caches: a slots cache which maps each of the 16384 slots to the node/s handling them, a nodes cache that contains ClusterNode objects (name, -host, port, redis connection) for all of the cluster’s nodes, and a +host, port, valkey connection) for all of the cluster’s nodes, and a commands cache contains all the server supported commands that were -retrieved using the Redis ‘COMMAND’ output. See *RedisCluster specific +retrieved using the Valkey ‘COMMAND’ output. See *ValkeyCluster specific options* below for more. -RedisCluster instance can be directly used to execute Redis commands. +ValkeyCluster instance can be directly used to execute Valkey commands. When a command is being executed through the cluster instance, the target node(s) will be internally determined. When using a key-based command, the target node will be the node that holds the key’s slot. 
@@ -95,10 +95,10 @@ The ‘target_nodes’ parameter is explained in the following section, Specifying Target Nodes ----------------------- -As mentioned above, all non key-based RedisCluster commands accept the +As mentioned above, all non key-based ValkeyCluster commands accept the kwarg parameter ‘target_nodes’ that specifies the node/nodes that the command should be executed on. The best practice is to specify target -nodes using RedisCluster class’s node flags: PRIMARIES, REPLICAS, +nodes using ValkeyCluster class’s node flags: PRIMARIES, REPLICAS, ALL_NODES, RANDOM. When a nodes flag is passed along with a command, it will be internally resolved to the relevant node/s. If the nodes topology of the cluster changes during the execution of a command, the @@ -107,18 +107,18 @@ topology and attempt to retry executing the command. .. code:: python - >>> from redis.cluster import RedisCluster as Redis + >>> from valkey.cluster import ValkeyCluster as Valkey >>> # run cluster-meet command on all of the cluster's nodes - >>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Redis.ALL_NODES) + >>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Valkey.ALL_NODES) >>> # ping all replicas - >>> rc.ping(target_nodes=Redis.REPLICAS) + >>> rc.ping(target_nodes=Valkey.REPLICAS) >>> # ping a random node - >>> rc.ping(target_nodes=Redis.RANDOM) + >>> rc.ping(target_nodes=Valkey.RANDOM) >>> # get the keys from all cluster nodes - >>> rc.keys(target_nodes=Redis.ALL_NODES) + >>> rc.keys(target_nodes=Valkey.ALL_NODES) [b'foo1', b'foo2'] >>> # execute bgsave in all primaries - >>> rc.bgsave(Redis.PRIMARIES) + >>> rc.bgsave(Valkey.PRIMARIES) You could also pass ClusterNodes directly if you want to execute a command on a specific node / node group that isn’t addressed by the @@ -132,20 +132,20 @@ connection error will be returned. 
>>> node = rc.get_node('localhost', 6379) >>> # Get the keys only for that specific node >>> rc.keys(target_nodes=node) - >>> # get Redis info from a subset of primaries + >>> # get Valkey info from a subset of primaries >>> subset_primaries = [node for node in rc.get_primaries() if node.port > 6378] >>> rc.info(target_nodes=subset_primaries) -In addition, the RedisCluster instance can query the Redis instance of a -specific node and execute commands on that node directly. The Redis +In addition, the ValkeyCluster instance can query the Valkey instance of a +specific node and execute commands on that node directly. The Valkey client, however, does not handle cluster failures and retries. .. code:: python >>> cluster_node = rc.get_node(host='localhost', port=6379) >>> print(cluster_node) - [host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis>>] - >>> r = cluster_node.redis_connection + [host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,valkey_connection=Valkey>>] + >>> r = cluster_node.valkey_connection >>> r.client_list() [{'id': '276', 'addr': '127.0.0.1:64108', 'fd': '16', 'name': '', 'age': '0', 'idle': '0', 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '26', 'qbuf-free': '32742', 'argv-mem': '10', 'obl': '0', 'oll': '0', 'omem': '0', 'tot-mem': '54298', 'events': 'r', 'cmd': 'client', 'user': 'default'}] >>> # Get the keys only for that specific node @@ -155,15 +155,15 @@ client, however, does not handle cluster failures and retries. Multi-key Commands ------------------ -Redis supports multi-key commands in Cluster Mode, such as Set type +Valkey supports multi-key commands in Cluster Mode, such as Set type unions or intersections, mset and mget, as long as the keys all hash to -the same slot. By using RedisCluster client, you can use the known +the same slot. By using ValkeyCluster client, you can use the known functions (e.g. mget, mset) to perform an atomic multi-key operation. 
However, you must ensure all keys are mapped to the same slot, otherwise -a RedisClusterException will be thrown. Redis Cluster implements a +a ValkeyClusterException will be thrown. Valkey Cluster implements a concept called hash tags that can be used in order to force certain keys to be stored in the same hash slot, see `Keys hash -tag `__. You can +tag `__. You can also use nonatomic for some of the multikey operations, and pass keys that aren’t mapped to the same slot. The client will then map the keys to the relevant slots, sending the commands to the slots’ node owners. @@ -198,8 +198,8 @@ slots. If we hash a pattern like fo\* we will receive a keyslot for that string but there are endless possibilities for channel names based on this pattern - unknowable in advance. This feature is not disabled but the commands are not currently recommended for use. See -`redis-py-cluster -documentation `__ +`valkey-py-cluster +documentation `__ for more. .. code:: python @@ -212,11 +212,11 @@ for more. **Read Only Mode** -By default, Redis Cluster always returns MOVE redirection response on +By default, Valkey Cluster always returns MOVE redirection response on accessing a replica node. You can overcome this limitation and scale read commands by triggering READONLY mode. -To enable READONLY mode pass read_from_replicas=True to RedisCluster +To enable READONLY mode pass read_from_replicas=True to ValkeyCluster constructor. When set to true, read commands will be assigned between the primary and its replications in a Round-Robin manner. @@ -226,9 +226,9 @@ calling the readwrite() method. .. code:: python - >>> from cluster import RedisCluster as Redis + >>> from cluster import ValkeyCluster as Valkey # Use 'debug' log level to print the node that the command is executed on - >>> rc_readonly = Redis(startup_nodes=startup_nodes, + >>> rc_readonly = Valkey(startup_nodes=startup_nodes, ... 
read_from_replicas=True) >>> rc_readonly.set('{foo}1', 'bar1') >>> for i in range(0, 4): diff --git a/docs/commands.rst b/docs/commands.rst index d35f290a..2f5ce338 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -1,30 +1,30 @@ -Redis Commands +Valkey Commands ############## Core Commands ************* -The following functions can be used to replicate their equivalent `Redis command `_. Generally they can be used as functions on your redis connection. For the simplest example, see below: +The following functions can be used to replicate their equivalent `Valkey command `_. Generally they can be used as functions on your valkey connection. For the simplest example, see below: -Getting and settings data in redis:: +Getting and settings data in valkey:: - import redis - r = redis.Redis(decode_responses=True) + import valkey + r = valkey.Valkey(decode_responses=True) r.set('mykey', 'thevalueofmykey') r.get('mykey') -.. autoclass:: redis.commands.core.CoreCommands +.. autoclass:: valkey.commands.core.CoreCommands :inherited-members: Sentinel Commands ***************** -.. autoclass:: redis.commands.sentinel.SentinelCommands +.. autoclass:: valkey.commands.sentinel.SentinelCommands :inherited-members: -Redis Cluster Commands +Valkey Cluster Commands ********************** -The following `Redis commands `_ are available within a `Redis Cluster `_. Generally they can be used as functions on your redis connection. +The following `Valkey commands `_ are available within a `Valkey Cluster `_. Generally they can be used as functions on your valkey connection. -.. autoclass:: redis.commands.cluster.RedisClusterCommands +.. 
autoclass:: valkey.commands.cluster.ValkeyClusterCommands :inherited-members: diff --git a/docs/conf.py b/docs/conf.py index 33a85896..e1730a72 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,4 @@ -# redis-py documentation build configuration file, created by +# valkey-py documentation build configuration file, created by # sphinx-quickstart on Fri Feb 8 00:47:08 2013. # # This file is execfile()d with the current directory set to its containing @@ -64,18 +64,18 @@ master_doc = "index" # General information about the project. -project = "redis-py" +project = "valkey-py" current_year = datetime.datetime.now().year -copyright = f"{current_year}, Redis Inc" +copyright = f"{current_year}, Valkey Inc" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -import redis +import valkey -version = ".".join(redis.__version__.split(".")[0:3]) +version = ".".join(valkey.__version__.split(".")[0:3]) release = version if version == "99.99.99": release = "dev" @@ -132,7 +132,7 @@ "footer_icons": [ { "name": "GitHub", - "url": "https://github.com/redis/redis-py", + "url": "https://github.com/valkey/valkey-py", "html": """ @@ -141,7 +141,7 @@ "class": "", }, ], - "source_repository": "https://github.com/redis/redis-py/", + "source_repository": "https://github.com/valkey/valkey-py/", "source_branch": "master", "source_directory": "docs/", } @@ -158,7 +158,7 @@ # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = "_static/redis-cube-red-white-rgb.svg" +html_logo = "_static/Valkey-logo.svg" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 @@ -212,7 +212,7 @@ # html_file_suffix = None # Output file base name for HTML help builder.
-htmlhelp_basename = "redis-pydoc" +htmlhelp_basename = "valkey-pydoc" # -- Options for LaTeX output ------------------------------------------------- @@ -230,7 +230,7 @@ # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ - ("index", "redis-py.tex", "redis-py Documentation", "Redis Inc", "manual") + ("index", "valkey-py.tex", "valkey-py Documentation", "Valkey Inc", "manual") ] # The name of an image file (relative to this directory) to place at the top of @@ -258,7 +258,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [("index", "redis-py", "redis-py Documentation", ["Andy McCurdy"], 1)] +man_pages = [("index", "valkey-py", "valkey-py Documentation", ["Andy McCurdy"], 1)] # If true, show URL addresses after external links. # man_show_urls = False @@ -272,10 +272,10 @@ texinfo_documents = [ ( "index", - "redis-py", - "redis-py Documentation", - "Redis Inc", - "redis-py", + "valkey-py", + "valkey-py Documentation", + "Valkey Inc", + "valkey-py", "One line description of project.", "Miscellaneous", ) @@ -290,7 +290,7 @@ # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' -epub_title = "redis-py" -epub_author = "Redis Inc" -epub_publisher = "Redis Inc" -epub_copyright = "2023, Redis Inc" +epub_title = "valkey-py" +epub_author = "Valkey Inc" +epub_publisher = "Valkey Inc" +epub_copyright = "2023, Valkey Inc" diff --git a/docs/connections.rst b/docs/connections.rst index 1c826a0c..8f7758ed 100644 --- a/docs/connections.rst +++ b/docs/connections.rst @@ -1,24 +1,24 @@ -Connecting to Redis +Connecting to Valkey ################### Generic Client ************** -This is the client used to connect directly to a standard Redis node. +This is the client used to connect directly to a standard Valkey node. -.. autoclass:: redis.Redis +.. 
autoclass:: valkey.Valkey :members: Sentinel Client *************** -Redis `Sentinel `_ provides high availability for Redis. There are commands that can only be executed against a Redis node running in sentinel mode. Connecting to those nodes, and executing commands against them requires a Sentinel connection. +Valkey `Sentinel `_ provides high availability for Valkey. There are commands that can only be executed against a Valkey node running in sentinel mode. Connecting to those nodes, and executing commands against them requires a Sentinel connection. -Connection example (assumes Redis exists on the ports listed below): +Connection example (assumes Valkey exists on the ports listed below): - >>> from redis import Sentinel + >>> from valkey import Sentinel >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) >>> sentinel.discover_master('mymaster') ('127.0.0.1', 6379) @@ -27,28 +27,28 @@ Connection example (assumes Redis exists on the ports listed below): Sentinel ======== -.. autoclass:: redis.sentinel.Sentinel +.. autoclass:: valkey.sentinel.Sentinel :members: SentinelConnectionPool ====================== -.. autoclass:: redis.sentinel.SentinelConnectionPool +.. autoclass:: valkey.sentinel.SentinelConnectionPool :members: Cluster Client ************** -This client is used for connecting to a Redis Cluster. +This client is used for connecting to a Valkey Cluster. -RedisCluster +ValkeyCluster ============ -.. autoclass:: redis.cluster.RedisCluster +.. autoclass:: valkey.cluster.ValkeyCluster :members: ClusterNode =========== -.. autoclass:: redis.cluster.ClusterNode +.. autoclass:: valkey.cluster.ClusterNode :members: @@ -57,30 +57,30 @@ Async Client See complete example: `here `_ -This client is used for communicating with Redis, asynchronously. +This client is used for communicating with Valkey, asynchronously. -.. autoclass:: redis.asyncio.client.Redis +.. 
autoclass:: valkey.asyncio.client.Valkey :members: Async Cluster Client ******************** -RedisCluster (Async) +ValkeyCluster (Async) ==================== -.. autoclass:: redis.asyncio.cluster.RedisCluster +.. autoclass:: valkey.asyncio.cluster.ValkeyCluster :members: :member-order: bysource ClusterNode (Async) =================== -.. autoclass:: redis.asyncio.cluster.ClusterNode +.. autoclass:: valkey.asyncio.cluster.ClusterNode :members: :member-order: bysource ClusterPipeline (Async) ======================= -.. autoclass:: redis.asyncio.cluster.ClusterPipeline +.. autoclass:: valkey.asyncio.cluster.ClusterPipeline :members: execute_command, execute :member-order: bysource @@ -92,12 +92,12 @@ See complete example: `here `_ Connection ========== -.. autoclass:: redis.connection.Connection +.. autoclass:: valkey.connection.Connection :members: Connection (Async) ================== -.. autoclass:: redis.asyncio.connection.Connection +.. autoclass:: valkey.asyncio.connection.Connection :members: @@ -108,10 +108,10 @@ See complete example: `here `_ ConnectionPool ============== -.. autoclass:: redis.connection.ConnectionPool +.. autoclass:: valkey.connection.ConnectionPool :members: ConnectionPool (Async) ====================== -.. autoclass:: redis.asyncio.connection.ConnectionPool +.. autoclass:: valkey.asyncio.connection.ConnectionPool :members: diff --git a/docs/examples.rst b/docs/examples.rst index 47fdbdf4..b9cf30e3 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -13,5 +13,5 @@ Examples examples/search_vector_similarity_examples examples/pipeline_examples examples/timeseries_examples - examples/redis-stream-example + examples/valkey-stream-example examples/opentelemetry_api_examples diff --git a/docs/examples/README.md b/docs/examples/README.md index ca6d5dcf..6462fd29 100644 --- a/docs/examples/README.md +++ b/docs/examples/README.md @@ -1,3 +1,3 @@ # Examples -Examples of redis-py usage go here. 
They're being linked to the [generated documentation](https://redis-py.readthedocs.org). +Examples of valkey-py usage go here. They're being linked to the [generated documentation](https://valkey-py.readthedocs.org). diff --git a/docs/examples/asyncio_examples.ipynb b/docs/examples/asyncio_examples.ipynb index d2b11b56..c1fa7c54 100644 --- a/docs/examples/asyncio_examples.ipynb +++ b/docs/examples/asyncio_examples.ipynb @@ -15,7 +15,7 @@ "\n", "## Connecting and Disconnecting\n", "\n", - "Using asyncio Redis requires an explicit disconnect of the connection since there is no asyncio deconstructor magic method. By default, an internal connection pool is created on `redis.Redis()` and attached to the `Redis` instance. When calling `Redis.aclose` this internal connection pool closes automatically, which disconnects all connections." + "Using asyncio Valkey requires an explicit disconnect of the connection since there is no asyncio deconstructor magic method. By default, an internal connection pool is created on `valkey.Valkey()` and attached to the `Valkey` instance. When calling `Valkey.aclose` this internal connection pool closes automatically, which disconnects all connections." ] }, { @@ -37,9 +37,9 @@ } ], "source": [ - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", - "client = redis.Redis()\n", + "client = valkey.Valkey()\n", "print(f\"Ping successful: {await client.ping()}\")\n", "await client.aclose()" ] @@ -48,7 +48,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If you create a custom `ConnectionPool` to be used by a single `Redis` instance, use the `Redis.from_pool` class method. The Redis client will take ownership of the connection pool. This will cause the pool to be disconnected along with the Redis instance. Disconnecting the connection pool simply disconnects all connections hosted in the pool." 
+ "If you create a custom `ConnectionPool` to be used by a single `Valkey` instance, use the `Valkey.from_pool` class method. The Valkey client will take ownership of the connection pool. This will cause the pool to be disconnected along with the Valkey instance. Disconnecting the connection pool simply disconnects all connections hosted in the pool." ] }, { @@ -57,10 +57,10 @@ "metadata": {}, "outputs": [], "source": [ - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", - "pool = redis.ConnectionPool.from_url(\"redis://localhost\")\n", - "client = redis.Redis.from_pool(pool)\n", + "pool = valkey.ConnectionPool.from_url(\"valkey://localhost\")\n", + "client = valkey.Valkey.from_pool(pool)\n", "await client.aclose()" ] }, @@ -74,7 +74,7 @@ }, "source": [ "\n", - "However, if the `ConnectionPool` is to be shared by several `Redis` instances, you should use the `connection_pool` argument, and you may want to disconnect the connection pool explicitly." + "However, if the `ConnectionPool` is to be shared by several `Valkey` instances, you should use the `connection_pool` argument, and you may want to disconnect the connection pool explicitly." 
] }, { @@ -88,11 +88,11 @@ }, "outputs": [], "source": [ - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", - "pool = redis.ConnectionPool.from_url(\"redis://localhost\")\n", - "client1 = redis.Redis(connection_pool=pool)\n", - "client2 = redis.Redis(connection_pool=pool)\n", + "pool = valkey.ConnectionPool.from_url(\"valkey://localhost\")\n", + "client1 = valkey.Valkey(connection_pool=pool)\n", + "client2 = valkey.Valkey(connection_pool=pool)\n", "await client1.aclose()\n", "await client2.aclose()\n", "await pool.aclose()" @@ -111,9 +111,9 @@ "metadata": {}, "outputs": [], "source": [ - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", - "client = redis.Redis(protocol=3)\n", + "client = valkey.Valkey(protocol=3)\n", "await client.aclose()\n", "await client.ping()" ] }, @@ -129,9 +129,9 @@ "source": [ "## Transactions (Multi/Exec)\n", "\n", - "The aioredis.Redis.pipeline will return a aioredis.Pipeline object, which will buffer all commands in-memory and compile them into batches using the Redis Bulk String protocol. Additionally, each command will return the Pipeline instance, allowing you to chain your commands, i.e., p.set('foo', 1).set('bar', 2).mget('foo', 'bar').\n", + "The valkey.asyncio.Valkey.pipeline will return a valkey.asyncio.Pipeline object, which will buffer all commands in-memory and compile them into batches using the Valkey Bulk String protocol. Additionally, each command will return the Pipeline instance, allowing you to chain your commands, i.e., p.set('foo', 1).set('bar', 2).mget('foo', 'bar').\n", "\n", - "The commands will not be reflected in Redis until execute() is called & awaited.\n", + "The commands will not be reflected in Valkey until execute() is called & awaited.\n", "\n", "Usually, when performing a bulk operation, taking advantage of a “transaction” (e.g., Multi/Exec) is to be desired, as it will also add a layer of atomicity to your bulk operation."
] @@ -147,9 +147,9 @@ }, "outputs": [], "source": [ - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", - "r = await redis.from_url(\"redis://localhost\")\n", + "r = await valkey.from_url(\"valkey://localhost\")\n", "async with r.pipeline(transaction=True) as pipe:\n", " ok1, ok2 = await (pipe.set(\"key1\", \"value1\").set(\"key2\", \"value2\").execute())\n", "assert ok1\n", @@ -194,12 +194,12 @@ "source": [ "import asyncio\n", "\n", - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", "STOPWORD = \"STOP\"\n", "\n", "\n", - "async def reader(channel: redis.client.PubSub):\n", + "async def reader(channel: valkey.client.PubSub):\n", " while True:\n", " message = await channel.get_message(ignore_subscribe_messages=True, timeout=None)\n", " if message is not None:\n", @@ -208,7 +208,7 @@ " print(\"(Reader) STOP\")\n", " break\n", "\n", - "r = redis.from_url(\"redis://localhost\")\n", + "r = valkey.from_url(\"valkey://localhost\")\n", "async with r.pubsub() as pubsub:\n", " await pubsub.subscribe(\"channel:1\", \"channel:2\")\n", "\n", @@ -257,12 +257,12 @@ "source": [ "import asyncio\n", "\n", - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", "STOPWORD = \"STOP\"\n", "\n", "\n", - "async def reader(channel: redis.client.PubSub):\n", + "async def reader(channel: valkey.client.PubSub):\n", " while True:\n", " message = await channel.get_message(ignore_subscribe_messages=True, timeout=None)\n", " if message is not None:\n", @@ -272,7 +272,7 @@ " break\n", "\n", "\n", - "r = await redis.from_url(\"redis://localhost\")\n", + "r = await valkey.from_url(\"valkey://localhost\")\n", "async with r.pubsub() as pubsub:\n", " await pubsub.psubscribe(\"channel:*\")\n", "\n", @@ -296,11 +296,11 @@ "source": [ "## Sentinel Client\n", "\n", - "The Sentinel client requires a list of Redis Sentinel addresses to connect to and start discovering services.\n", + "The Sentinel client requires a list of 
Valkey Sentinel addresses to connect to and start discovering services.\n", "\n", - "Calling aioredis.sentinel.Sentinel.master_for or aioredis.sentinel.Sentinel.slave_for methods will return Redis clients connected to specified services monitored by Sentinel.\n", + "Calling valkey.asyncio.sentinel.Sentinel.master_for or valkey.asyncio.sentinel.Sentinel.slave_for methods will return Valkey clients connected to specified services monitored by Sentinel.\n", "\n", - "Sentinel client will detect failover and reconnect Redis clients automatically." + "Sentinel client will detect failover and reconnect Valkey clients automatically." ] }, { @@ -316,7 +316,7 @@ "source": [ "import asyncio\n", "\n", - "from redis.asyncio.sentinel import Sentinel\n", + "from valkey.asyncio.sentinel import Sentinel\n", "\n", "\n", "sentinel = Sentinel([(\"localhost\", 26379), (\"sentinel2\", 26379)])\n", @@ -332,13 +332,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to Redis instances by specifying a URL scheme.\n", + "## Connecting to Valkey instances by specifying a URL scheme.\n", "Parameters are passed to the following schems, as parameters to the url scheme.\n", "\n", "Three URL schemes are supported:\n", "\n", - "- `redis://` creates a TCP socket connection. \n", - "- `rediss://` creates a SSL wrapped TCP socket connection. \n", + "- `valkey://` creates a TCP socket connection. \n", + "- `valkeys://` creates a SSL wrapped TCP socket connection. 
\n", "- ``unix://``: creates a Unix Domain Socket connection.\n" ] }, @@ -358,8 +358,8 @@ } ], "source": [ - "import redis.asyncio as redis\n", - "url_connection = redis.from_url(\"redis://localhost:6379?decode_responses=True\")\n", + "import valkey.asyncio as valkey\n", + "url_connection = valkey.from_url(\"valkey://localhost:6379?decode_responses=True\")\n", "url_connection.ping()" ] }, @@ -376,9 +376,9 @@ "metadata": {}, "outputs": [], "source": [ - "import redis.asyncio as redis\n", + "import valkey.asyncio as valkey\n", "\n", - "url_connection = redis.from_url(\"redis://localhost:6379?decode_responses=True&protocol=3\")\n", + "url_connection = valkey.from_url(\"valkey://localhost:6379?decode_responses=True&protocol=3\")\n", "url_connection.ping()" ] } diff --git a/docs/examples/connection_examples.ipynb b/docs/examples/connection_examples.ipynb index cddded28..48347d6e 100644 --- a/docs/examples/connection_examples.ipynb +++ b/docs/examples/connection_examples.ipynb @@ -11,7 +11,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a default Redis instance, running locally." + "## Connecting to a default Valkey instance, running locally." 
] }, { @@ -31,9 +31,9 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "\n", - "connection = redis.Redis()\n", + "connection = valkey.Valkey()\n", "connection.ping()" ] }, @@ -41,7 +41,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### By default Redis return binary responses, to decode them use decode_responses=True" + "### By default Valkey return binary responses, to decode them use decode_responses=True" ] }, { @@ -61,9 +61,9 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "\n", - "decoded_connection = redis.Redis(decode_responses=True)\n", + "decoded_connection = valkey.Valkey(decode_responses=True)\n", "decoded_connection.ping()" ] }, @@ -80,9 +80,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "import redis\n", + "import valkey\n", "\n", - "r = redis.Redis(protocol=3)\n", + "r = valkey.Valkey(protocol=3)\n", "rcon.ping()" ] }, @@ -90,7 +90,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a redis instance, specifying a host and port with credentials." + "## Connecting to a valkey instance, specifying a host and port with credentials." 
] }, { @@ -110,9 +110,9 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "\n", - "user_connection = redis.Redis(host='localhost', port=6380, username='dvora', password='redis', decode_responses=True)\n", + "user_connection = valkey.Valkey(host='localhost', port=6380, username='dvora', password='valkey', decode_responses=True)\n", "user_connection.ping()" ] }, @@ -120,7 +120,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a redis instance with username and password credential provider" + "## Connecting to a valkey instance with username and password credential provider" ] }, { @@ -129,10 +129,10 @@ "metadata": {}, "outputs": [], "source": [ - "import redis\n", + "import valkey\n", "\n", - "creds_provider = redis.UsernamePasswordCredentialProvider(\"username\", \"password\")\n", - "user_connection = redis.Redis(host=\"localhost\", port=6379, credential_provider=creds_provider)\n", + "creds_provider = valkey.UsernamePasswordCredentialProvider(\"username\", \"password\")\n", + "user_connection = valkey.Valkey(host=\"localhost\", port=6379, credential_provider=creds_provider)\n", "user_connection.ping()" ] }, @@ -140,7 +140,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a redis instance with standard credential provider" + "## Connecting to a valkey instance with standard credential provider" ] }, { @@ -150,12 +150,12 @@ "outputs": [], "source": [ "from typing import Tuple\n", - "import redis\n", + "import valkey\n", "\n", "creds_map = {\"user_1\": \"pass_1\",\n", " \"user_2\": \"pass_2\"}\n", "\n", - "class UserMapCredentialProvider(redis.CredentialProvider):\n", + "class UserMapCredentialProvider(valkey.CredentialProvider):\n", " def __init__(self, username: str):\n", " self.username = username\n", "\n", @@ -163,7 +163,7 @@ " return self.username, creds_map.get(self.username)\n", "\n", "# Create a default connection to set the ACL user\n", - "default_connection = redis.Redis(host=\"localhost\", 
port=6379)\n", + "default_connection = valkey.Valkey(host=\"localhost\", port=6379)\n", "default_connection.acl_setuser(\n", " \"user_1\",\n", " enabled=True,\n", @@ -175,7 +175,7 @@ "# Create a UserMapCredentialProvider instance for user_1\n", "creds_provider = UserMapCredentialProvider(\"user_1\")\n", "# Initiate user connection with the credential provider\n", - "user_connection = redis.Redis(host=\"localhost\", port=6379,\n", + "user_connection = valkey.Valkey(host=\"localhost\", port=6379,\n", " credential_provider=creds_provider)\n", "user_connection.ping()" ] @@ -184,7 +184,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a redis instance first with an initial credential set and then calling the credential provider" + "## Connecting to a valkey instance first with an initial credential set and then calling the credential provider" ] }, { @@ -194,9 +194,9 @@ "outputs": [], "source": [ "from typing import Union\n", - "import redis\n", + "import valkey\n", "\n", - "class InitCredsSetCredentialProvider(redis.CredentialProvider):\n", + "class InitCredsSetCredentialProvider(valkey.CredentialProvider):\n", " def __init__(self, username, password):\n", " self.username = username\n", " self.password = password\n", @@ -222,7 +222,7 @@ "collapsed": false }, "source": [ - "## Connecting to a redis instance with AWS Secrets Manager credential provider." + "## Connecting to a valkey instance with AWS Secrets Manager credential provider." 
] }, { @@ -236,12 +236,12 @@ }, "outputs": [], "source": [ - "import redis\n", + "import valkey\n", "import boto3\n", "import json\n", "import cachetools.func\n", "\n", - "class SecretsManagerProvider(redis.CredentialProvider):\n", + "class SecretsManagerProvider(valkey.CredentialProvider):\n", " def __init__(self, secret_id, version_id=None, version_stage='AWSCURRENT'):\n", " self.sm_client = boto3.client('secretsmanager')\n", " self.secret_id = secret_id\n", @@ -258,7 +258,7 @@ "\n", "my_secret_id = \"EXAMPLE1-90ab-cdef-fedc-ba987SECRET1\"\n", "creds_provider = SecretsManagerProvider(secret_id=my_secret_id)\n", - "user_connection = redis.Redis(host=\"localhost\", port=6379, credential_provider=creds_provider)\n", + "user_connection = valkey.Valkey(host=\"localhost\", port=6379, credential_provider=creds_provider)\n", "user_connection.ping()" ] }, @@ -266,7 +266,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a redis instance with ElastiCache IAM credential provider." + "## Connecting to a valkey instance with ElastiCache IAM credential provider." 
] }, { @@ -290,12 +290,12 @@ "from urllib.parse import ParseResult, urlencode, urlunparse\n", "\n", "import botocore.session\n", - "import redis\n", + "import valkey\n", "from botocore.model import ServiceId\n", "from botocore.signers import RequestSigner\n", "from cachetools import TTLCache, cached\n", "\n", - "class ElastiCacheIAMProvider(redis.CredentialProvider):\n", + "class ElastiCacheIAMProvider(valkey.CredentialProvider):\n", " def __init__(self, user, cluster_name, region=\"us-east-1\"):\n", " self.user = user\n", " self.cluster_name = cluster_name\n", @@ -340,7 +340,7 @@ "cluster_name = \"test-001\"\n", "endpoint = \"test-001.use1.cache.amazonaws.com\"\n", "creds_provider = ElastiCacheIAMProvider(user=username, cluster_name=cluster_name)\n", - "user_connection = redis.Redis(host=endpoint, port=6379, credential_provider=creds_provider)\n", + "user_connection = valkey.Valkey(host=endpoint, port=6379, credential_provider=creds_provider)\n", "user_connection.ping()" ] }, @@ -348,13 +348,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to Redis instances by specifying a URL scheme.\n", + "## Connecting to Valkey instances by specifying a URL scheme.\n", "Parameters are passed to the following schems, as parameters to the url scheme.\n", "\n", "Three URL schemes are supported:\n", "\n", - "- `redis://` creates a TCP socket connection. \n", - "- `rediss://` creates a SSL wrapped TCP socket connection. \n", + "- `valkey://` creates a TCP socket connection. \n", + "- `valkeys://` creates a SSL wrapped TCP socket connection. 
\n", "- ``unix://``: creates a Unix Domain Socket connection.\n" ] }, @@ -375,7 +375,7 @@ } ], "source": [ - "url_connection = redis.from_url(\"redis://localhost:6379?decode_responses=True&health_check_interval=2\")\n", + "url_connection = valkey.from_url(\"valkey://localhost:6379?decode_responses=True&health_check_interval=2\")\n", "url_connection.ping()" ] }, @@ -383,7 +383,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to Redis instances by specifying a URL scheme and the RESP3 protocol.\n" + "## Connecting to Valkey instances by specifying a URL scheme and the RESP3 protocol.\n" ] }, { @@ -392,7 +392,7 @@ "metadata": {}, "outputs": [], "source": [ - "url_connection = redis.from_url(\"redis://localhost:6379?decode_responses=True&health_check_interval=2&protocol=3\")\n", + "url_connection = valkey.from_url(\"valkey://localhost:6379?decode_responses=True&health_check_interval=2&protocol=3\")\n", "url_connection.ping()" ] }, @@ -409,9 +409,9 @@ "metadata": {}, "outputs": [], "source": [ - "from redis.sentinel import Sentinel\n", + "from valkey.sentinel import Sentinel\n", "sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)\n", - "sentinel.discover_master(\"redis-py-test\")" + "sentinel.discover_master(\"valkey-py-test\")" ] } ], diff --git a/docs/examples/opentelemetry/README.md b/docs/examples/opentelemetry/README.md index a1d1c04e..4409924a 100644 --- a/docs/examples/opentelemetry/README.md +++ b/docs/examples/opentelemetry/README.md @@ -1,16 +1,16 @@ -# Example for redis-py OpenTelemetry instrumentation +# Example for valkey-py OpenTelemetry instrumentation -This example demonstrates how to monitor Redis using [OpenTelemetry](https://opentelemetry.io/) and -[Uptrace](https://github.com/uptrace/uptrace). It requires Docker to start Redis Server and Uptrace. +This example demonstrates how to monitor Valkey using [OpenTelemetry](https://opentelemetry.io/) and +[Uptrace](https://github.com/uptrace/uptrace). 
It requires Docker to start Valkey Server and Uptrace. See -[Monitoring redis-py performance with OpenTelemetry](https://redis-py.readthedocs.io/en/latest/opentelemetry.html) +[Monitoring valkey-py performance with OpenTelemetry](https://valkey-py.readthedocs.io/en/latest/opentelemetry.html) for details. **Step 1**. Download the example using Git: ```shell -git clone https://github.com/redis/redis-py.git +git clone https://github.com/valkey-io/valkey-py.git cd example/opentelemetry ``` @@ -34,14 +34,14 @@ docker-compose up -d docker-compose logs uptrace ``` -**Step 5**. Run the Redis client example and follow the link from the CLI to view the trace: +**Step 5**. Run the Valkey client example and follow the link from the CLI to view the trace: ```shell python3 main.py trace: http://localhost:14318/traces/ee029d8782242c8ed38b16d961093b35 ``` -![Redis trace](./image/redis-py-trace.png) +![Valkey trace](./image/valkey-py-trace.png) You can also open Uptrace UI at [http://localhost:14318](http://localhost:14318) to view available spans, logs, and metrics.
diff --git a/docs/examples/opentelemetry/config/otel-collector.yaml b/docs/examples/opentelemetry/config/otel-collector.yaml index b44dd1fc..c7aecea0 100644 --- a/docs/examples/opentelemetry/config/otel-collector.yaml +++ b/docs/examples/opentelemetry/config/otel-collector.yaml @@ -20,8 +20,8 @@ receivers: memory: network: paging: - redis: - endpoint: "redis-server:6379" + redis: + endpoint: "valkey-server:6379" collection_interval: 10s jaeger: protocols: @@ -57,7 +57,7 @@ service: processors: [batch] exporters: [otlp] metrics/hostmetrics: - receivers: [hostmetrics, redis] + receivers: [hostmetrics, redis] processors: [batch, resourcedetection] exporters: [otlp] logs: diff --git a/docs/examples/opentelemetry/docker-compose.yml b/docs/examples/opentelemetry/docker-compose.yml index ea1d6dca..a0a4119b 100644 --- a/docs/examples/opentelemetry/docker-compose.yml +++ b/docs/examples/opentelemetry/docker-compose.yml @@ -65,12 +65,12 @@ services: ports: - "8025:8025" - redis-server: - image: redis + valkey-server: + image: valkey/valkey ports: - "6379:6379" - redis-cli: - image: redis + valkey-cli: + image: valkey/valkey volumes: uptrace_data: diff --git a/docs/examples/opentelemetry/main.py b/docs/examples/opentelemetry/main.py index 9ef67233..716c35e1 100755 --- a/docs/examples/opentelemetry/main.py +++ b/docs/examples/opentelemetry/main.py @@ -2,10 +2,10 @@ import time -import redis +import valkey import uptrace from opentelemetry import trace -from opentelemetry.instrumentation.redis import RedisInstrumentor +from opentelemetry.instrumentation.valkey import ValkeyInstrumentor tracer = trace.get_tracer("app_or_package_name", "1.0.0") @@ -16,9 +16,9 @@ def main(): service_name="myservice", service_version="1.0.0", ) - RedisInstrumentor().instrument() + ValkeyInstrumentor().instrument() - client = redis.StrictRedis(host="localhost", port=6379) + client = valkey.StrictValkey(host="localhost", port=6379) span = handle_request(client) print("trace:", uptrace.trace_url(span)) diff --git 
a/docs/examples/opentelemetry/requirements.txt b/docs/examples/opentelemetry/requirements.txt index 21328014..9ddd00ce 100644 --- a/docs/examples/opentelemetry/requirements.txt +++ b/docs/examples/opentelemetry/requirements.txt @@ -1,3 +1,3 @@ -redis==4.3.4 +valkey==4.3.4 uptrace==1.14.0 -opentelemetry-instrumentation-redis==0.35b0 +opentelemetry-instrumentation-valkey==0.35b0 diff --git a/docs/examples/opentelemetry_api_examples.ipynb b/docs/examples/opentelemetry_api_examples.ipynb index 28fe7586..4f654c36 100644 --- a/docs/examples/opentelemetry_api_examples.ipynb +++ b/docs/examples/opentelemetry_api_examples.ipynb @@ -159,7 +159,7 @@ " },\n", " \"attributes\": {\n", " \"enduser.id\": \"jupyter\",\n", - " \"enduser.email\": \"jupyter@redis-py\"\n", + " \"enduser.email\": \"jupyter@valkey-py\"\n", " },\n", " \"events\": [],\n", " \"links\": [],\n", @@ -180,7 +180,7 @@ "with tracer.start_as_current_span(\"operation-name\") as span:\n", " if span.is_recording():\n", " span.set_attribute(\"enduser.id\", \"jupyter\")\n", - " span.set_attribute(\"enduser.email\", \"jupyter@redis-py\")\n", + " span.set_attribute(\"enduser.email\", \"jupyter@valkey-py\")\n", " time.sleep(1)" ] }, diff --git a/docs/examples/pipeline_examples.ipynb b/docs/examples/pipeline_examples.ipynb index 36ce31d7..c513a2fe 100644 --- a/docs/examples/pipeline_examples.ipynb +++ b/docs/examples/pipeline_examples.ipynb @@ -6,14 +6,14 @@ "source": [ "# Pipeline examples\n", "\n", - "This example show quickly how to use pipelines in `redis-py`." + "This example show quickly how to use pipelines in `valkey-py`." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Checking that Redis is running" + "## Checking that Valkey is running" ] }, { @@ -33,9 +33,9 @@ } ], "source": [ - "import redis \n", + "import valkey \n", "\n", - "r = redis.Redis(decode_responses=True)\n", + "r = valkey.Valkey(decode_responses=True)\n", "r.ping()" ] }, @@ -162,7 +162,7 @@ "source": [ "## Performance comparison\n", "\n", - "Using pipelines can improve performance, for more informations, see [Redis documentation about pipelining](https://redis.io/docs/manual/pipelining/). Here is a simple comparison test of performance between basic and pipelined commands (we simply increment a value and measure the time taken by both method)." + "Using pipelines can improve performance, for more informations, see [Valkey documentation about pipelining](https://valkey.io/docs/manual/pipelining/). Here is a simple comparison test of performance between basic and pipelined commands (we simply increment a value and measure the time taken by both method)." 
] }, { diff --git a/docs/examples/redis-stream-example.ipynb b/docs/examples/redis-stream-example.ipynb index 60f4ddd3..6f7f5a8c 100644 --- a/docs/examples/redis-stream-example.ipynb +++ b/docs/examples/redis-stream-example.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Redis Stream Examples" + "# Valkey Stream Examples" ] }, { @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "redis_host = \"redis\"\n", + "valkey_host = \"valkey\"\n", "stream_key = \"skey\"\n", "stream2_key = \"s2key\"\n", "group1 = \"grp1\"\n", @@ -51,11 +51,11 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "from time import time\n", - "from redis.exceptions import ConnectionError, DataError, NoScriptError, RedisError, ResponseError\n", + "from valkey.exceptions import ConnectionError, DataError, NoScriptError, ValkeyError, ResponseError\n", "\n", - "r = redis.Redis( redis_host )\n", + "r = valkey.Valkey( valkey_host )\n", "r.ping()" ] }, @@ -342,7 +342,7 @@ "metadata": {}, "source": [ "# Stream groups\n", - "With the groups is possible track, for many consumers, and at the Redis side, which message have been already consumed.\n", + "With the groups is possible track, for many consumers, and at the Valkey side, which message have been already consumed.\n", "## add some data to streams\n", "Creating 2 streams with 10 messages each." 
] diff --git a/docs/examples/search_json_examples.ipynb b/docs/examples/search_json_examples.ipynb index 9ce1efc0..cf507a79 100644 --- a/docs/examples/search_json_examples.ipynb +++ b/docs/examples/search_json_examples.ipynb @@ -25,16 +25,16 @@ } ], "source": [ - "import redis\n", - "from redis.commands.json.path import Path\n", - "import redis.commands.search.aggregation as aggregations\n", - "import redis.commands.search.reducers as reducers\n", - "from redis.commands.search.field import TextField, NumericField, TagField\n", - "from redis.commands.search.indexDefinition import IndexDefinition, IndexType\n", - "from redis.commands.search.query import NumericFilter, Query\n", + "import valkey\n", + "from valkey.commands.json.path import Path\n", + "import valkey.commands.search.aggregation as aggregations\n", + "import valkey.commands.search.reducers as reducers\n", + "from valkey.commands.search.field import TextField, NumericField, TagField\n", + "from valkey.commands.search.indexDefinition import IndexDefinition, IndexType\n", + "from valkey.commands.search.query import NumericFilter, Query\n", "\n", "\n", - "r = redis.Redis(host='localhost', port=6379)\n", + "r = valkey.Valkey(host='localhost', port=6379)\n", "user1 = {\n", " \"user\":{\n", " \"name\": \"Paul John\",\n", @@ -291,9 +291,9 @@ "hash": "d45c99ba0feda92868abafa8257cbb4709c97f1a0b5dc62bbeebdf89d4fad7fe" }, "kernelspec": { - "display_name": "redis-py", + "display_name": "valkey-py", "language": "python", - "name": "redis-py" + "name": "valkey-py" }, "language_info": { "codemirror_mode": { diff --git a/docs/examples/search_vector_similarity_examples.ipynb b/docs/examples/search_vector_similarity_examples.ipynb index bd1df3c1..c0a5bc3a 100644 --- a/docs/examples/search_vector_similarity_examples.ipynb +++ b/docs/examples/search_vector_similarity_examples.ipynb @@ -18,12 +18,12 @@ "metadata": {}, "outputs": [], "source": [ - "import redis\n", - "from redis.commands.search.field import TagField, 
VectorField\n", - "from redis.commands.search.indexDefinition import IndexDefinition, IndexType\n", - "from redis.commands.search.query import Query\n", + "import valkey\n", + "from valkey.commands.search.field import TagField, VectorField\n", + "from valkey.commands.search.indexDefinition import IndexDefinition, IndexType\n", + "from valkey.commands.search.query import Query\n", "\n", - "r = redis.Redis(host=\"localhost\", port=6379)\n", + "r = valkey.Valkey(host=\"localhost\", port=6379)\n", "\n", "INDEX_NAME = \"index\" # Vector Index Name\n", "DOC_PREFIX = \"doc:\" # RediSearch Key Prefix for the Index\n", @@ -79,9 +79,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Adding Vectors to Redis\n", + "## Adding Vectors to Valkey\n", "\n", - "Next, we add vectors (dummy data) to Redis using `hset`. The search index listens to keyspace notifications and will include any written HASH objects prefixed by `DOC_PREFIX`." + "Next, we add vectors (dummy data) to Valkey using `hset`. The search index listens to keyspace notifications and will include any written HASH objects prefixed by `DOC_PREFIX`." ] }, { @@ -108,7 +108,7 @@ "metadata": {}, "outputs": [], "source": [ - "# instantiate a redis pipeline\n", + "# instantiate a valkey pipeline\n", "pipe = r.pipeline()\n", "\n", "# define some dummy data\n", @@ -138,7 +138,7 @@ "## Searching\n", "You can use VSS queries with the `.ft(...).search(...)` query command. To use a VSS query, you must specify the option `.dialect(2)`.\n", "\n", - "There are two supported types of vector queries in Redis: `KNN` and `Range`. `Hybrid` queries can work in both settings and combine elements of traditional search and VSS." + "There are two supported types of vector queries in Valkey: `KNN` and `Range`. `Hybrid` queries can work in both settings and combine elements of traditional search and VSS." 
] }, { @@ -192,7 +192,7 @@ "metadata": {}, "source": [ "### Range Queries\n", - "Range queries provide a way to filter results by the distance between a vector field in Redis and a query vector based on some pre-defined threshold (radius)." + "Range queries provide a way to filter results by the distance between a vector field in Valkey and a query vector based on some pre-defined threshold (radius)." ] }, { @@ -244,7 +244,7 @@ "metadata": {}, "source": [ "### Hybrid Queries\n", - "Hybrid queries contain both traditional filters (numeric, tags, text) and VSS in one single Redis command." + "Hybrid queries contain both traditional filters (numeric, tags, text) and VSS in one single Valkey command." ] }, { @@ -293,7 +293,7 @@ "metadata": {}, "source": [ "## Vector Creation and Storage Examples\n", - "The above examples use dummy data as vectors. However, in reality, most use cases leverage production-grade AI models for creating embeddings. Below we will take some sample text data, pass it to the OpenAI and Cohere API's respectively, and then write them to Redis." + "The above examples use dummy data as vectors. However, in reality, most use cases leverage production-grade AI models for creating embeddings. Below we will take some sample text data, pass it to the OpenAI and Cohere API's respectively, and then write them to Valkey." 
] }, { @@ -364,7 +364,7 @@ "response = openai.Embedding.create(input=texts, engine=\"text-embedding-ada-002\")\n", "embeddings = np.array([r[\"embedding\"] for r in response[\"data\"]], dtype=np.float32)\n", "\n", - "# Write to Redis\n", + "# Write to Valkey\n", "pipe = r.pipeline()\n", "for i, embedding in enumerate(embeddings):\n", " pipe.hset(f\"doc:{i}\", mapping = {\n", @@ -526,7 +526,7 @@ "response = co.embed(texts=texts, model=\"small\")\n", "embeddings = np.array(response.embeddings, dtype=np.float32)\n", "\n", - "# Write to Redis\n", + "# Write to Valkey\n", "for i, embedding in enumerate(embeddings):\n", " r.hset(f\"doc:{i}\", mapping = {\n", " \"vector\": embedding.tobytes(),\n", @@ -637,7 +637,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Find more example apps, tutorials, and projects using Redis Vector Similarity Search [in this GitHub organization](https://github.com/RedisVentures)." + "Find more example apps, tutorials, and projects using Valkey Vector Similarity Search [in this GitHub organization](https://github.com/ValkeyVentures)." ] } ], diff --git a/docs/examples/set_and_get_examples.ipynb b/docs/examples/set_and_get_examples.ipynb index e45c72a6..a96d2ac0 100644 --- a/docs/examples/set_and_get_examples.ipynb +++ b/docs/examples/set_and_get_examples.ipynb @@ -13,7 +13,7 @@ "id": "a59abd54", "metadata": {}, "source": [ - "## Start off by connecting to the redis server\n", + "## Start off by connecting to the valkey server\n", "\n", "To understand what ```decode_responses=True``` does, refer back to [this document](connection_examples.ipynb)" ] @@ -36,9 +36,9 @@ } ], "source": [ - "import redis \n", + "import valkey \n", "\n", - "r = redis.Redis(decode_responses=True)\n", + "r = valkey.Valkey(decode_responses=True)\n", "r.ping()" ] }, @@ -253,7 +253,7 @@ "id": "6d32dbee", "metadata": {}, "source": [ - "To get multiple keys' values, we can use mget. 
If a non-existing key is also passed, Redis return None for that key" + "To get multiple keys' values, we can use mget. If a non-existing key is also passed, Valkey return None for that key" ] }, { diff --git a/docs/examples/ssl_connection_examples.ipynb b/docs/examples/ssl_connection_examples.ipynb index c94c4e01..961a569e 100644 --- a/docs/examples/ssl_connection_examples.ipynb +++ b/docs/examples/ssl_connection_examples.ipynb @@ -11,7 +11,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a Redis instance via SSL" + "## Connecting to a Valkey instance via SSL" ] }, { @@ -31,9 +31,9 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "\n", - "r = redis.Redis(\n", + "r = valkey.Valkey(\n", " host='localhost', \n", " port=6666, \n", " ssl=True, \n", @@ -46,7 +46,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a Redis instance via a URL string" + "## Connecting to a Valkey instance via a URL string" ] }, { @@ -66,9 +66,9 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "\n", - "r = redis.from_url(\"rediss://localhost:6666?ssl_cert_reqs=none&decode_responses=True&health_check_interval=2\")\n", + "r = valkey.from_url(\"valkeys://localhost:6666?ssl_cert_reqs=none&decode_responses=True&health_check_interval=2\")\n", "r.ping()" ] }, @@ -76,7 +76,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a Redis instance using a ConnectionPool" + "## Connecting to a Valkey instance using a ConnectionPool" ] }, { @@ -96,16 +96,16 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "\n", - "redis_pool = redis.ConnectionPool(\n", + "valkey_pool = valkey.ConnectionPool(\n", " host=\"localhost\", \n", " port=6666, \n", - " connection_class=redis.SSLConnection, \n", + " connection_class=valkey.SSLConnection, \n", " ssl_cert_reqs=\"none\",\n", ")\n", "\n", - "r = redis.StrictRedis(connection_pool=redis_pool) \n", + "r = valkey.StrictValkey(connection_pool=valkey_pool) \n", 
"r.ping()" ] }, @@ -113,7 +113,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a Redis instance via SSL, while specifying a minimum TLS version" + "## Connecting to a Valkey instance via SSL, while specifying a minimum TLS version" ] }, { @@ -133,10 +133,10 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "import ssl\n", "\n", - "r = redis.Redis(\n", + "r = valkey.Valkey(\n", " host=\"localhost\",\n", " port=6666,\n", " ssl=True,\n", @@ -150,7 +150,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a Redis instance via SSL, while specifying a self-signed SSL CA certificate" + "## Connecting to a Valkey instance via SSL, while specifying a self-signed SSL CA certificate" ] }, { @@ -171,11 +171,11 @@ ], "source": [ "import os\n", - "import redis\n", + "import valkey\n", "\n", "pki_dir = os.path.join(\"..\", \"..\", \"dockers\", \"stunnel\", \"keys\")\n", "\n", - "r = redis.Redis(\n", + "r = valkey.Valkey(\n", " host=\"localhost\",\n", " port=6666,\n", " ssl=True,\n", @@ -191,15 +191,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connecting to a Redis instance via SSL, and validate the OCSP status of the certificate\n", + "## Connecting to a Valkey instance via SSL, and validate the OCSP status of the certificate\n", "\n", - "The redis package is designed to be small, meaning extra libraries must be installed, in order to support OCSP stapling. As a result, first install redis via:\n", + "The valkey package is designed to be small, meaning extra libraries must be installed, in order to support OCSP stapling. 
As a result, first install valkey via:\n", "\n", - "`pip install redis[ocsp]`\n", + "`pip install valkey[ocsp]`\n", "\n", - "This will install cryptography, requests, and PyOpenSSL, none of which are generally required to use Redis.\n", + "This will install cryptography, requests, and PyOpenSSL, none of which are generally required to use Valkey.\n", "\n", - "In the next example, we will connect to a Redis instance via SSL, and validate the OCSP status of the certificate. However, the certificate we are using does not have an AIA extension, which means that the OCSP validation cannot be performed." + "In the next example, we will connect to a Valkey instance via SSL, and validate the OCSP status of the certificate. However, the certificate we are using does not have an AIA extension, which means that the OCSP validation cannot be performed." ] }, { @@ -217,11 +217,11 @@ ], "source": [ "import os\n", - "import redis\n", + "import valkey\n", "\n", "pki_dir = os.path.join(\"..\", \"..\", \"dockers\", \"stunnel\", \"keys\")\n", "\n", - "r = redis.Redis(\n", + "r = valkey.Valkey(\n", " host=\"localhost\",\n", " port=6666,\n", " ssl=True,\n", @@ -234,7 +234,7 @@ "\n", "try:\n", " r.ping()\n", - "except redis.ConnectionError as e:\n", + "except valkey.ConnectionError as e:\n", " assert e.args[0] == \"No AIA information present in ssl certificate\"\n", " print(\"OCSP validation failed as expected.\")" ] @@ -243,7 +243,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Connect to a Redis instance via SSL, and validate OCSP-stapled certificates\n", + "## Connect to a Valkey instance via SSL, and validate OCSP-stapled certificates\n", "\n", "It is also possible to validate an OCSP stapled response. Again, for this example the server does not send an OCSP stapled response, so the validation will fail." 
] @@ -263,7 +263,7 @@ ], "source": [ "import os\n", - "import redis\n", + "import valkey\n", "\n", "pki_dir = os.path.join(\"..\", \"..\", \"dockers\", \"stunnel\", \"keys\")\n", "ca_cert = os.path.join(pki_dir, \"ca-cert.pem\")\n", @@ -273,7 +273,7 @@ "\n", "# If needed, a custom SSL context for OCSP can be specified via ssl_ocsp_context\n", "\n", - "r = redis.Redis(\n", + "r = valkey.Valkey(\n", " host=\"localhost\",\n", " port=6666,\n", " ssl=True,\n", @@ -287,7 +287,7 @@ "\n", "try:\n", " r.ping()\n", - "except redis.ConnectionError as e:\n", + "except valkey.ConnectionError as e:\n", " assert e.args[0] == \"no ocsp response present\"\n", " print(\"OCSP validation failed as expected.\")" ] diff --git a/docs/examples/timeseries_examples.ipynb b/docs/examples/timeseries_examples.ipynb index 691e1335..7e62aac6 100644 --- a/docs/examples/timeseries_examples.ipynb +++ b/docs/examples/timeseries_examples.ipynb @@ -6,9 +6,9 @@ "source": [ "# Timeseries\n", "\n", - "`redis-py` supports [RedisTimeSeries](https://github.com/RedisTimeSeries/RedisTimeSeries/) which is a time-series-database module for Redis.\n", + "`valkey-py` supports [ValkeyTimeSeries](https://github.com/ValkeyTimeSeries/ValkeyTimeSeries/) which is a time-series-database module for Valkey.\n", "\n", - "This example shows how to handle timeseries data with `redis-py`." + "This example shows how to handle timeseries data with `valkey-py`." ] }, { @@ -35,9 +35,9 @@ } ], "source": [ - "import redis \n", + "import valkey \n", "\n", - "r = redis.Redis(decode_responses=True)\n", + "r = valkey.Valkey(decode_responses=True)\n", "ts = r.ts()\n", "\n", "r.ping()" @@ -300,7 +300,7 @@ "source": [ "### Get the last sample matching specific label\n", "\n", - "Get the last sample that matches \"label1=1\", see [Redis documentation](https://redis.io/commands/ts.mget/) to see the posible filter values." 
+ "Get the last sample that matches \"label1=1\", see [Valkey documentation](https://valkey.io/commands/ts.mget/) to see the posible filter values." ] }, { @@ -521,14 +521,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For more informations about duplicate policies, see [Redis documentation](https://redis.io/commands/ts.add/)." + "For more informations about duplicate policies, see [Valkey documentation](https://valkey.io/commands/ts.add/)." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Using Redis TSDB to keep track of a value" + "## Using Valkey TSDB to keep track of a value" ] }, { @@ -603,7 +603,7 @@ { "cell_type": "markdown", "source": [ - "## How to execute multi-key commands on Open Source Redis Cluster" + "## How to execute multi-key commands on Open Source Valkey Cluster" ], "metadata": { "collapsed": false @@ -623,9 +623,9 @@ } ], "source": [ - "import redis\n", + "import valkey\n", "\n", - "r = redis.RedisCluster(host=\"localhost\", port=46379)\n", + "r = valkey.ValkeyCluster(host=\"localhost\", port=46379)\n", "\n", "# This command should be executed on all cluster nodes after creation and any re-sharding\n", "# Please note that this command is internal and will be deprecated in the future\n", diff --git a/docs/exceptions.rst b/docs/exceptions.rst index 8a9fe457..87acff92 100644 --- a/docs/exceptions.rst +++ b/docs/exceptions.rst @@ -3,5 +3,5 @@ Exceptions ########## -.. automodule:: redis.exceptions +.. automodule:: valkey.exceptions :members: \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 2c0557cb..302585ef 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,63 +1,63 @@ -.. redis-py documentation master file, created by +.. valkey-py documentation master file, created by sphinx-quickstart on Thu Jul 28 13:55:57 2011. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. 
-redis-py - Python Client for Redis +valkey-py - Python Client for Valkey ==================================== Getting Started **************** -`redis-py `_ requires a running Redis server, and Python 3.7+. See the `Redis -quickstart `_ for Redis installation instructions. +`valkey-py `_ requires a running Valkey server, and Python 3.7+. See the `Valkey +quickstart `_ for Valkey installation instructions. -redis-py can be installed using pip via ``pip install redis``. +valkey-py can be installed using pip via ``pip install valkey``. -Quickly connecting to redis +Quickly connecting to valkey *************************** -There are two quick ways to connect to Redis. +There are two quick ways to connect to Valkey. -**Assuming you run Redis on localhost:6379 (the default)** +**Assuming you run Valkey on localhost:6379 (the default)** .. code-block:: python - import redis - r = redis.Redis() + import valkey + r = valkey.Valkey() r.ping() -**Running redis on foo.bar.com, port 12345** +**Running valkey on foo.bar.com, port 12345** .. code-block:: python - import redis - r = redis.Redis(host='foo.bar.com', port=12345) + import valkey + r = valkey.Valkey(host='foo.bar.com', port=12345) r.ping() **Another example with foo.bar.com, port 12345** .. code-block:: python - import redis - r = redis.from_url('redis://foo.bar.com:12345') + import valkey + r = valkey.from_url('valkey://foo.bar.com:12345') r.ping() -After that, you probably want to `run redis commands `_. +After that, you probably want to `run valkey commands `_. .. toctree:: :hidden: genindex -Redis Command Functions +Valkey Command Functions *********************** .. 
toctree:: :maxdepth: 2 commands - redismodules + valkeymodules Module Documentation ******************** @@ -79,12 +79,12 @@ Module Documentation Contributing ************* -- `How to contribute `_ -- `Issue Tracker `_ -- `Source Code `_ -- `Release History `_ +- `How to contribute `_ +- `Issue Tracker `_ +- `Source Code `_ +- `Release History `_ License ******* -This project is licensed under the `MIT license `_. +This project is licensed under the `MIT license `_. diff --git a/docs/lock.rst b/docs/lock.rst index cce0867a..b3c74ef3 100644 --- a/docs/lock.rst +++ b/docs/lock.rst @@ -1,5 +1,5 @@ Lock ######### -.. automodule:: redis.lock +.. automodule:: valkey.lock :members: \ No newline at end of file diff --git a/docs/logo-redis.png b/docs/logo-redis.png deleted file mode 100644 index 45b4a3f2..00000000 Binary files a/docs/logo-redis.png and /dev/null differ diff --git a/docs/logo-valkey.png b/docs/logo-valkey.png new file mode 100644 index 00000000..2eaf093a Binary files /dev/null and b/docs/logo-valkey.png differ diff --git a/docs/lua_scripting.rst b/docs/lua_scripting.rst index bd7b9bc0..44f98eb3 100644 --- a/docs/lua_scripting.rst +++ b/docs/lua_scripting.rst @@ -9,10 +9,10 @@ Lua Scripting Lua Scripting in default connections ------------------------------------ -redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there +valkey-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are a number of edge cases that make these commands tedious to use in -real world scenarios. Therefore, redis-py exposes a Script object that -makes scripting much easier to use. (RedisClusters have limited support +real world scenarios. Therefore, valkey-py exposes a Script object that +makes scripting much easier to use. (ValkeyClusters have limited support for scripting.) To create a Script instance, use the register_script function on a @@ -40,7 +40,7 @@ function. Script instances accept the following optional arguments: becomes the KEYS list in Lua. 
- **args**: A list of argument values. This becomes the ARGV list in Lua. -- **client**: A redis-py Client or Pipeline instance that will invoke +- **client**: A valkey-py Client or Pipeline instance that will invoke the script. If client isn't specified, the client that initially created the Script instance (the one that register_script was invoked from) will be used. @@ -58,16 +58,16 @@ key is passed to the script along with the multiplier value of 5. Lua executes the script and returns the result, 10. Script instances can be executed using a different client instance, even -one that points to a completely different Redis server. +one that points to a completely different Valkey server. .. code:: python - >>> r2 = redis.Redis('redis2.example.com') + >>> r2 = redis.Redis('valkey2.example.com') >>> r2.set('foo', 3) >>> multiply(keys=['foo'], args=[5], client=r2) 15 -The Script object ensures that the Lua script is loaded into Redis's +The Script object ensures that the Lua script is loaded into Valkey's script cache. In the event of a NOSCRIPT error, it will load the script and retry executing it. @@ -76,7 +76,7 @@ Pipelines Script objects can also be used in pipelines. The pipeline instance should be passed as the client argument when calling the script. Care is -taken to ensure that the script is registered in Redis's script cache +taken to ensure that the script is registered in Valkey's script cache just prior to pipeline execution. .. code:: python diff --git a/docs/opentelemetry.rst b/docs/opentelemetry.rst index d006a604..fe7af669 100644 --- a/docs/opentelemetry.rst +++ b/docs/opentelemetry.rst @@ -32,30 +32,30 @@ OpenTelemetry instrumentation Instrumentations are plugins for popular frameworks and libraries that use OpenTelemetry API to record important operations, for example, HTTP requests, DB queries, logs, errors, and more. -To install OpenTelemetry `instrumentation `_ for redis-py: +To install OpenTelemetry `instrumentation `_ for valkey-py: .. 
code-block:: shell - pip install opentelemetry-instrumentation-redis + pip install opentelemetry-instrumentation-valkey You can then use it to instrument code like this: .. code-block:: python - from opentelemetry.instrumentation.redis import RedisInstrumentor + from opentelemetry.instrumentation.valkey import ValkeyInstrumentor - RedisInstrumentor().instrument() + ValkeyInstrumentor().instrument() -Once the code is patched, you can use redis-py as usually: +Once the code is patched, you can use valkey-py as usually: .. code-block:: python # Sync client - client = redis.Redis() + client = valkey.Valkey() client.get("my-key") # Async client - client = redis.asyncio.Redis() + client = valkey.asyncio.Valkey() await client.get("my-key") OpenTelemetry API @@ -99,43 +99,43 @@ Uptrace Uptrace is an `open source APM `_ that supports distributed tracing, metrics, and logs. You can use it to monitor applications and set up automatic alerts to receive notifications via email, Slack, Telegram, and more. -You can use Uptrace to monitor redis-py using this `GitHub example `_ as a starting point. +You can use Uptrace to monitor valkey-py using this `GitHub example `_ as a starting point. -.. image:: images/opentelemetry/redis-py-trace.png - :alt: Redis-py trace +.. image:: images/opentelemetry/valkey-py-trace.png + :alt: Valkey-py trace You can `install Uptrace `_ by downloading a DEB/RPM package or a pre-compiled binary. -Monitoring Redis Server performance +Monitoring Valkey Server performance ----------------------------------- -In addition to monitoring redis-py client, you can also monitor Redis Server performance using OpenTelemetry Collector Agent. +In addition to monitoring valkey-py client, you can also monitor Valkey Server performance using OpenTelemetry Collector Agent. OpenTelemetry Collector is a proxy/middleman between your application and a `distributed tracing tool `_ such as Uptrace or Jaeger. 
Collector receives telemetry data, processes it, and then exports the data to APM tools that can store it permanently. -For example, you can use the `OpenTelemetry Redis receiver ` provided by Otel Collector to monitor Redis performance: +For example, you can use the `OpenTelemetry Valkey receiver ` provided by Otel Collector to monitor Valkey performance: -.. image:: images/opentelemetry/redis-metrics.png - :alt: Redis metrics +.. image:: images/opentelemetry/valkey-metrics.png + :alt: Valkey metrics See introduction to `OpenTelemetry Collector `_ for details. Alerting and notifications -------------------------- -Uptrace also allows you to monitor `OpenTelemetry metrics `_ using alerting rules. For example, the following monitor uses the group by node expression to create an alert whenever an individual Redis shard is down: +Uptrace also allows you to monitor `OpenTelemetry metrics `_ using alerting rules. For example, the following monitor uses the group by node expression to create an alert whenever an individual Valkey shard is down: .. code-block:: python monitors: - - name: Redis shard is down + - name: Valkey shard is down metrics: - - redis_up as $redis_up + - valkey_up as $valkey_up query: - group by cluster # monitor each cluster, - group by bdb # each database, - group by node # and each shard - - $redis_up + - $valkey_up min_allowed_value: 1 # shard should be down for 5 minutes to trigger an alert for_duration: 5m @@ -145,10 +145,10 @@ You can also create queries with more complex expressions. For example, the foll .. 
code-block:: python monitors: - - name: Redis read hit rate < 75% + - name: Valkey read hit rate < 75% metrics: - - redis_keyspace_read_hits as $hits - - redis_keyspace_read_misses as $misses + - valkey_keyspace_read_hits as $hits + - valkey_keyspace_read_misses as $misses query: - group by cluster - group by bdb diff --git a/docs/redismodules.rst b/docs/redismodules.rst index 27757cb6..4368a17e 100644 --- a/docs/redismodules.rst +++ b/docs/redismodules.rst @@ -1,20 +1,20 @@ -Redis Modules Commands +Valkey Modules Commands ###################### -Accessing redis module commands requires the installation of the supported `Redis module `_. For a quick start with redis modules, try the `Redismod docker `_. +Accessing valkey module commands requires the installation of the supported `Valkey module `_. For a quick start with valkey modules, try the `Valkeymod docker `_. -RedisBloom Commands +ValkeyBloom Commands ******************* -These are the commands for interacting with the `RedisBloom module `_. Below is a brief example, as well as documentation on the commands themselves. +These are the commands for interacting with the `ValkeyBloom module `_. Below is a brief example, as well as documentation on the commands themselves. **Create and add to a bloom filter** .. code-block:: python - import redis - r = redis.Redis() + import valkey + r = valkey.Valkey() r.bf().create("bloom", 0.01, 1000) r.bf().add("bloom", "foo") @@ -22,8 +22,8 @@ These are the commands for interacting with the `RedisBloom module `_. Below is a brief example, as well as documentation on the commands themselves. +These are the commands for interacting with the `ValkeyGraph module `_. Below is a brief example, as well as documentation on the commands themselves. **Create a graph, adding two nodes** .. 
code-block:: python - import redis - from redis.graph.node import Node + import valkey + from valkey.graph.node import Node john = Node(label="person", properties={"name": "John Doe", "age": 33} jane = Node(label="person", properties={"name": "Jane Doe", "age": 34} - r = redis.Redis() + r = valkey.Valkey() graph = r.graph() graph.add_node(john) graph.add_node(jane) graph.add_node(pat) graph.commit() -.. automodule:: redis.commands.graph.node +.. automodule:: valkey.commands.graph.node :members: Node -.. automodule:: redis.commands.graph.edge +.. automodule:: valkey.commands.graph.edge :members: Edge -.. automodule:: redis.commands.graph.commands +.. automodule:: valkey.commands.graph.commands :members: GraphCommands ------ @@ -87,19 +87,19 @@ These are the commands for interacting with the `RedisGraph module `_. Below is a brief example, as well as documentation on the commands themselves. +These are the commands for interacting with the `RedisJSON module `_. Below is a brief example, as well as documentation on the commands themselves. **Create a json object** .. code-block:: python - import redis - r = redis.Redis() + import valkey + r = valkey.Valkey() r.json().set("mykey", ".", {"hello": "world", "i am": ["a", "json", "object!"]}) Examples of how to combine search and json can be found `here `_. -.. automodule:: redis.commands.json.commands +.. automodule:: valkey.commands.json.commands :members: JSONCommands ----- @@ -107,17 +107,17 @@ Examples of how to combine search and json can be found `here `_. Below is a brief example, as well as documentation on the commands themselves. In the example +These are the commands for interacting with the `RediSearch module `_. Below is a brief example, as well as documentation on the commands themselves. In the example below, an index named *my_index* is being created. When an index name is not specified, an index named *idx* is created. **Create a search index, and display its information** .. 
code-block:: python - import redis - from redis.commands.search.field import TextField + import valkey + from valkey.commands.search.field import TextField - r = redis.Redis() + r = valkey.Valkey() index_name = "my_index" schema = ( TextField("play", weight=5.0), @@ -127,26 +127,26 @@ below, an index named *my_index* is being created. When an index name is not spe print(r.ft(index_name).info()) -.. automodule:: redis.commands.search.commands +.. automodule:: valkey.commands.search.commands :members: SearchCommands ----- -RedisTimeSeries Commands +ValkeyTimeSeries Commands ************************ -These are the commands for interacting with the `RedisTimeSeries module `_. Below is a brief example, as well as documentation on the commands themselves. +These are the commands for interacting with the `ValkeyTimeSeries module `_. Below is a brief example, as well as documentation on the commands themselves. **Create a timeseries object with 5 second retention** .. code-block:: python - import redis - r = redis.Redis() + import valkey + r = valkey.Valkey() r.ts().create(2, retention_msecs=5000) -.. automodule:: redis.commands.timeseries.commands +.. automodule:: valkey.commands.timeseries.commands :members: TimeSeriesCommands diff --git a/docs/resp3_features.rst b/docs/resp3_features.rst index 11c01985..2e5617a0 100644 --- a/docs/resp3_features.rst +++ b/docs/resp3_features.rst @@ -1,69 +1,69 @@ RESP 3 Features =============== -As of version 5.0, redis-py supports the `RESP 3 standard `_. Practically, this means that client using RESP 3 will be faster and more performant as fewer type translations occur in the client. It also means new response types like doubles, true simple strings, maps, and booleans are available. +As of version 5.0, valkey-py supports the `RESP 3 standard `_. Practically, this means that client using RESP 3 will be faster and more performant as fewer type translations occur in the client. 
It also means new response types like doubles, true simple strings, maps, and booleans are available. Connecting ----------- -Enabling RESP3 is no different than other connections in redis-py. In all cases, the connection type must be extending by setting `protocol=3`. The following are some base examples illustrating how to enable a RESP 3 connection. +Enabling RESP3 is no different than other connections in valkey-py. In all cases, the connection type must be extending by setting `protocol=3`. The following are some base examples illustrating how to enable a RESP 3 connection. Connect with a standard connection, but specifying resp 3: .. code:: python - >>> import redis - >>> r = redis.Redis(host='localhost', port=6379, protocol=3) + >>> import valkey + >>> r = valkey.Valkey(host='localhost', port=6379, protocol=3) >>> r.ping() Or using the URL scheme: .. code:: python - >>> import redis - >>> r = redis.from_url("redis://localhost:6379?protocol=3") + >>> import valkey + >>> r = valkey.from_url("valkey://localhost:6379?protocol=3") >>> r.ping() Connect with async, specifying resp 3: .. code:: python - >>> import redis.asyncio as redis - >>> r = redis.Redis(host='localhost', port=6379, protocol=3) + >>> import valkey.asyncio as valkey + >>> r = valkey.Valkey(host='localhost', port=6379, protocol=3) >>> await r.ping() The URL scheme with the async client .. code:: python - >>> import redis.asyncio as Redis - >>> r = redis.from_url("redis://localhost:6379?protocol=3") + >>> import valkey.asyncio as Valkey + >>> r = valkey.from_url("valkey://localhost:6379?protocol=3") >>> await r.ping() -Connecting to an OSS Redis Cluster with RESP 3 +Connecting to an OSS Valkey Cluster with RESP 3 .. 
code:: python - >>> from redis.cluster import RedisCluster, ClusterNode - >>> r = RedisCluster(startup_nodes=[ClusterNode('localhost', 6379), ClusterNode('localhost', 6380)], protocol=3) + >>> from valkey.cluster import ValkeyCluster, ClusterNode + >>> r = ValkeyCluster(startup_nodes=[ClusterNode('localhost', 6379), ClusterNode('localhost', 6380)], protocol=3) >>> r.ping() Push notifications ------------------ -Push notifications are a way that redis sends out of band data. The RESP 3 protocol includes a `push type `_ that allows our client to intercept these out of band messages. By default, clients will log simple messages, but redis-py includes the ability to bring your own function processor. +Push notifications are a way that valkey sends out of band data. The RESP 3 protocol includes a `push type `_ that allows our client to intercept these out of band messages. By default, clients will log simple messages, but valkey-py includes the ability to bring your own function processor. This means that should you want to perform something, on a given push notification, you specify a function during the connection, as per this examples: .. code:: python - >> from redis import Redis + >> from valkey import Valkey >> >> def our_func(message): >> if message.find("This special thing happened"): >> raise IOError("This was the message: \n" + message) >> - >> r = Redis(protocol=3) + >> r = Valkey(protocol=3) >> p = r.pubsub(push_handler_func=our_func) In the example above, upon receipt of a push notification, rather than log the message, in the case where specific text occurs, an IOError is raised. This example, highlights how one could start implementing a customized message handler. diff --git a/docs/retry.rst b/docs/retry.rst index acf198ec..1f8b8c53 100644 --- a/docs/retry.rst +++ b/docs/retry.rst @@ -1,17 +1,17 @@ Retry Helpers ############# -.. automodule:: redis.retry +.. 
automodule:: valkey.retry :members: -Retry in Redis Standalone +Retry in Valkey Standalone ************************** ->>> from redis.backoff import ExponentialBackoff ->>> from redis.retry import Retry ->>> from redis.client import Redis ->>> from redis.exceptions import ( +>>> from valkey.backoff import ExponentialBackoff +>>> from valkey.retry import Retry +>>> from valkey.client import Valkey +>>> from valkey.exceptions import ( >>> BusyLoadingError, >>> ConnectionError, >>> TimeoutError @@ -19,12 +19,12 @@ Retry in Redis Standalone >>> >>> # Run 3 retries with exponential backoff strategy >>> retry = Retry(ExponentialBackoff(), 3) ->>> # Redis client with retries on custom errors ->>> r = Redis(host='localhost', port=6379, retry=retry, retry_on_error=[BusyLoadingError, ConnectionError, TimeoutError]) ->>> # Redis client with retries on TimeoutError only ->>> r_only_timeout = Redis(host='localhost', port=6379, retry=retry, retry_on_timeout=True) +>>> # Valkey client with retries on custom errors +>>> r = Valkey(host='localhost', port=6379, retry=retry, retry_on_error=[BusyLoadingError, ConnectionError, TimeoutError]) +>>> # Valkey client with retries on TimeoutError only +>>> r_only_timeout = Valkey(host='localhost', port=6379, retry=retry, retry_on_timeout=True) -As you can see from the example above, Redis client supports 3 parameters to configure the retry behaviour: +As you can see from the example above, Valkey client supports 3 parameters to configure the retry behaviour: * ``retry``: :class:`~.Retry` instance with a :ref:`backoff-label` strategy and the max number of retries * ``retry_on_error``: list of :ref:`exceptions-label` to retry on @@ -34,30 +34,30 @@ If either ``retry_on_error`` or ``retry_on_timeout`` are passed and no ``retry`` by default it uses a ``Retry(NoBackoff(), 1)`` (meaning 1 retry right after the first failure). 
-Retry in Redis Cluster +Retry in Valkey Cluster ************************** ->>> from redis.backoff import ExponentialBackoff ->>> from redis.retry import Retry ->>> from redis.cluster import RedisCluster +>>> from valkey.backoff import ExponentialBackoff +>>> from valkey.retry import Retry +>>> from valkey.cluster import ValkeyCluster >>> >>> # Run 3 retries with exponential backoff strategy >>> retry = Retry(ExponentialBackoff(), 3) ->>> # Redis Cluster client with retries ->>> rc = RedisCluster(host='localhost', port=6379, retry=retry, cluster_error_retry_attempts=2) +>>> # Valkey Cluster client with retries +>>> rc = ValkeyCluster(host='localhost', port=6379, retry=retry, cluster_error_retry_attempts=2) -Retry behaviour in Redis Cluster is a little bit different from Standalone: +Retry behaviour in Valkey Cluster is a little bit different from Standalone: * ``retry``: :class:`~.Retry` instance with a :ref:`backoff-label` strategy and the max number of retries, default value is ``Retry(NoBackoff(), 0)`` * ``cluster_error_retry_attempts``: number of times to retry before raising an error when :class:`~.TimeoutError` or :class:`~.ConnectionError` or :class:`~.ClusterDownError` are encountered, default value is ``3`` Let's consider the following example: ->>> from redis.backoff import ExponentialBackoff ->>> from redis.retry import Retry ->>> from redis.cluster import RedisCluster +>>> from valkey.backoff import ExponentialBackoff +>>> from valkey.retry import Retry +>>> from valkey.cluster import ValkeyCluster >>> ->>> rc = RedisCluster(host='localhost', port=6379, retry=Retry(ExponentialBackoff(), 6), cluster_error_retry_attempts=1) +>>> rc = ValkeyCluster(host='localhost', port=6379, retry=Retry(ExponentialBackoff(), 6), cluster_error_retry_attempts=1) >>> rc.set('foo', 'bar') #. the client library calculates the hash slot for key 'foo'. 
diff --git a/pytest.ini b/pytest.ini index f1b716ae..49090d24 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,10 +1,10 @@ [pytest] addopts = -s markers = - redismod: run only the redis module tests + valkeymod: run only the valkey module tests pipeline: pipeline tests - onlycluster: marks tests to be run only with cluster mode redis - onlynoncluster: marks tests to be run only with standalone redis + onlycluster: marks tests to be run only with cluster mode valkey + onlynoncluster: marks tests to be run only with standalone valkey ssl: marker for only the ssl tests asyncio: marker for async tests replica: replica tests diff --git a/setup.py b/setup.py index cf3d5827..4c0f9039 100644 --- a/setup.py +++ b/setup.py @@ -2,38 +2,38 @@ from setuptools import find_packages, setup setup( - name="redis", - description="Python client for Redis database and key-value store", + name="valkey", + description="Python client for Valkey forked from redis-py", long_description=open("README.md").read().strip(), long_description_content_type="text/markdown", - keywords=["Redis", "key-value store", "database"], + keywords=["Valkey", "key-value store", "database"], license="MIT", version="5.1.0b5", packages=find_packages( include=[ - "redis", - "redis._parsers", - "redis.asyncio", - "redis.commands", - "redis.commands.bf", - "redis.commands.json", - "redis.commands.search", - "redis.commands.timeseries", - "redis.commands.graph", - "redis.parsers", + "valkey", + "valkey._parsers", + "valkey.asyncio", + "valkey.commands", + "valkey.commands.bf", + "valkey.commands.json", + "valkey.commands.search", + "valkey.commands.timeseries", + "valkey.commands.graph", + "valkey.parsers", ] ), - package_data={"redis": ["py.typed"]}, + package_data={"valkey": ["py.typed"]}, include_package_data=True, - url="https://github.com/redis/redis-py", + url="https://github.com/valkey-io/valkey-py", project_urls={ - "Documentation": "https://redis.readthedocs.io/en/latest/", - "Changes": 
"https://github.com/redis/redis-py/releases", - "Code": "https://github.com/redis/redis-py", - "Issue tracker": "https://github.com/redis/redis-py/issues", + "Documentation": "https://valkey-py.readthedocs.io/en/latest/", + "Changes": "https://github.com/valkey-io/valkey-py/releases", + "Code": "https://github.com/valkey-io/valkey-py", + "Issue tracker": "https://github.com/valkey-io/valkey-py/issues", }, - author="Redis Inc.", - author_email="oss@redis.com", + author="placeholder", + author_email="placeholder@valkey.io", python_requires=">=3.8", install_requires=[ 'async-timeout>=4.0.3; python_full_version<"3.11.3"', diff --git a/tasks.py b/tasks.py index c60fa279..1175bcdd 100644 --- a/tasks.py +++ b/tasks.py @@ -27,23 +27,23 @@ def build_docs(c): @task def linters(c): """Run code linters""" - run("flake8 tests redis") - run("black --target-version py37 --check --diff tests redis") - run("isort --check-only --diff tests redis") - run("vulture redis whitelist.py --min-confidence 80") - run("flynt --fail-on-change --dry-run tests redis") + run("flake8 tests valkey") + run("black --target-version py37 --check --diff tests valkey") + run("isort --check-only --diff tests valkey") + run("vulture valkey whitelist.py --min-confidence 80") + run("flynt --fail-on-change --dry-run tests valkey") @task def all_tests(c): - """Run all linters, and tests in redis-py.""" + """Run all linters, and tests in valkey-py.""" linters(c) tests(c) @task def tests(c, uvloop=False, protocol=2): - """Run the redis-py test suite against the current python, + """Run the valkey-py test suite against the current python, with and without hiredis. 
""" print("Starting Redis tests") @@ -53,28 +53,28 @@ def tests(c, uvloop=False, protocol=2): @task def standalone_tests(c, uvloop=False, protocol=2): - """Run tests against a standalone redis instance""" + """Run tests against a standalone valkey instance""" if uvloop: run( - f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --uvloop --junit-xml=standalone-uvloop-results.xml" + f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_valkey.xml -W always -m 'not onlycluster' --uvloop --junit-xml=standalone-uvloop-results.xml" ) else: run( - f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --junit-xml=standalone-results.xml" + f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_valkey.xml -W always -m 'not onlycluster' --junit-xml=standalone-results.xml" ) @task def cluster_tests(c, uvloop=False, protocol=2): - """Run tests against a redis cluster""" - cluster_url = "redis://localhost:16379/0" + """Run tests against a valkey cluster""" + cluster_url = "valkey://localhost:16379/0" if uvloop: run( - f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={cluster_url} --junit-xml=cluster-uvloop-results.xml --uvloop" + f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not valkeymod' --valkey-url={cluster_url} --junit-xml=cluster-uvloop-results.xml --uvloop" ) else: run( - f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_clusteclient.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={cluster_url} --junit-xml=cluster-results.xml" + f"pytest --protocol={protocol} --cov=./ --cov-report=xml:coverage_clusteclient.xml -W always -m 'not onlynoncluster and not valkeymod' --valkey-url={cluster_url} --junit-xml=cluster-results.xml" ) diff --git 
a/tests/conftest.py b/tests/conftest.py index e783b6e8..e46348ae 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,21 +7,21 @@ from urllib.parse import urlparse import pytest -import redis +import valkey from packaging.version import Version -from redis import Sentinel -from redis.backoff import NoBackoff -from redis.connection import Connection, parse_url -from redis.exceptions import RedisClusterException -from redis.retry import Retry - -REDIS_INFO = {} -default_redis_url = "redis://localhost:6379/0" +from valkey import Sentinel +from valkey.backoff import NoBackoff +from valkey.connection import Connection, parse_url +from valkey.exceptions import ValkeyClusterException +from valkey.retry import Retry + +VALKEY_INFO = {} +default_valkey_url = "valkey://localhost:6379/0" default_protocol = "2" -default_redismod_url = "redis://localhost:6379" +default_valkeymod_url = "valkey://localhost:6379" # default ssl client ignores verification for the purpose of testing -default_redis_ssl_url = "rediss://localhost:6666" +default_valkey_ssl_url = "valkeys://localhost:6666" default_cluster_nodes = 6 _DecoratedTest = TypeVar("_DecoratedTest", bound="Callable") @@ -74,10 +74,10 @@ def format_usage(self): def pytest_addoption(parser): parser.addoption( - "--redis-url", - default=default_redis_url, + "--valkey-url", + default=default_valkey_url, action="store", - help="Redis connection string, defaults to `%(default)s`", + help="Valkey connection string, defaults to `%(default)s`", ) parser.addoption( @@ -87,14 +87,14 @@ def pytest_addoption(parser): help="Protocol version, defaults to `%(default)s`", ) parser.addoption( - "--redis-ssl-url", - default=default_redis_ssl_url, + "--valkey-ssl-url", + default=default_valkey_ssl_url, action="store", - help="Redis SSL connection string, defaults to `%(default)s`", + help="Valkey SSL connection string, defaults to `%(default)s`", ) parser.addoption( - "--redis-cluster-nodes", + "--valkey-cluster-nodes", 
default=default_cluster_nodes, action="store", help="The number of cluster nodes that need to be " @@ -115,18 +115,18 @@ def pytest_addoption(parser): parser.addoption( "--master-service", action="store", - default="redis-py-test", - help="Name of the Redis master service that the sentinels are monitoring", + default="valkey-py-test", + help="Name of the Valkey master service that the sentinels are monitoring", ) -def _get_info(redis_url): - client = redis.Redis.from_url(redis_url) +def _get_info(valkey_url): + client = valkey.Valkey.from_url(valkey_url) info = client.info() try: client.execute_command("DPING") info["enterprise"] = True - except redis.ResponseError: + except valkey.ResponseError: info["enterprise"] = False client.connection_pool.disconnect() return info @@ -135,36 +135,36 @@ def _get_info(redis_url): def pytest_sessionstart(session): # during test discovery, e.g. with VS Code, we may not # have a server running. - redis_url = session.config.getoption("--redis-url") + valkey_url = session.config.getoption("--valkey-url") try: - info = _get_info(redis_url) - version = info["redis_version"] + info = _get_info(valkey_url) + version = info["valkey_version"] arch_bits = info["arch_bits"] cluster_enabled = info["cluster_enabled"] enterprise = info["enterprise"] - except redis.ConnectionError: + except valkey.ConnectionError: # provide optimistic defaults info = {} version = "10.0.0" arch_bits = 64 cluster_enabled = False enterprise = False - REDIS_INFO["version"] = version - REDIS_INFO["arch_bits"] = arch_bits - REDIS_INFO["cluster_enabled"] = cluster_enabled - REDIS_INFO["enterprise"] = enterprise - # store REDIS_INFO in config so that it is available from "condition strings" - session.config.REDIS_INFO = REDIS_INFO + VALKEY_INFO["version"] = version + VALKEY_INFO["arch_bits"] = arch_bits + VALKEY_INFO["cluster_enabled"] = cluster_enabled + VALKEY_INFO["enterprise"] = enterprise + # store VALKEY_INFO in config so that it is available from "condition 
strings" + session.config.VALKEY_INFO = VALKEY_INFO # module info try: - REDIS_INFO["modules"] = info["modules"] - except (KeyError, redis.exceptions.ConnectionError): + VALKEY_INFO["modules"] = info["modules"] + except (KeyError, valkey.exceptions.ConnectionError): pass if cluster_enabled: - cluster_nodes = session.config.getoption("--redis-cluster-nodes") - wait_for_cluster_creation(redis_url, cluster_nodes) + cluster_nodes = session.config.getoption("--valkey-cluster-nodes") + wait_for_cluster_creation(valkey_url, cluster_nodes) use_uvloop = session.config.getoption("--uvloop") @@ -179,12 +179,12 @@ def pytest_sessionstart(session): ) from e -def wait_for_cluster_creation(redis_url, cluster_nodes, timeout=60): +def wait_for_cluster_creation(valkey_url, cluster_nodes, timeout=60): """ Waits for the cluster creation to complete. As soon as all :cluster_nodes: nodes become available, the cluster will be considered ready. - :param redis_url: the cluster's url, e.g. redis://localhost:16379/0 + :param valkey_url: the cluster's url, e.g. valkey://localhost:16379/0 :param cluster_nodes: The number of nodes in the cluster :param timeout: the amount of time to wait (in seconds) """ @@ -194,67 +194,67 @@ def wait_for_cluster_creation(redis_url, cluster_nodes, timeout=60): print(f"Waiting for {cluster_nodes} cluster nodes to become available") while now < end_time: try: - client = redis.RedisCluster.from_url(redis_url) + client = valkey.ValkeyCluster.from_url(valkey_url) if len(client.get_nodes()) == int(cluster_nodes): print("All nodes are available!") break - except RedisClusterException: + except ValkeyClusterException: pass time.sleep(1) now = time.time() if now >= end_time: available_nodes = 0 if client is None else len(client.get_nodes()) - raise RedisClusterException( + raise ValkeyClusterException( f"The cluster did not become available after {timeout} seconds. 
" f"Only {available_nodes} nodes out of {cluster_nodes} are available" ) def skip_if_server_version_lt(min_version: str) -> _TestDecorator: - redis_version = REDIS_INFO.get("version", "0") - check = Version(redis_version) < Version(min_version) - return pytest.mark.skipif(check, reason=f"Redis version required >= {min_version}") + valkey_version = VALKEY_INFO.get("version", "0") + check = Version(valkey_version) < Version(min_version) + return pytest.mark.skipif(check, reason=f"Valkey version required >= {min_version}") def skip_if_server_version_gte(min_version: str) -> _TestDecorator: - redis_version = REDIS_INFO.get("version", "0") - check = Version(redis_version) >= Version(min_version) - return pytest.mark.skipif(check, reason=f"Redis version required < {min_version}") + valkey_version = VALKEY_INFO.get("version", "0") + check = Version(valkey_version) >= Version(min_version) + return pytest.mark.skipif(check, reason=f"Valkey version required < {min_version}") def skip_unless_arch_bits(arch_bits: int) -> _TestDecorator: return pytest.mark.skipif( - REDIS_INFO.get("arch_bits", "") != arch_bits, + VALKEY_INFO.get("arch_bits", "") != arch_bits, reason=f"server is not {arch_bits}-bit", ) def skip_ifmodversion_lt(min_version: str, module_name: str): try: - modules = REDIS_INFO["modules"] + modules = VALKEY_INFO["modules"] except KeyError: - return pytest.mark.skipif(True, reason="Redis server does not have modules") + return pytest.mark.skipif(True, reason="Valkey server does not have modules") if modules == []: - return pytest.mark.skipif(True, reason="No redis modules found") + return pytest.mark.skipif(True, reason="No valkey modules found") for j in modules: if module_name == j.get("name"): version = j.get("ver") mv = int(min_version.replace(".", "")) check = version < mv - return pytest.mark.skipif(check, reason="Redis module version") + return pytest.mark.skipif(check, reason="Valkey module version") - raise AttributeError(f"No redis module named 
{module_name}") + raise AttributeError(f"No valkey module named {module_name}") -def skip_if_redis_enterprise() -> _TestDecorator: - check = REDIS_INFO.get("enterprise", False) is True - return pytest.mark.skipif(check, reason="Redis enterprise") +def skip_if_valkey_enterprise() -> _TestDecorator: + check = VALKEY_INFO.get("enterprise", False) is True + return pytest.mark.skipif(check, reason="Valkey enterprise") -def skip_ifnot_redis_enterprise() -> _TestDecorator: - check = REDIS_INFO.get("enterprise", False) is False - return pytest.mark.skipif(check, reason="Not running in redis enterprise") +def skip_ifnot_valkey_enterprise() -> _TestDecorator: + check = VALKEY_INFO.get("enterprise", False) is False + return pytest.mark.skipif(check, reason="Not running in valkey enterprise") def skip_if_nocryptography() -> _TestDecorator: @@ -279,27 +279,27 @@ def _get_client( cls, request, single_connection_client=True, flushdb=True, from_url=None, **kwargs ): """ - Helper for fixtures or tests that need a Redis client + Helper for fixtures or tests that need a Valkey client - Uses the "--redis-url" command line argument for connection info. Unlike + Uses the "--valkey-url" command line argument for connection info. Unlike ConnectionPool.from_url, keyword arguments to this function override values specified in the URL. 
""" if from_url is None: - redis_url = request.config.getoption("--redis-url") + valkey_url = request.config.getoption("--valkey-url") else: - redis_url = from_url - if "protocol" not in redis_url and kwargs.get("protocol") is None: + valkey_url = from_url + if "protocol" not in valkey_url and kwargs.get("protocol") is None: kwargs["protocol"] = request.config.getoption("--protocol") - cluster_mode = REDIS_INFO["cluster_enabled"] + cluster_mode = VALKEY_INFO["cluster_enabled"] if not cluster_mode: - url_options = parse_url(redis_url) + url_options = parse_url(valkey_url) url_options.update(kwargs) - pool = redis.ConnectionPool(**url_options) + pool = valkey.ConnectionPool(**url_options) client = cls(connection_pool=pool) else: - client = redis.RedisCluster.from_url(redis_url, **kwargs) + client = valkey.ValkeyCluster.from_url(valkey_url, **kwargs) single_connection_client = False if single_connection_client: client = client.client() @@ -310,7 +310,7 @@ def teardown(): if flushdb: try: client.flushdb() - except redis.ConnectionError: + except valkey.ConnectionError: # handle cases where a test disconnected a client # just manually retry the flushdb client.flushdb() @@ -327,7 +327,7 @@ def cluster_teardown(client, flushdb): if flushdb: try: client.flushdb(target_nodes="primaries") - except redis.ConnectionError: + except valkey.ConnectionError: # handle cases where a test disconnected a client # just manually retry the flushdb client.flushdb(target_nodes="primaries") @@ -337,32 +337,32 @@ def cluster_teardown(client, flushdb): @pytest.fixture() def r(request): - with _get_client(redis.Redis, request) as client: + with _get_client(valkey.Valkey, request) as client: yield client @pytest.fixture() def decoded_r(request): - with _get_client(redis.Redis, request, decode_responses=True) as client: + with _get_client(valkey.Valkey, request, decode_responses=True) as client: yield client @pytest.fixture() def r_timeout(request): - with _get_client(redis.Redis, request, 
socket_timeout=1) as client: + with _get_client(valkey.Valkey, request, socket_timeout=1) as client: yield client @pytest.fixture() def r2(request): "A second client for tests that need multiple" - with _get_client(redis.Redis, request) as client: + with _get_client(valkey.Valkey, request) as client: yield client @pytest.fixture() def sslclient(request): - with _get_client(redis.Redis, request, ssl=True) as client: + with _get_client(valkey.Valkey, request, ssl=True) as client: yield client @@ -405,19 +405,19 @@ def _gen_cluster_mock_resp(r, response): @pytest.fixture() def mock_cluster_resp_ok(request, **kwargs): - r = _get_client(redis.Redis, request, **kwargs) + r = _get_client(valkey.Valkey, request, **kwargs) yield from _gen_cluster_mock_resp(r, "OK") @pytest.fixture() def mock_cluster_resp_int(request, **kwargs): - r = _get_client(redis.Redis, request, **kwargs) + r = _get_client(valkey.Valkey, request, **kwargs) yield from _gen_cluster_mock_resp(r, 2) @pytest.fixture() def mock_cluster_resp_info(request, **kwargs): - r = _get_client(redis.Redis, request, **kwargs) + r = _get_client(valkey.Valkey, request, **kwargs) response = ( "cluster_state:ok\r\ncluster_slots_assigned:16384\r\n" "cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n" @@ -431,7 +431,7 @@ def mock_cluster_resp_info(request, **kwargs): @pytest.fixture() def mock_cluster_resp_nodes(request, **kwargs): - r = _get_client(redis.Redis, request, **kwargs) + r = _get_client(valkey.Valkey, request, **kwargs) response = ( "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 " "slave aa90da731f673a99617dfe930306549a09f83a6b 0 " @@ -455,7 +455,7 @@ def mock_cluster_resp_nodes(request, **kwargs): @pytest.fixture() def mock_cluster_resp_slaves(request, **kwargs): - r = _get_client(redis.Redis, request, **kwargs) + r = _get_client(valkey.Valkey, request, **kwargs) response = ( "['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 " "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 " @@ -466,7 
+466,7 @@ def mock_cluster_resp_slaves(request, **kwargs): @pytest.fixture(scope="session") def master_host(request): - url = request.config.getoption("--redis-url") + url = request.config.getoption("--valkey-url") parts = urlparse(url) return parts.hostname, (parts.port or 6379) @@ -477,12 +477,12 @@ def wait_for_command(client, monitor, command, key=None): # for, something went wrong if key is None: # generate key - redis_version = REDIS_INFO["version"] - if Version(redis_version) >= Version("5.0.0"): + valkey_version = VALKEY_INFO["version"] + if Version(valkey_version) >= Version("5.0.0"): id_str = str(client.client_id()) else: id_str = f"{random.randrange(2 ** 32):08x}" - key = f"__REDIS-PY-{id_str}__" + key = f"__VALKEY-PY-{id_str}__" client.get(key) while True: monitor_response = monitor.next_command() @@ -493,17 +493,17 @@ def wait_for_command(client, monitor, command, key=None): def is_resp2_connection(r): - if isinstance(r, redis.Redis) or isinstance(r, redis.asyncio.Redis): + if isinstance(r, valkey.Valkey) or isinstance(r, valkey.asyncio.Valkey): protocol = r.connection_pool.connection_kwargs.get("protocol") - elif isinstance(r, redis.cluster.AbstractRedisCluster): + elif isinstance(r, valkey.cluster.AbstractValkeyCluster): protocol = r.nodes_manager.connection_kwargs.get("protocol") return protocol in ["2", 2, None] def get_protocol_version(r): - if isinstance(r, redis.Redis) or isinstance(r, redis.asyncio.Redis): + if isinstance(r, valkey.Valkey) or isinstance(r, valkey.asyncio.Valkey): return r.connection_pool.connection_kwargs.get("protocol") - elif isinstance(r, redis.cluster.AbstractRedisCluster): + elif isinstance(r, valkey.cluster.AbstractValkeyCluster): return r.nodes_manager.connection_kwargs.get("protocol") diff --git a/tests/test_asyncio/conftest.py b/tests/test_asyncio/conftest.py index cff239fa..55fd1016 100644 --- a/tests/test_asyncio/conftest.py +++ b/tests/test_asyncio/conftest.py @@ -4,22 +4,22 @@ import pytest import pytest_asyncio 
-import redis.asyncio as redis +import valkey.asyncio as valkey from packaging.version import Version -from redis._parsers import _AsyncHiredisParser, _AsyncRESP2Parser -from redis.asyncio import Sentinel -from redis.asyncio.client import Monitor -from redis.asyncio.connection import Connection, parse_url -from redis.asyncio.retry import Retry -from redis.backoff import NoBackoff -from redis.utils import HIREDIS_AVAILABLE -from tests.conftest import REDIS_INFO +from tests.conftest import VALKEY_INFO +from valkey._parsers import _AsyncHiredisParser, _AsyncRESP2Parser +from valkey.asyncio import Sentinel +from valkey.asyncio.client import Monitor +from valkey.asyncio.connection import Connection, parse_url +from valkey.asyncio.retry import Retry +from valkey.backoff import NoBackoff +from valkey.utils import HIREDIS_AVAILABLE from .compat import mock -async def _get_info(redis_url): - client = redis.Redis.from_url(redis_url) +async def _get_info(valkey_url): + client = valkey.Valkey.from_url(valkey_url) info = await client.info() await client.connection_pool.disconnect() return info @@ -30,7 +30,7 @@ async def _get_info(redis_url): pytest.param( (True, _AsyncRESP2Parser), marks=pytest.mark.skipif( - 'config.REDIS_INFO["cluster_enabled"]', reason="cluster mode enabled" + 'config.VALKEY_INFO["cluster_enabled"]', reason="cluster mode enabled" ), ), (False, _AsyncRESP2Parser), @@ -38,7 +38,7 @@ async def _get_info(redis_url): (True, _AsyncHiredisParser), marks=[ pytest.mark.skipif( - 'config.REDIS_INFO["cluster_enabled"]', + 'config.VALKEY_INFO["cluster_enabled"]', reason="cluster mode enabled", ), pytest.mark.skipif( @@ -60,31 +60,31 @@ async def _get_info(redis_url): "pool-hiredis", ], ) -async def create_redis(request): - """Wrapper around redis.create_redis.""" +async def create_valkey(request): + """Wrapper around valkey.create_valkey.""" single_connection, parser_cls = request.param teardown_clients = [] async def client_factory( - url: str = 
request.config.getoption("--redis-url"), - cls=redis.Redis, + url: str = request.config.getoption("--valkey-url"), + cls=valkey.Valkey, flushdb=True, **kwargs, ): if "protocol" not in url and kwargs.get("protocol") is None: kwargs["protocol"] = request.config.getoption("--protocol") - cluster_mode = REDIS_INFO["cluster_enabled"] + cluster_mode = VALKEY_INFO["cluster_enabled"] if not cluster_mode: single = kwargs.pop("single_connection_client", False) or single_connection parser_class = kwargs.pop("parser_class", None) or parser_cls url_options = parse_url(url) url_options.update(kwargs) - pool = redis.ConnectionPool(parser_class=parser_class, **url_options) + pool = valkey.ConnectionPool(parser_class=parser_class, **url_options) client = cls(connection_pool=pool) else: - client = redis.RedisCluster.from_url(url, **kwargs) + client = valkey.ValkeyCluster.from_url(url, **kwargs) await client.initialize() single = False if single: @@ -96,7 +96,7 @@ async def teardown(): if flushdb and "username" not in kwargs: try: await client.flushdb() - except redis.ConnectionError: + except valkey.ConnectionError: # handle cases where a test disconnected a client # just manually retry the flushdb await client.flushdb() @@ -106,7 +106,7 @@ async def teardown(): if flushdb: try: await client.flushdb(target_nodes="primaries") - except redis.ConnectionError: + except valkey.ConnectionError: # handle cases where a test disconnected a client # just manually retry the flushdb await client.flushdb(target_nodes="primaries") @@ -122,19 +122,19 @@ async def teardown(): @pytest_asyncio.fixture() -async def r(create_redis): - return await create_redis() +async def r(create_valkey): + return await create_valkey() @pytest_asyncio.fixture() -async def r2(create_redis): +async def r2(create_valkey): """A second client for tests that need multiple""" - return await create_redis() + return await create_valkey() @pytest_asyncio.fixture() -async def decoded_r(create_redis): - return await 
create_redis(decode_responses=True) +async def decoded_r(create_valkey): + return await create_valkey(decode_responses=True) @pytest_asyncio.fixture() @@ -175,22 +175,22 @@ def _gen_cluster_mock_resp(r, response): @pytest_asyncio.fixture() -async def mock_cluster_resp_ok(create_redis, **kwargs): - r = await create_redis(**kwargs) +async def mock_cluster_resp_ok(create_valkey, **kwargs): + r = await create_valkey(**kwargs) for mocked in _gen_cluster_mock_resp(r, "OK"): yield mocked @pytest_asyncio.fixture() -async def mock_cluster_resp_int(create_redis, **kwargs): - r = await create_redis(**kwargs) +async def mock_cluster_resp_int(create_valkey, **kwargs): + r = await create_valkey(**kwargs) for mocked in _gen_cluster_mock_resp(r, 2): yield mocked @pytest_asyncio.fixture() -async def mock_cluster_resp_info(create_redis, **kwargs): - r = await create_redis(**kwargs) +async def mock_cluster_resp_info(create_valkey, **kwargs): + r = await create_valkey(**kwargs) response = ( "cluster_state:ok\r\ncluster_slots_assigned:16384\r\n" "cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n" @@ -204,8 +204,8 @@ async def mock_cluster_resp_info(create_redis, **kwargs): @pytest_asyncio.fixture() -async def mock_cluster_resp_nodes(create_redis, **kwargs): - r = await create_redis(**kwargs) +async def mock_cluster_resp_nodes(create_valkey, **kwargs): + r = await create_valkey(**kwargs) response = ( "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 " "slave aa90da731f673a99617dfe930306549a09f83a6b 0 " @@ -229,8 +229,8 @@ async def mock_cluster_resp_nodes(create_redis, **kwargs): @pytest_asyncio.fixture() -async def mock_cluster_resp_slaves(create_redis, **kwargs): - r = await create_redis(**kwargs) +async def mock_cluster_resp_slaves(create_valkey, **kwargs): + r = await create_valkey(**kwargs) response = ( "['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 " "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 " @@ -241,19 +241,19 @@ async def 
mock_cluster_resp_slaves(create_redis, **kwargs): async def wait_for_command( - client: redis.Redis, monitor: Monitor, command: str, key: Union[str, None] = None + client: valkey.Valkey, monitor: Monitor, command: str, key: Union[str, None] = None ): # issue a command with a key name that's local to this process. # if we find a command with our key before the command we're waiting # for, something went wrong if key is None: # generate key - redis_version = REDIS_INFO["version"] - if Version(redis_version) >= Version("5.0.0"): + valkey_version = VALKEY_INFO["version"] + if Version(valkey_version) >= Version("5.0.0"): id_str = str(await client.client_id()) else: id_str = f"{random.randrange(2 ** 32):08x}" - key = f"__REDIS-PY-{id_str}__" + key = f"__VALKEY-PY-{id_str}__" await client.get(key) while True: monitor_response = await monitor.next_command() @@ -291,11 +291,11 @@ def asynccontextmanager(func): # helpers to get the connection arguments for this run @pytest.fixture() -def redis_url(request): - return request.config.getoption("--redis-url") +def valkey_url(request): + return request.config.getoption("--valkey-url") @pytest.fixture() def connect_args(request): - url = request.config.getoption("--redis-url") + url = request.config.getoption("--valkey-url") return parse_url(url) diff --git a/tests/test_asyncio/test_bloom.py b/tests/test_asyncio/test_bloom.py index 27884441..b19f3d24 100644 --- a/tests/test_asyncio/test_bloom.py +++ b/tests/test_asyncio/test_bloom.py @@ -1,21 +1,23 @@ from math import inf import pytest -import redis.asyncio as redis -from redis.exceptions import ModuleError, RedisError -from redis.utils import HIREDIS_AVAILABLE +import valkey.asyncio as valkey from tests.conftest import ( assert_resp_response, is_resp2_connection, skip_ifmodversion_lt, ) +from valkey.exceptions import ModuleError, ValkeyError +from valkey.utils import HIREDIS_AVAILABLE + +pytestmark = pytest.mark.skip def intlist(obj): return [int(v) for v in obj] -async def 
test_create(decoded_r: redis.Redis): +async def test_create(decoded_r: valkey.Valkey): """Test CREATE/RESERVE calls""" assert await decoded_r.bf().create("bloom", 0.01, 1000) assert await decoded_r.bf().create("bloom_e", 0.01, 1000, expansion=1) @@ -30,11 +32,11 @@ async def test_create(decoded_r: redis.Redis): @pytest.mark.experimental -async def test_tdigest_create(decoded_r: redis.Redis): +async def test_tdigest_create(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("tDigest", 100) -async def test_bf_add(decoded_r: redis.Redis): +async def test_bf_add(decoded_r: valkey.Valkey): assert await decoded_r.bf().create("bloom", 0.01, 1000) assert 1 == await decoded_r.bf().add("bloom", "foo") assert 0 == await decoded_r.bf().add("bloom", "foo") @@ -46,7 +48,7 @@ async def test_bf_add(decoded_r: redis.Redis): assert [1, 0] == intlist(await decoded_r.bf().mexists("bloom", "foo", "noexist")) -async def test_bf_insert(decoded_r: redis.Redis): +async def test_bf_insert(decoded_r: valkey.Valkey): assert await decoded_r.bf().create("bloom", 0.01, 1000) assert [1] == intlist(await decoded_r.bf().insert("bloom", ["foo"])) assert [0, 1] == intlist(await decoded_r.bf().insert("bloom", ["foo", "bar"])) @@ -76,7 +78,7 @@ async def test_bf_insert(decoded_r: redis.Redis): ) -async def test_bf_scandump_and_loadchunk(decoded_r: redis.Redis): +async def test_bf_scandump_and_loadchunk(decoded_r: valkey.Valkey): # Store a filter await decoded_r.bf().create("myBloom", "0.0001", "1000") @@ -127,7 +129,7 @@ async def do_verify(): await decoded_r.bf().create("myBloom", "0.0001", "10000000") -async def test_bf_info(decoded_r: redis.Redis): +async def test_bf_info(decoded_r: valkey.Valkey): expansion = 4 # Store a filter await decoded_r.bf().create("nonscaling", "0.0001", "1000", noScale=True) @@ -154,11 +156,11 @@ async def test_bf_info(decoded_r: redis.Redis): "myBloom", "0.0001", "1000", expansion=expansion, noScale=True ) assert False - except RedisError: + except 
ValkeyError: assert True -async def test_bf_card(decoded_r: redis.Redis): +async def test_bf_card(decoded_r: valkey.Valkey): # return 0 if the key does not exist assert await decoded_r.bf().card("not_exist") == 0 @@ -167,12 +169,12 @@ async def test_bf_card(decoded_r: redis.Redis): assert await decoded_r.bf().card("bf1") == 1 # Error when key is of a type other than Bloom filtedecoded_r. - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): await decoded_r.set("setKey", "value") await decoded_r.bf().card("setKey") -async def test_cf_add_and_insert(decoded_r: redis.Redis): +async def test_cf_add_and_insert(decoded_r: valkey.Valkey): assert await decoded_r.cf().create("cuckoo", 1000) assert await decoded_r.cf().add("cuckoo", "filter") assert not await decoded_r.cf().addnx("cuckoo", "filter") @@ -197,7 +199,7 @@ async def test_cf_add_and_insert(decoded_r: redis.Redis): ) -async def test_cf_exists_and_del(decoded_r: redis.Redis): +async def test_cf_exists_and_del(decoded_r: valkey.Valkey): assert await decoded_r.cf().create("cuckoo", 1000) assert await decoded_r.cf().add("cuckoo", "filter") assert await decoded_r.cf().exists("cuckoo", "filter") @@ -208,7 +210,7 @@ async def test_cf_exists_and_del(decoded_r: redis.Redis): assert 0 == await decoded_r.cf().count("cuckoo", "filter") -async def test_cms(decoded_r: redis.Redis): +async def test_cms(decoded_r: valkey.Valkey): assert await decoded_r.cms().initbydim("dim", 1000, 5) assert await decoded_r.cms().initbyprob("prob", 0.01, 0.01) assert await decoded_r.cms().incrby("dim", ["foo"], [5]) @@ -224,7 +226,7 @@ async def test_cms(decoded_r: redis.Redis): @pytest.mark.onlynoncluster -async def test_cms_merge(decoded_r: redis.Redis): +async def test_cms_merge(decoded_r: valkey.Valkey): assert await decoded_r.cms().initbydim("A", 1000, 5) assert await decoded_r.cms().initbydim("B", 1000, 5) assert await decoded_r.cms().initbydim("C", 1000, 5) @@ -240,7 +242,7 @@ async def 
test_cms_merge(decoded_r: redis.Redis): assert [16, 15, 21] == await decoded_r.cms().query("C", "foo", "bar", "baz") -async def test_topk(decoded_r: redis.Redis): +async def test_topk(decoded_r: valkey.Valkey): # test list with empty buckets assert await decoded_r.topk().reserve("topk", 3, 50, 4, 0.9) assert [ @@ -320,7 +322,7 @@ async def test_topk(decoded_r: redis.Redis): assert 0.9 == round(float(info["decay"]), 1) -async def test_topk_incrby(decoded_r: redis.Redis): +async def test_topk_incrby(decoded_r: valkey.Valkey): await decoded_r.flushdb() assert await decoded_r.topk().reserve("topk", 3, 10, 3, 1) assert [None, None, None] == await decoded_r.topk().incrby( @@ -335,7 +337,7 @@ async def test_topk_incrby(decoded_r: redis.Redis): @pytest.mark.experimental -async def test_tdigest_reset(decoded_r: redis.Redis): +async def test_tdigest_reset(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("tDigest", 10) # reset on empty histogram assert await decoded_r.tdigest().reset("tDigest") @@ -351,7 +353,7 @@ async def test_tdigest_reset(decoded_r: redis.Redis): @pytest.mark.onlynoncluster -async def test_tdigest_merge(decoded_r: redis.Redis): +async def test_tdigest_merge(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("to-tDigest", 10) assert await decoded_r.tdigest().create("from-tDigest", 10) # insert data-points into sketch @@ -378,7 +380,7 @@ async def test_tdigest_merge(decoded_r: redis.Redis): @pytest.mark.experimental -async def test_tdigest_min_and_max(decoded_r: redis.Redis): +async def test_tdigest_min_and_max(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("tDigest", 100) # insert data-points into sketch assert await decoded_r.tdigest().add("tDigest", [1, 2, 3]) @@ -389,7 +391,7 @@ async def test_tdigest_min_and_max(decoded_r: redis.Redis): @pytest.mark.experimental @skip_ifmodversion_lt("2.4.0", "bf") -async def test_tdigest_quantile(decoded_r: redis.Redis): +async def test_tdigest_quantile(decoded_r: 
valkey.Valkey): assert await decoded_r.tdigest().create("tDigest", 500) # insert data-points into sketch assert await decoded_r.tdigest().add( @@ -416,7 +418,7 @@ async def test_tdigest_quantile(decoded_r: redis.Redis): @pytest.mark.experimental -async def test_tdigest_cdf(decoded_r: redis.Redis): +async def test_tdigest_cdf(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("tDigest", 100) # insert data-points into sketch assert await decoded_r.tdigest().add("tDigest", list(range(1, 10))) @@ -428,7 +430,7 @@ async def test_tdigest_cdf(decoded_r: redis.Redis): @pytest.mark.experimental @skip_ifmodversion_lt("2.4.0", "bf") -async def test_tdigest_trimmed_mean(decoded_r: redis.Redis): +async def test_tdigest_trimmed_mean(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("tDigest", 100) # insert data-points into sketch assert await decoded_r.tdigest().add("tDigest", list(range(1, 10))) @@ -437,7 +439,7 @@ async def test_tdigest_trimmed_mean(decoded_r: redis.Redis): @pytest.mark.experimental -async def test_tdigest_rank(decoded_r: redis.Redis): +async def test_tdigest_rank(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("t-digest", 500) assert await decoded_r.tdigest().add("t-digest", list(range(0, 20))) assert -1 == (await decoded_r.tdigest().rank("t-digest", -1))[0] @@ -447,7 +449,7 @@ async def test_tdigest_rank(decoded_r: redis.Redis): @pytest.mark.experimental -async def test_tdigest_revrank(decoded_r: redis.Redis): +async def test_tdigest_revrank(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("t-digest", 500) assert await decoded_r.tdigest().add("t-digest", list(range(0, 20))) assert -1 == (await decoded_r.tdigest().revrank("t-digest", 20))[0] @@ -456,28 +458,28 @@ async def test_tdigest_revrank(decoded_r: redis.Redis): @pytest.mark.experimental -async def test_tdigest_byrank(decoded_r: redis.Redis): +async def test_tdigest_byrank(decoded_r: valkey.Valkey): assert await 
decoded_r.tdigest().create("t-digest", 500) assert await decoded_r.tdigest().add("t-digest", list(range(1, 11))) assert 1 == (await decoded_r.tdigest().byrank("t-digest", 0))[0] assert 10 == (await decoded_r.tdigest().byrank("t-digest", 9))[0] assert (await decoded_r.tdigest().byrank("t-digest", 100))[0] == inf - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): (await decoded_r.tdigest().byrank("t-digest", -1))[0] @pytest.mark.experimental -async def test_tdigest_byrevrank(decoded_r: redis.Redis): +async def test_tdigest_byrevrank(decoded_r: valkey.Valkey): assert await decoded_r.tdigest().create("t-digest", 500) assert await decoded_r.tdigest().add("t-digest", list(range(1, 11))) assert 10 == (await decoded_r.tdigest().byrevrank("t-digest", 0))[0] assert 1 == (await decoded_r.tdigest().byrevrank("t-digest", 9))[0] assert (await decoded_r.tdigest().byrevrank("t-digest", 100))[0] == -inf - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): (await decoded_r.tdigest().byrevrank("t-digest", -1))[0] -# # async def test_pipeline(decoded_r: redis.Redis): +# # async def test_pipeline(decoded_r: valkey.Valkey): # pipeline = await decoded_r.bf().pipeline() # assert not await decoded_r.bf().execute_command("get pipeline") # diff --git a/tests/test_asyncio/test_cache.py b/tests/test_asyncio/test_cache.py index 7a7f881c..13092a23 100644 --- a/tests/test_asyncio/test_cache.py +++ b/tests/test_asyncio/test_cache.py @@ -2,15 +2,15 @@ import pytest import pytest_asyncio -from redis._cache import EvictionPolicy, _LocalCache -from redis.utils import HIREDIS_AVAILABLE +from valkey._cache import EvictionPolicy, _LocalCache +from valkey.utils import HIREDIS_AVAILABLE @pytest_asyncio.fixture -async def r(request, create_redis): +async def r(request, create_valkey): cache = request.param.get("cache") kwargs = request.param.get("kwargs", {}) - r = await create_redis(protocol=3, client_cache=cache, **kwargs) + r = await 
create_valkey(protocol=3, client_cache=cache, **kwargs) yield r, cache @@ -25,29 +25,29 @@ class TestLocalCache: @pytest.mark.onlynoncluster async def test_get_from_cache(self, r, r2): r, cache = r - # add key to redis + # add key to valkey await r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert await r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) await r2.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) await r.ping() # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert await r.get("foo") == b"barbar" @pytest.mark.parametrize("r", [{"cache": _LocalCache(max_size=3)}], indirect=True) async def test_cache_lru_eviction(self, r): r, cache = r - # add 3 keys to redis + # add 3 keys to valkey await r.set("foo", "bar") await r.set("foo2", "bar2") await r.set("foo3", "bar3") - # get 3 keys from redis and save in local cache + # get 3 keys from valkey and save in local cache assert await r.get("foo") == b"bar" assert await r.get("foo2") == b"bar2" assert await r.get("foo3") == b"bar3" @@ -55,7 +55,7 @@ async def test_cache_lru_eviction(self, r): assert cache.get(("GET", "foo")) == b"bar" assert cache.get(("GET", "foo2")) == b"bar2" assert cache.get(("GET", "foo3")) == b"bar3" - # add 1 more key to redis (exceed the max size) + # add 1 more key to valkey (exceed the max size) await r.set("foo4", "bar4") assert await r.get("foo4") == b"bar4" # the first key is not in the local cache anymore @@ -64,9 +64,9 @@ async def test_cache_lru_eviction(self, r): @pytest.mark.parametrize("r", [{"cache": _LocalCache(ttl=1)}], indirect=True) async def test_cache_ttl(self, r): r, cache = r - 
# add key to redis + # add key to valkey await r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert await r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" @@ -82,11 +82,11 @@ async def test_cache_ttl(self, r): ) async def test_cache_lfu_eviction(self, r): r, cache = r - # add 3 keys to redis + # add 3 keys to valkey await r.set("foo", "bar") await r.set("foo2", "bar2") await r.set("foo3", "bar3") - # get 3 keys from redis and save in local cache + # get 3 keys from valkey and save in local cache assert await r.get("foo") == b"bar" assert await r.get("foo2") == b"bar2" assert await r.get("foo3") == b"bar3" @@ -94,7 +94,7 @@ async def test_cache_lfu_eviction(self, r): assert cache.get(("GET", "foo")) == b"bar" assert cache.get(("GET", "foo")) == b"bar" assert cache.get(("GET", "foo3")) == b"bar3" - # add 1 more key to redis (exceed the max size) + # add 1 more key to valkey (exceed the max size) await r.set("foo4", "bar4") assert await r.get("foo4") == b"bar4" # test the eviction policy @@ -111,17 +111,17 @@ async def test_cache_lfu_eviction(self, r): async def test_cache_decode_response(self, r): r, cache = r await r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert await r.get("foo") == "bar" # get key from local cache assert cache.get(("GET", "foo")) == "bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) await r.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) await r.ping() # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert await r.get("foo") == "barbar" @pytest.mark.parametrize( @@ -131,7 +131,7 @@ async def test_cache_decode_response(self, 
r): ) async def test_cache_deny_list(self, r): r, cache = r - # add list to redis + # add list to valkey await r.lpush("mylist", "foo", "bar", "baz") assert await r.llen("mylist") == 3 assert await r.lindex("mylist", 1) == b"bar" @@ -145,7 +145,7 @@ async def test_cache_deny_list(self, r): ) async def test_cache_allow_list(self, r): r, cache = r - # add list to redis + # add list to valkey await r.lpush("mylist", "foo", "bar", "baz") assert await r.llen("mylist") == 3 assert await r.lindex("mylist", 1) == b"bar" @@ -188,7 +188,7 @@ async def test_csc_not_cause_disconnects(self, r): await r.mset({"a": 2, "b": 2, "c": 2, "d": 2, "e": 2}) id3 = await r.client_id() - # client should get value from redis server post invalidate messages + # client should get value from valkey server post invalidate messages assert await r.mget("a", "b", "c", "d", "e") == ["2", "2", "2", "2", "2"] await r.mset({"a": 3, "b": 3, "c": 3, "d": 3, "e": 3}) @@ -248,7 +248,7 @@ async def test_delete_one_command(self, r): # the other command is still in the local cache anymore assert cache.get(("MGET", "a{a}", "b{a}")) is None assert cache.get(("GET", "c")) == "1" - # get from redis + # get from valkey assert await r.mget("a{a}", "b{a}") == ["1", "1"] assert await r.get("c") == "1" @@ -271,7 +271,7 @@ async def test_invalidate_key(self, r): # one other command is still in the local cache anymore assert cache.get(("MGET", "a{a}", "b{a}")) is None assert cache.get(("GET", "c")) == "1" - # get from redis + # get from valkey assert await r.mget("a{a}", "b{a}") == ["1", "1"] assert await r.get("c") == "1" @@ -294,7 +294,7 @@ async def test_flush_entire_cache(self, r): # the commands are not in the local cache anymore assert cache.get(("MGET", "a{a}", "b{a}")) is None assert cache.get(("GET", "c")) is None - # get from redis + # get from valkey assert await r.mget("a{a}", "b{a}") == ["1", "1"] assert await r.get("c") == "1" @@ -305,20 +305,20 @@ class TestClusterLocalCache: 
@pytest.mark.parametrize("r", [{"cache": _LocalCache()}], indirect=True) async def test_get_from_cache(self, r, r2): r, cache = r - # add key to redis + # add key to valkey await r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert await r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) await r2.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) node = r.get_node_from_key("foo") await r.ping(target_nodes=node) # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert await r.get("foo") == b"barbar" @pytest.mark.parametrize( @@ -329,18 +329,18 @@ async def test_get_from_cache(self, r, r2): async def test_cache_decode_response(self, r): r, cache = r await r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert await r.get("foo") == "bar" # get key from local cache assert cache.get(("GET", "foo")) == "bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) await r.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) node = r.get_node_from_key("foo") await r.ping(target_nodes=node) # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert await r.get("foo") == "barbar" @pytest.mark.parametrize( @@ -374,17 +374,17 @@ class TestSentinelLocalCache: async def test_get_from_cache(self, local_cache, master): await master.set("foo", "bar") - # get key from redis and save in local cache + # get key from 
valkey and save in local cache assert await master.get("foo") == b"bar" # get key from local cache assert local_cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) await master.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) await master.ping() # the command is not in the local cache anymore assert local_cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert await master.get("foo") == b"barbar" @pytest.mark.parametrize( @@ -394,15 +394,15 @@ async def test_get_from_cache(self, local_cache, master): ) async def test_cache_decode_response(self, local_cache, sentinel_setup, master): await master.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert await master.get("foo") == "bar" # get key from local cache assert local_cache.get(("GET", "foo")) == "bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) await master.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) await master.ping() # the command is not in the local cache anymore assert local_cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert await master.get("foo") == "barbar" diff --git a/tests/test_asyncio/test_cluster.py b/tests/test_asyncio/test_cluster.py index 0d9510c6..3e8dead6 100644 --- a/tests/test_asyncio/test_cluster.py +++ b/tests/test_asyncio/test_cluster.py @@ -9,14 +9,22 @@ import pytest import pytest_asyncio from _pytest.fixtures import FixtureRequest -from redis._parsers import AsyncCommandsParser -from redis.asyncio.cluster import ClusterNode, NodesManager, RedisCluster -from redis.asyncio.connection import Connection, SSLConnection, 
async_timeout -from redis.asyncio.retry import Retry -from redis.backoff import ExponentialBackoff, NoBackoff, default_backoff -from redis.cluster import PIPELINE_BLOCKED_COMMANDS, PRIMARY, REPLICA, get_node_name -from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot -from redis.exceptions import ( +from tests.conftest import ( + assert_resp_response, + is_resp2_connection, + skip_if_server_version_gte, + skip_if_server_version_lt, + skip_if_valkey_enterprise, + skip_unless_arch_bits, +) +from valkey._parsers import AsyncCommandsParser +from valkey.asyncio.cluster import ClusterNode, NodesManager, ValkeyCluster +from valkey.asyncio.connection import Connection, SSLConnection, async_timeout +from valkey.asyncio.retry import Retry +from valkey.backoff import ExponentialBackoff, NoBackoff, default_backoff +from valkey.cluster import PIPELINE_BLOCKED_COMMANDS, PRIMARY, REPLICA, get_node_name +from valkey.crc import VALKEY_CLUSTER_HASH_SLOTS, key_slot +from valkey.exceptions import ( AskError, ClusterDownError, ConnectionError, @@ -24,19 +32,11 @@ MaxConnectionsError, MovedError, NoPermissionError, - RedisClusterException, - RedisError, ResponseError, + ValkeyClusterException, + ValkeyError, ) -from redis.utils import str_if_bytes -from tests.conftest import ( - assert_resp_response, - is_resp2_connection, - skip_if_redis_enterprise, - skip_if_server_version_gte, - skip_if_server_version_lt, - skip_unless_arch_bits, -) +from valkey.utils import str_if_bytes from ..ssl_utils import get_ssl_filename from .compat import aclosing, mock @@ -55,34 +55,34 @@ class NodeProxy: """A class to proxy a node connection to a different port""" - def __init__(self, addr, redis_addr): + def __init__(self, addr, valkey_addr): self.addr = addr - self.redis_addr = redis_addr + self.valkey_addr = valkey_addr self.send_event = asyncio.Event() self.server = None self.task = None self.n_connections = 0 async def start(self): - # test that we can connect to redis + # test that we can connect 
to valkey async with async_timeout(2): - _, redis_writer = await asyncio.open_connection(*self.redis_addr) - redis_writer.close() + _, valkey_writer = await asyncio.open_connection(*self.valkey_addr) + valkey_writer.close() self.server = await asyncio.start_server( self.handle, *self.addr, reuse_address=True ) self.task = asyncio.create_task(self.server.serve_forever()) async def handle(self, reader, writer): - # establish connection to redis - redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr) + # establish connection to valkey + valkey_reader, valkey_writer = await asyncio.open_connection(*self.valkey_addr) try: self.n_connections += 1 - pipe1 = asyncio.create_task(self.pipe(reader, redis_writer)) - pipe2 = asyncio.create_task(self.pipe(redis_reader, writer)) + pipe1 = asyncio.create_task(self.pipe(reader, valkey_writer)) + pipe2 = asyncio.create_task(self.pipe(valkey_reader, writer)) await asyncio.gather(pipe1, pipe2) finally: - redis_writer.close() + valkey_writer.close() async def aclose(self): self.task.cancel() @@ -106,7 +106,7 @@ async def pipe( @pytest_asyncio.fixture() -async def slowlog(r: RedisCluster) -> None: +async def slowlog(r: ValkeyCluster) -> None: """ Set the slowlog threshold to 0, and the max length to 128. This will force every @@ -128,11 +128,11 @@ async def slowlog(r: RedisCluster) -> None: await r.config_set("slowlog-max-len", old_max_length_value) -async def get_mocked_redis_client( +async def get_mocked_valkey_client( cluster_slots_raise_error=False, *args, **kwargs -) -> RedisCluster: +) -> ValkeyCluster: """ - Return a stable RedisCluster object that have deterministic + Return a stable ValkeyCluster object that have deterministic nodes and slots setup to remove the problem of different IP addresses on different installations and machines. 
""" @@ -178,7 +178,7 @@ def cmd_init_mock(self, r: ClusterNode) -> None: cmd_parser_initialize.side_effect = cmd_init_mock - return await RedisCluster(*args, **kwargs) + return await ValkeyCluster(*args, **kwargs) def mock_node_resp(node: ClusterNode, response: Any) -> ClusterNode: @@ -203,14 +203,14 @@ def mock_node_resp_exc(node: ClusterNode, exc: Exception) -> ClusterNode: return node -def mock_all_nodes_resp(rc: RedisCluster, response: Any) -> RedisCluster: +def mock_all_nodes_resp(rc: ValkeyCluster, response: Any) -> ValkeyCluster: for node in rc.get_nodes(): mock_node_resp(node, response) return rc async def moved_redirection_helper( - create_redis: Callable[..., RedisCluster], failover: bool = False + create_valkey: Callable[..., ValkeyCluster], failover: bool = False ) -> None: """ Test that the client handles MOVED response after a failover. @@ -228,7 +228,7 @@ async def moved_redirection_helper( 3. the redirected node's server type updated to 'primary' 4. the server type of the previous slot owner updated to 'replica' """ - rc = await create_redis(cls=RedisCluster, flushdb=False) + rc = await create_valkey(cls=ValkeyCluster, flushdb=False) slot = 12182 redirect_node = None # Get the current primary that holds this slot @@ -266,9 +266,9 @@ def ok_response(self, *args, **options): assert prev_primary.server_type == REPLICA -class TestRedisClusterObj: +class TestValkeyClusterObj: """ - Tests for the RedisCluster class + Tests for the ValkeyCluster class """ async def test_host_port_startup_node(self) -> None: @@ -276,13 +276,13 @@ async def test_host_port_startup_node(self) -> None: Test that it is possible to use host & port arguments as startup node args """ - cluster = await get_mocked_redis_client(host=default_host, port=default_port) + cluster = await get_mocked_valkey_client(host=default_host, port=default_port) assert cluster.get_node(host=default_host, port=default_port) is not None await cluster.aclose() async def test_aclosing(self) -> None: - 
cluster = await get_mocked_redis_client(host=default_host, port=default_port) + cluster = await get_mocked_valkey_client(host=default_host, port=default_port) called = 0 async def mock_aclose(): @@ -300,7 +300,7 @@ async def test_close_is_aclose(self) -> None: Test that it is possible to use host & port arguments as startup node args """ - cluster = await get_mocked_redis_client(host=default_host, port=default_port) + cluster = await get_mocked_valkey_client(host=default_host, port=default_port) called = 0 async def mock_aclose(): @@ -323,7 +323,7 @@ async def test_startup_nodes(self) -> None: ClusterNode(default_host, port_1), ClusterNode(default_host, port_2), ] - cluster = await get_mocked_redis_client(startup_nodes=startup_nodes) + cluster = await get_mocked_valkey_client(startup_nodes=startup_nodes) assert ( cluster.get_node(host=default_host, port=port_1) is not None and cluster.get_node(host=default_host, port=port_2) is not None @@ -332,7 +332,9 @@ async def test_startup_nodes(self) -> None: await cluster.aclose() startup_node = ClusterNode("127.0.0.1", 16379) - async with RedisCluster(startup_nodes=[startup_node], client_name="test") as rc: + async with ValkeyCluster( + startup_nodes=[startup_node], client_name="test" + ) as rc: assert await rc.set("A", 1) assert await rc.get("A") == b"1" assert all( @@ -346,8 +348,8 @@ async def test_startup_nodes(self) -> None: async def test_cluster_set_get_retry_object(self, request: FixtureRequest): retry = Retry(NoBackoff(), 2) - url = request.config.getoption("--redis-url") - async with RedisCluster.from_url(url, retry=retry) as r: + url = request.config.getoption("--valkey-url") + async with ValkeyCluster.from_url(url, retry=retry) as r: assert r.get_retry()._retries == retry._retries assert isinstance(r.get_retry()._backoff, NoBackoff) for node in r.get_nodes(): @@ -372,8 +374,8 @@ async def test_cluster_set_get_retry_object(self, request: FixtureRequest): assert new_conn.retry._retries == new_retry._retries async 
def test_cluster_retry_object(self, request: FixtureRequest) -> None: - url = request.config.getoption("--redis-url") - async with RedisCluster.from_url(url) as rc_default: + url = request.config.getoption("--valkey-url") + async with ValkeyCluster.from_url(url) as rc_default: # Test default retry retry = rc_default.connection_kwargs.get("retry") assert isinstance(retry, Retry) @@ -384,7 +386,7 @@ async def test_cluster_retry_object(self, request: FixtureRequest) -> None: ) == rc_default.get_node("127.0.0.1", 16380).connection_kwargs.get("retry") retry = Retry(ExponentialBackoff(10, 5), 5) - async with RedisCluster.from_url(url, retry=retry) as rc_custom_retry: + async with ValkeyCluster.from_url(url, retry=retry) as rc_custom_retry: # Test custom retry assert ( rc_custom_retry.get_node("127.0.0.1", 16379).connection_kwargs.get( @@ -393,7 +395,7 @@ async def test_cluster_retry_object(self, request: FixtureRequest) -> None: == retry ) - async with RedisCluster.from_url( + async with ValkeyCluster.from_url( url, connection_error_retry_attempts=0 ) as rc_no_retries: # Test no connection retries @@ -404,7 +406,7 @@ async def test_cluster_retry_object(self, request: FixtureRequest) -> None: is None ) - async with RedisCluster.from_url( + async with ValkeyCluster.from_url( url, retry=Retry(NoBackoff(), 0) ) as rc_no_retries: assert ( @@ -418,27 +420,27 @@ async def test_empty_startup_nodes(self) -> None: """ Test that exception is raised when empty providing empty startup_nodes """ - with pytest.raises(RedisClusterException) as ex: - RedisCluster(startup_nodes=[]) + with pytest.raises(ValkeyClusterException) as ex: + ValkeyCluster(startup_nodes=[]) assert str(ex.value).startswith( - "RedisCluster requires at least one node to discover the cluster" + "ValkeyCluster requires at least one node to discover the cluster" ), str_if_bytes(ex.value) async def test_from_url(self, request: FixtureRequest) -> None: - url = request.config.getoption("--redis-url") + url = 
request.config.getoption("--valkey-url") - async with RedisCluster.from_url(url) as rc: + async with ValkeyCluster.from_url(url) as rc: await rc.set("a", 1) await rc.get("a") == 1 - rc = RedisCluster.from_url("rediss://localhost:16379") + rc = ValkeyCluster.from_url("valkeys://localhost:16379") assert rc.connection_kwargs["connection_class"] is SSLConnection async def test_max_connections( - self, create_redis: Callable[..., RedisCluster] + self, create_valkey: Callable[..., ValkeyCluster] ) -> None: - rc = await create_redis(cls=RedisCluster, max_connections=10) + rc = await create_valkey(cls=ValkeyCluster, max_connections=10) for node in rc.get_nodes(): assert node.max_connections == 10 @@ -452,31 +454,31 @@ async def read_response_mocked(*args: Any, **kwargs: Any) -> None: with pytest.raises(MaxConnectionsError): await asyncio.gather( *( - rc.ping(target_nodes=RedisCluster.DEFAULT_NODE) + rc.ping(target_nodes=ValkeyCluster.DEFAULT_NODE) for _ in range(11) ) ) await rc.aclose() - async def test_execute_command_errors(self, r: RedisCluster) -> None: + async def test_execute_command_errors(self, r: ValkeyCluster) -> None: """ Test that if no key is provided then exception should be raised. """ - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: await r.execute_command("GET") assert str(ex.value).startswith( - "No way to dispatch this command to Redis Cluster. Missing key." + "No way to dispatch this command to Valkey Cluster. Missing key." 
) - async def test_execute_command_node_flag_primaries(self, r: RedisCluster) -> None: + async def test_execute_command_node_flag_primaries(self, r: ValkeyCluster) -> None: """ Test command execution with nodes flag PRIMARIES """ primaries = r.get_primaries() replicas = r.get_replicas() mock_all_nodes_resp(r, "PONG") - assert await r.ping(target_nodes=RedisCluster.PRIMARIES) is True + assert await r.ping(target_nodes=ValkeyCluster.PRIMARIES) is True for primary in primaries: conn = primary._free.pop() assert conn.read_response.called is True @@ -484,16 +486,16 @@ async def test_execute_command_node_flag_primaries(self, r: RedisCluster) -> Non conn = replica._free.pop() assert conn.read_response.called is not True - async def test_execute_command_node_flag_replicas(self, r: RedisCluster) -> None: + async def test_execute_command_node_flag_replicas(self, r: ValkeyCluster) -> None: """ Test command execution with nodes flag REPLICAS """ replicas = r.get_replicas() if not replicas: - r = await get_mocked_redis_client(default_host, default_port) + r = await get_mocked_valkey_client(default_host, default_port) primaries = r.get_primaries() mock_all_nodes_resp(r, "PONG") - assert await r.ping(target_nodes=RedisCluster.REPLICAS) is True + assert await r.ping(target_nodes=ValkeyCluster.REPLICAS) is True for replica in replicas: conn = replica._free.pop() assert conn.read_response.called is True @@ -503,22 +505,22 @@ async def test_execute_command_node_flag_replicas(self, r: RedisCluster) -> None await r.aclose() - async def test_execute_command_node_flag_all_nodes(self, r: RedisCluster) -> None: + async def test_execute_command_node_flag_all_nodes(self, r: ValkeyCluster) -> None: """ Test command execution with nodes flag ALL_NODES """ mock_all_nodes_resp(r, "PONG") - assert await r.ping(target_nodes=RedisCluster.ALL_NODES) is True + assert await r.ping(target_nodes=ValkeyCluster.ALL_NODES) is True for node in r.get_nodes(): conn = node._free.pop() assert 
conn.read_response.called is True - async def test_execute_command_node_flag_random(self, r: RedisCluster) -> None: + async def test_execute_command_node_flag_random(self, r: ValkeyCluster) -> None: """ Test command execution with nodes flag RANDOM """ mock_all_nodes_resp(r, "PONG") - assert await r.ping(target_nodes=RedisCluster.RANDOM) is True + assert await r.ping(target_nodes=ValkeyCluster.RANDOM) is True called_count = 0 for node in r.get_nodes(): conn = node._free.pop() @@ -526,7 +528,7 @@ async def test_execute_command_node_flag_random(self, r: RedisCluster) -> None: called_count += 1 assert called_count == 1 - async def test_execute_command_default_node(self, r: RedisCluster) -> None: + async def test_execute_command_default_node(self, r: ValkeyCluster) -> None: """ Test command execution without node flag is being executed on the default node @@ -537,7 +539,7 @@ async def test_execute_command_default_node(self, r: RedisCluster) -> None: conn = def_node._free.pop() assert conn.read_response.called - async def test_ask_redirection(self, r: RedisCluster) -> None: + async def test_ask_redirection(self, r: ValkeyCluster) -> None: """ Test that the server handles ASK response. @@ -566,23 +568,23 @@ def ok_response(self, *args, **options): assert await r.execute_command("SET", "foo", "bar") == "MOCK_OK" async def test_moved_redirection( - self, create_redis: Callable[..., RedisCluster] + self, create_valkey: Callable[..., ValkeyCluster] ) -> None: """ Test that the client handles MOVED response. """ - await moved_redirection_helper(create_redis, failover=False) + await moved_redirection_helper(create_valkey, failover=False) async def test_moved_redirection_after_failover( - self, create_redis: Callable[..., RedisCluster] + self, create_valkey: Callable[..., ValkeyCluster] ) -> None: """ Test that the client handles MOVED response after a failover. 
""" - await moved_redirection_helper(create_redis, failover=True) + await moved_redirection_helper(create_valkey, failover=True) async def test_refresh_using_specific_nodes( - self, create_redis: Callable[..., RedisCluster] + self, create_valkey: Callable[..., ValkeyCluster] ) -> None: """ Test making calls on specific nodes when the cluster has failed over to @@ -661,7 +663,7 @@ def cmd_init_mock(self, r: ClusterNode) -> None: cmd_parser_initialize.side_effect = cmd_init_mock - rc = await create_redis(cls=RedisCluster, flushdb=False) + rc = await create_valkey(cls=ValkeyCluster, flushdb=False) assert len(rc.get_nodes()) == 1 assert rc.get_node(node_name=node_7006.name) is not None @@ -707,7 +709,7 @@ def execute_command_mock_third(self, *args, **options): return "MOCK_OK" # We don't need to create a real cluster connection but we - # do want RedisCluster.on_connect function to get called, + # do want ValkeyCluster.on_connect function to get called, # so we'll mock some of the Connection's functions to allow it execute_command.side_effect = execute_command_mock_first mocks["send_command"].return_value = True @@ -717,7 +719,7 @@ def execute_command_mock_third(self, *args, **options): mocks["on_connect"].return_value = True # Create a cluster with reading from replications - read_cluster = await get_mocked_redis_client( + read_cluster = await get_mocked_valkey_client( host=default_host, port=default_port, read_from_replicas=True ) assert read_cluster.read_from_replicas is True @@ -732,7 +734,7 @@ def execute_command_mock_third(self, *args, **options): await read_cluster.aclose() - async def test_keyslot(self, r: RedisCluster) -> None: + async def test_keyslot(self, r: ValkeyCluster) -> None: """ Test that method will compute correct key in all supported cases """ @@ -755,7 +757,7 @@ async def test_get_node_name(self) -> None: == f"{default_host}:{default_port}" ) - async def test_all_nodes(self, r: RedisCluster) -> None: + async def test_all_nodes(self, r: 
ValkeyCluster) -> None: """ Set a list of nodes and it should be possible to iterate over all """ @@ -764,7 +766,7 @@ async def test_all_nodes(self, r: RedisCluster) -> None: for i, node in enumerate(r.get_nodes()): assert node in nodes - async def test_all_nodes_masters(self, r: RedisCluster) -> None: + async def test_all_nodes_masters(self, r: ValkeyCluster) -> None: """ Set a list of nodes with random primaries/replicas config and it shold be possible to iterate over all of them. @@ -778,7 +780,7 @@ async def test_all_nodes_masters(self, r: RedisCluster) -> None: for node in r.get_primaries(): assert node in nodes - @pytest.mark.parametrize("error", RedisCluster.ERRORS_ALLOW_RETRY) + @pytest.mark.parametrize("error", ValkeyCluster.ERRORS_ALLOW_RETRY) async def test_cluster_down_overreaches_retry_attempts( self, error: Union[Type[TimeoutError], Type[ClusterDownError], Type[ConnectionError]], @@ -788,7 +790,7 @@ async def test_cluster_down_overreaches_retry_attempts( the command as many times as configured in cluster_error_retry_attempts and then raise the exception """ - with mock.patch.object(RedisCluster, "_execute_command") as execute_command: + with mock.patch.object(ValkeyCluster, "_execute_command") as execute_command: def raise_error(target_node, *args, **kwargs): execute_command.failed_calls += 1 @@ -796,7 +798,7 @@ def raise_error(target_node, *args, **kwargs): execute_command.side_effect = raise_error - rc = await get_mocked_redis_client(host=default_host, port=default_port) + rc = await get_mocked_valkey_client(host=default_host, port=default_port) with pytest.raises(error): await rc.get("bar") @@ -804,7 +806,7 @@ def raise_error(target_node, *args, **kwargs): await rc.aclose() - async def test_set_default_node_success(self, r: RedisCluster) -> None: + async def test_set_default_node_success(self, r: ValkeyCluster) -> None: """ test successful replacement of the default cluster node """ @@ -818,7 +820,7 @@ async def test_set_default_node_success(self, 
r: RedisCluster) -> None: r.set_default_node(new_def_node) assert r.get_default_node() == new_def_node - async def test_set_default_node_failure(self, r: RedisCluster) -> None: + async def test_set_default_node_failure(self, r: ValkeyCluster) -> None: """ test failed replacement of the default cluster node """ @@ -830,7 +832,7 @@ async def test_set_default_node_failure(self, r: RedisCluster) -> None: r.set_default_node(new_def_node) assert r.get_default_node() == default_node - async def test_get_node_from_key(self, r: RedisCluster) -> None: + async def test_get_node_from_key(self, r: ValkeyCluster) -> None: """ Test that get_node_from_key function returns the correct node """ @@ -844,9 +846,9 @@ async def test_get_node_from_key(self, r: RedisCluster) -> None: assert replica.server_type == REPLICA assert replica in slot_nodes - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_not_require_full_coverage_cluster_down_error( - self, r: RedisCluster + self, r: ValkeyCluster ) -> None: """ When require_full_coverage is set to False (default client config) and not @@ -876,16 +878,19 @@ async def test_not_require_full_coverage_cluster_down_error( raise e async def test_can_run_concurrent_commands(self, request: FixtureRequest) -> None: - url = request.config.getoption("--redis-url") - rc = RedisCluster.from_url(url) + url = request.config.getoption("--valkey-url") + rc = ValkeyCluster.from_url(url) assert all( await asyncio.gather( - *(rc.echo("i", target_nodes=RedisCluster.ALL_NODES) for i in range(100)) + *( + rc.echo("i", target_nodes=ValkeyCluster.ALL_NODES) + for i in range(100) + ) ) ) await rc.aclose() - def test_replace_cluster_node(self, r: RedisCluster) -> None: + def test_replace_cluster_node(self, r: ValkeyCluster) -> None: prev_default_node = r.get_default_node() r.replace_default_node() assert r.get_default_node() != prev_default_node @@ -906,8 +911,8 @@ async def test_default_node_is_replaced_after_exception(self, r): # Rollback to 
the old default node r.replace_default_node(curr_default_node) - async def test_address_remap(self, create_redis, master_host): - """Test that we can create a rediscluster object with + async def test_address_remap(self, create_valkey, master_host): + """Test that we can create a valkeycluster object with a host-port remapper and map connections through proxy objects """ @@ -933,8 +938,8 @@ def address_remap(address): await asyncio.gather(*[p.start() for p in proxies]) try: # create cluster: - r = await create_redis( - cls=RedisCluster, flushdb=False, address_remap=address_remap + r = await create_valkey( + cls=ValkeyCluster, flushdb=False, address_remap=address_remap ) try: assert await r.ping() is True @@ -950,12 +955,12 @@ def address_remap(address): assert n_used > 1 -class TestClusterRedisCommands: +class TestClusterValkeyCommands: """ - Tests for RedisCluster unique commands + Tests for ValkeyCluster unique commands """ - async def test_get_and_set(self, r: RedisCluster) -> None: + async def test_get_and_set(self, r: ValkeyCluster) -> None: # get and set can't be tested independently of each other assert await r.get("a") is None byte_string = b"value" @@ -968,7 +973,7 @@ async def test_get_and_set(self, r: RedisCluster) -> None: assert await r.get("integer") == str(integer).encode() assert (await r.get("unicode_string")).decode("utf-8") == unicode_string - async def test_mget_nonatomic(self, r: RedisCluster) -> None: + async def test_mget_nonatomic(self, r: ValkeyCluster) -> None: assert await r.mget_nonatomic([]) == [] assert await r.mget_nonatomic(["a", "b"]) == [None, None] await r.set("a", "1") @@ -982,16 +987,16 @@ async def test_mget_nonatomic(self, r: RedisCluster) -> None: b"3", ] - async def test_mset_nonatomic(self, r: RedisCluster) -> None: + async def test_mset_nonatomic(self, r: ValkeyCluster) -> None: d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} assert await r.mset_nonatomic(d) for k, v in d.items(): assert await r.get(k) == v - async def 
test_config_set(self, r: RedisCluster) -> None: + async def test_config_set(self, r: ValkeyCluster) -> None: assert await r.config_set("slowlog-log-slower-than", 0) - async def test_cluster_config_resetstat(self, r: RedisCluster) -> None: + async def test_cluster_config_resetstat(self, r: ValkeyCluster) -> None: await r.ping(target_nodes="all") all_info = await r.info(target_nodes="all") prior_commands_processed = -1 @@ -1004,29 +1009,29 @@ async def test_cluster_config_resetstat(self, r: RedisCluster) -> None: reset_commands_processed = node_info["total_commands_processed"] assert reset_commands_processed < prior_commands_processed - async def test_client_setname(self, r: RedisCluster) -> None: + async def test_client_setname(self, r: ValkeyCluster) -> None: node = r.get_random_node() - await r.client_setname("redis_py_test", target_nodes=node) + await r.client_setname("valkey_py_test", target_nodes=node) client_name = await r.client_getname(target_nodes=node) - assert_resp_response(r, client_name, "redis_py_test", b"redis_py_test") + assert_resp_response(r, client_name, "valkey_py_test", b"valkey_py_test") - async def test_exists(self, r: RedisCluster) -> None: + async def test_exists(self, r: ValkeyCluster) -> None: d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} await r.mset_nonatomic(d) assert await r.exists(*d.keys()) == len(d) - async def test_delete(self, r: RedisCluster) -> None: + async def test_delete(self, r: ValkeyCluster) -> None: d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} await r.mset_nonatomic(d) assert await r.delete(*d.keys()) == len(d) assert await r.delete(*d.keys()) == 0 - async def test_touch(self, r: RedisCluster) -> None: + async def test_touch(self, r: ValkeyCluster) -> None: d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} await r.mset_nonatomic(d) assert await r.touch(*d.keys()) == len(d) - async def test_unlink(self, r: RedisCluster) -> None: + async def test_unlink(self, r: ValkeyCluster) -> None: d = {"a": b"1", "b": b"2", "c": 
b"3", "d": b"4"} await r.mset_nonatomic(d) assert await r.unlink(*d.keys()) == len(d) @@ -1038,27 +1043,27 @@ async def test_unlink(self, r: RedisCluster) -> None: async def test_initialize_before_execute_multi_key_command( self, request: FixtureRequest ) -> None: - # Test for issue https://github.com/redis/redis-py/issues/2437 - url = request.config.getoption("--redis-url") - r = RedisCluster.from_url(url) + # Test for issue https://github.com/valkey/valkey-py/issues/2437 + url = request.config.getoption("--valkey-url") + r = ValkeyCluster.from_url(url) assert 0 == await r.exists("a", "b", "c") await r.aclose() - @skip_if_redis_enterprise() - async def test_cluster_myid(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_myid(self, r: ValkeyCluster) -> None: node = r.get_random_node() myid = await r.cluster_myid(node) assert len(myid) == 40 @skip_if_server_version_lt("7.2.0") - @skip_if_redis_enterprise() - async def test_cluster_myshardid(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_myshardid(self, r: ValkeyCluster) -> None: node = r.get_random_node() myshardid = await r.cluster_myshardid(node) assert len(myshardid) == 40 - @skip_if_redis_enterprise() - async def test_cluster_slots(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_slots(self, r: ValkeyCluster) -> None: mock_all_nodes_resp(r, default_cluster_slots) cluster_slots = await r.cluster_slots() assert isinstance(cluster_slots, dict) @@ -1066,36 +1071,36 @@ async def test_cluster_slots(self, r: RedisCluster) -> None: assert cluster_slots.get((0, 8191)) is not None assert cluster_slots.get((0, 8191)).get("primary") == ("127.0.0.1", 7000) - @skip_if_redis_enterprise() - async def test_cluster_addslots(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_addslots(self, r: ValkeyCluster) -> None: node = r.get_random_node() mock_node_resp(node, "OK") assert 
await r.cluster_addslots(node, 1, 2, 3) is True @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() - async def test_cluster_addslotsrange(self, r: RedisCluster): + @skip_if_valkey_enterprise() + async def test_cluster_addslotsrange(self, r: ValkeyCluster): node = r.get_random_node() mock_node_resp(node, "OK") assert await r.cluster_addslotsrange(node, 1, 5) - @skip_if_redis_enterprise() - async def test_cluster_countkeysinslot(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_countkeysinslot(self, r: ValkeyCluster) -> None: node = r.nodes_manager.get_node_from_slot(1) mock_node_resp(node, 2) assert await r.cluster_countkeysinslot(1) == 2 - async def test_cluster_count_failure_report(self, r: RedisCluster) -> None: + async def test_cluster_count_failure_report(self, r: ValkeyCluster) -> None: mock_all_nodes_resp(r, 0) assert await r.cluster_count_failure_report("node_0") == 0 - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_cluster_delslots(self) -> None: cluster_slots = [ [0, 8191, ["127.0.0.1", 7000, "node_0"]], [8192, 16383, ["127.0.0.1", 7001, "node_1"]], ] - r = await get_mocked_redis_client( + r = await get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=cluster_slots ) mock_all_nodes_resp(r, "OK") @@ -1108,9 +1113,9 @@ async def test_cluster_delslots(self) -> None: await r.aclose() @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_cluster_delslotsrange(self): - r = await get_mocked_redis_client(host=default_host, port=default_port) + r = await get_mocked_valkey_client(host=default_host, port=default_port) mock_all_nodes_resp(r, "OK") node = r.get_random_node() await r.cluster_addslots(node, 1, 2, 3, 4, 5) @@ -1118,35 +1123,35 @@ async def test_cluster_delslotsrange(self): assert node._free.pop().read_response.called await r.aclose() - @skip_if_redis_enterprise() - async def 
test_cluster_failover(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_failover(self, r: ValkeyCluster) -> None: node = r.get_random_node() mock_node_resp(node, "OK") assert await r.cluster_failover(node) is True assert await r.cluster_failover(node, "FORCE") is True assert await r.cluster_failover(node, "TAKEOVER") is True - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): await r.cluster_failover(node, "FORCT") - @skip_if_redis_enterprise() - async def test_cluster_info(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_info(self, r: ValkeyCluster) -> None: info = await r.cluster_info() assert isinstance(info, dict) assert info["cluster_state"] == "ok" - @skip_if_redis_enterprise() - async def test_cluster_keyslot(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_keyslot(self, r: ValkeyCluster) -> None: mock_all_nodes_resp(r, 12182) assert await r.cluster_keyslot("foo") == 12182 - @skip_if_redis_enterprise() - async def test_cluster_meet(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_meet(self, r: ValkeyCluster) -> None: node = r.get_default_node() mock_node_resp(node, "OK") assert await r.cluster_meet("127.0.0.1", 6379) is True - @skip_if_redis_enterprise() - async def test_cluster_nodes(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_nodes(self, r: ValkeyCluster) -> None: response = ( "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 " "slave aa90da731f673a99617dfe930306549a09f83a6b 0 " @@ -1174,8 +1179,8 @@ async def test_cluster_nodes(self, r: RedisCluster) -> None: == "c8253bae761cb1ecb2b61857d85dfe455a0fec8b" ) - @skip_if_redis_enterprise() - async def test_cluster_nodes_importing_migrating(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_nodes_importing_migrating(self, r: ValkeyCluster) -> None: 
response = ( "488ead2fcce24d8c0f158f9172cb1f4a9e040fe5 127.0.0.1:16381@26381 " "master - 0 1648975557664 3 connected 10923-16383\n" @@ -1211,8 +1216,8 @@ async def test_cluster_nodes_importing_migrating(self, r: RedisCluster) -> None: assert node_16381.get("slots") == [["10923", "16383"]] assert node_16381.get("migrations") == [] - @skip_if_redis_enterprise() - async def test_cluster_replicate(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_replicate(self, r: ValkeyCluster) -> None: node = r.get_random_node() all_replicas = r.get_replicas() mock_all_nodes_resp(r, "OK") @@ -1224,8 +1229,8 @@ async def test_cluster_replicate(self, r: RedisCluster) -> None: else: assert results is True - @skip_if_redis_enterprise() - async def test_cluster_reset(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_reset(self, r: ValkeyCluster) -> None: mock_all_nodes_resp(r, "OK") assert await r.cluster_reset() is True assert await r.cluster_reset(False) is True @@ -1233,8 +1238,8 @@ async def test_cluster_reset(self, r: RedisCluster) -> None: for res in all_results.values(): assert res is True - @skip_if_redis_enterprise() - async def test_cluster_save_config(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_save_config(self, r: ValkeyCluster) -> None: node = r.get_random_node() all_nodes = r.get_nodes() mock_all_nodes_resp(r, "OK") @@ -1243,42 +1248,42 @@ async def test_cluster_save_config(self, r: RedisCluster) -> None: for res in all_results.values(): assert res is True - @skip_if_redis_enterprise() - async def test_cluster_get_keys_in_slot(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_get_keys_in_slot(self, r: ValkeyCluster) -> None: response = ["{foo}1", "{foo}2"] node = r.nodes_manager.get_node_from_slot(12182) mock_node_resp(node, response) keys = await r.cluster_get_keys_in_slot(12182, 4) assert keys == response - 
@skip_if_redis_enterprise() - async def test_cluster_set_config_epoch(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_set_config_epoch(self, r: ValkeyCluster) -> None: mock_all_nodes_resp(r, "OK") assert await r.cluster_set_config_epoch(3) is True all_results = await r.cluster_set_config_epoch(3, target_nodes="all") for res in all_results.values(): assert res is True - @skip_if_redis_enterprise() - async def test_cluster_setslot(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_setslot(self, r: ValkeyCluster) -> None: node = r.get_random_node() mock_node_resp(node, "OK") assert await r.cluster_setslot(node, "node_0", 1218, "IMPORTING") is True assert await r.cluster_setslot(node, "node_0", 1218, "NODE") is True assert await r.cluster_setslot(node, "node_0", 1218, "MIGRATING") is True - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): await r.cluster_failover(node, "STABLE") - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): await r.cluster_failover(node, "STATE") - async def test_cluster_setslot_stable(self, r: RedisCluster) -> None: + async def test_cluster_setslot_stable(self, r: ValkeyCluster) -> None: node = r.nodes_manager.get_node_from_slot(12182) mock_node_resp(node, "OK") assert await r.cluster_setslot_stable(12182) is True assert node._free.pop().read_response.called - @skip_if_redis_enterprise() - async def test_cluster_replicas(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_cluster_replicas(self, r: ValkeyCluster) -> None: response = [ b"01eca22229cf3c652b6fca0d09ff6941e0d2e3 " b"127.0.0.1:6377@16377 slave " @@ -1299,7 +1304,7 @@ async def test_cluster_replicas(self, r: RedisCluster) -> None: ) @skip_if_server_version_lt("7.0.0") - async def test_cluster_links(self, r: RedisCluster): + async def test_cluster_links(self, r: ValkeyCluster): node = r.get_random_node() res = await r.cluster_links(node) if 
is_resp2_connection(r): @@ -1315,9 +1320,9 @@ async def test_cluster_links(self, r: RedisCluster): for i in range(0, len(res) - 1, 2): assert res[i][b"node"] == res[i + 1][b"node"] - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_readonly(self) -> None: - r = await get_mocked_redis_client(host=default_host, port=default_port) + r = await get_mocked_valkey_client(host=default_host, port=default_port) mock_all_nodes_resp(r, "OK") assert await r.readonly() is True all_replicas_results = await r.readonly(target_nodes="replicas") @@ -1328,9 +1333,9 @@ async def test_readonly(self) -> None: await r.aclose() - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_readwrite(self) -> None: - r = await get_mocked_redis_client(host=default_host, port=default_port) + r = await get_mocked_valkey_client(host=default_host, port=default_port) mock_all_nodes_resp(r, "OK") assert await r.readwrite() is True all_replicas_results = await r.readwrite(target_nodes="replicas") @@ -1341,8 +1346,8 @@ async def test_readwrite(self) -> None: await r.aclose() - @skip_if_redis_enterprise() - async def test_bgsave(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_bgsave(self, r: ValkeyCluster) -> None: try: assert await r.bgsave() await asyncio.sleep(0.3) @@ -1351,7 +1356,7 @@ async def test_bgsave(self, r: RedisCluster) -> None: if "Background save already in progress" not in e.__str__(): raise - async def test_info(self, r: RedisCluster) -> None: + async def test_info(self, r: ValkeyCluster) -> None: # Map keys to same slot await r.set("x{1}", 1) await r.set("y{1}", 2) @@ -1364,7 +1369,7 @@ async def test_info(self, r: RedisCluster) -> None: assert isinstance(info, dict) assert info["db0"]["keys"] == 3 - async def _init_slowlog_test(self, r: RedisCluster, node: ClusterNode) -> str: + async def _init_slowlog_test(self, r: ValkeyCluster, node: ClusterNode) -> str: slowlog_lim = await 
r.config_get("slowlog-log-slower-than", target_nodes=node) assert ( await r.config_set("slowlog-log-slower-than", 0, target_nodes=node) is True @@ -1372,7 +1377,7 @@ async def _init_slowlog_test(self, r: RedisCluster, node: ClusterNode) -> str: return slowlog_lim["slowlog-log-slower-than"] async def _teardown_slowlog_test( - self, r: RedisCluster, node: ClusterNode, prev_limit: str + self, r: ValkeyCluster, node: ClusterNode, prev_limit: str ) -> None: assert ( await r.config_set("slowlog-log-slower-than", prev_limit, target_nodes=node) @@ -1380,7 +1385,7 @@ async def _teardown_slowlog_test( ) async def test_slowlog_get( - self, r: RedisCluster, slowlog: Optional[List[Dict[str, Union[int, bytes]]]] + self, r: ValkeyCluster, slowlog: Optional[List[Dict[str, Union[int, bytes]]]] ) -> None: unicode_string = chr(3456) + "abcd" + chr(3421) node = r.get_node_from_key(unicode_string) @@ -1408,7 +1413,7 @@ async def test_slowlog_get( await self._teardown_slowlog_test(r, node, slowlog_limit) async def test_slowlog_get_limit( - self, r: RedisCluster, slowlog: Optional[List[Dict[str, Union[int, bytes]]]] + self, r: ValkeyCluster, slowlog: Optional[List[Dict[str, Union[int, bytes]]]] ) -> None: assert await r.slowlog_reset() node = r.get_node_from_key("foo") @@ -1420,31 +1425,31 @@ async def test_slowlog_get_limit( assert len(slowlog) == 1 await self._teardown_slowlog_test(r, node, slowlog_limit) - async def test_slowlog_length(self, r: RedisCluster, slowlog: None) -> None: + async def test_slowlog_length(self, r: ValkeyCluster, slowlog: None) -> None: await r.get("foo") node = r.nodes_manager.get_node_from_slot(key_slot(b"foo")) slowlog_len = await r.slowlog_len(target_nodes=node) assert isinstance(slowlog_len, int) - async def test_time(self, r: RedisCluster) -> None: + async def test_time(self, r: ValkeyCluster) -> None: t = await r.time(target_nodes=r.get_primaries()[0]) assert len(t) == 2 assert isinstance(t[0], int) assert isinstance(t[1], int) 
@skip_if_server_version_lt("4.0.0") - async def test_memory_usage(self, r: RedisCluster) -> None: + async def test_memory_usage(self, r: ValkeyCluster) -> None: await r.set("foo", "bar") assert isinstance(await r.memory_usage("foo"), int) @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() - async def test_memory_malloc_stats(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_memory_malloc_stats(self, r: ValkeyCluster) -> None: assert await r.memory_malloc_stats() @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() - async def test_memory_stats(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_memory_stats(self, r: ValkeyCluster) -> None: # put a key into the current db to make sure that "db." # has data await r.set("foo", "bar") @@ -1456,30 +1461,30 @@ async def test_memory_stats(self, r: RedisCluster) -> None: assert isinstance(value, dict) @skip_if_server_version_lt("4.0.0") - async def test_memory_help(self, r: RedisCluster) -> None: + async def test_memory_help(self, r: ValkeyCluster) -> None: with pytest.raises(NotImplementedError): await r.memory_help() @skip_if_server_version_lt("4.0.0") - async def test_memory_doctor(self, r: RedisCluster) -> None: + async def test_memory_doctor(self, r: ValkeyCluster) -> None: with pytest.raises(NotImplementedError): await r.memory_doctor() - @skip_if_redis_enterprise() - async def test_lastsave(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_lastsave(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] assert isinstance(await r.lastsave(target_nodes=node), datetime.datetime) - async def test_cluster_echo(self, r: RedisCluster) -> None: + async def test_cluster_echo(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] assert await r.echo("foo bar", target_nodes=node) == b"foo bar" @skip_if_server_version_lt("1.0.0") - async def test_debug_segfault(self, r: RedisCluster) -> None: + async 
def test_debug_segfault(self, r: ValkeyCluster) -> None: with pytest.raises(NotImplementedError): await r.debug_segfault() - async def test_config_resetstat(self, r: RedisCluster) -> None: + async def test_config_resetstat(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] await r.ping(target_nodes=node) prior_commands_processed = int( @@ -1493,32 +1498,32 @@ async def test_config_resetstat(self, r: RedisCluster) -> None: assert reset_commands_processed < prior_commands_processed @skip_if_server_version_lt("6.2.0") - async def test_client_trackinginfo(self, r: RedisCluster) -> None: + async def test_client_trackinginfo(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] res = await r.client_trackinginfo(target_nodes=node) assert len(res) > 2 assert "prefixes" in res or b"prefixes" in res @skip_if_server_version_lt("2.9.50") - async def test_client_pause(self, r: RedisCluster) -> None: + async def test_client_pause(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] assert await r.client_pause(1, target_nodes=node) assert await r.client_pause(timeout=1, target_nodes=node) - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): await r.client_pause(timeout="not an integer", target_nodes=node) @skip_if_server_version_lt("6.2.0") - @skip_if_redis_enterprise() - async def test_client_unpause(self, r: RedisCluster) -> None: + @skip_if_valkey_enterprise() + async def test_client_unpause(self, r: ValkeyCluster) -> None: assert await r.client_unpause() @skip_if_server_version_lt("5.0.0") - async def test_client_id(self, r: RedisCluster) -> None: + async def test_client_id(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] assert await r.client_id(target_nodes=node) > 0 @skip_if_server_version_lt("5.0.0") - async def test_client_unblock(self, r: RedisCluster) -> None: + async def test_client_unblock(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] myid = await r.client_id(target_nodes=node) assert not await 
r.client_unblock(myid, target_nodes=node) @@ -1526,13 +1531,13 @@ async def test_client_unblock(self, r: RedisCluster) -> None: assert not await r.client_unblock(myid, error=False, target_nodes=node) @skip_if_server_version_lt("6.0.0") - async def test_client_getredir(self, r: RedisCluster) -> None: + async def test_client_getredir(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] assert isinstance(await r.client_getredir(target_nodes=node), int) assert await r.client_getredir(target_nodes=node) == -1 @skip_if_server_version_lt("6.2.0") - async def test_client_info(self, r: RedisCluster) -> None: + async def test_client_info(self, r: ValkeyCluster) -> None: node = r.get_primaries()[0] info = await r.client_info(target_nodes=node) assert isinstance(info, dict) @@ -1540,40 +1545,40 @@ async def test_client_info(self, r: RedisCluster) -> None: @skip_if_server_version_lt("2.6.9") async def test_client_kill( - self, r: RedisCluster, create_redis: Callable[..., RedisCluster] + self, r: ValkeyCluster, create_valkey: Callable[..., ValkeyCluster] ) -> None: node = r.get_primaries()[0] - r2 = await create_redis(cls=RedisCluster, flushdb=False) - await r.client_setname("redis-py-c1", target_nodes="all") - await r2.client_setname("redis-py-c2", target_nodes="all") + r2 = await create_valkey(cls=ValkeyCluster, flushdb=False) + await r.client_setname("valkey-py-c1", target_nodes="all") + await r2.client_setname("valkey-py-c2", target_nodes="all") clients = [ client for client in await r.client_list(target_nodes=node) - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_addr = clients_by_name["redis-py-c2"].get("addr") + client_addr = clients_by_name["valkey-py-c2"].get("addr") assert await r.client_kill(client_addr, target_nodes=node) is True clients = [ client for client in await 
r.client_list(target_nodes=node) - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" await r2.aclose() @skip_if_server_version_lt("2.6.0") - async def test_cluster_bitop_not_empty_string(self, r: RedisCluster) -> None: + async def test_cluster_bitop_not_empty_string(self, r: ValkeyCluster) -> None: await r.set("{foo}a", "") await r.bitop("not", "{foo}r", "{foo}a") assert await r.get("{foo}r") is None @skip_if_server_version_lt("2.6.0") - async def test_cluster_bitop_not(self, r: RedisCluster) -> None: + async def test_cluster_bitop_not(self, r: ValkeyCluster) -> None: test_str = b"\xAA\x00\xFF\x55" correct = ~0xAA00FF55 & 0xFFFFFFFF await r.set("{foo}a", test_str) @@ -1581,7 +1586,7 @@ async def test_cluster_bitop_not(self, r: RedisCluster) -> None: assert int(binascii.hexlify(await r.get("{foo}r")), 16) == correct @skip_if_server_version_lt("2.6.0") - async def test_cluster_bitop_not_in_place(self, r: RedisCluster) -> None: + async def test_cluster_bitop_not_in_place(self, r: ValkeyCluster) -> None: test_str = b"\xAA\x00\xFF\x55" correct = ~0xAA00FF55 & 0xFFFFFFFF await r.set("{foo}a", test_str) @@ -1589,7 +1594,7 @@ async def test_cluster_bitop_not_in_place(self, r: RedisCluster) -> None: assert int(binascii.hexlify(await r.get("{foo}a")), 16) == correct @skip_if_server_version_lt("2.6.0") - async def test_cluster_bitop_single_string(self, r: RedisCluster) -> None: + async def test_cluster_bitop_single_string(self, r: ValkeyCluster) -> None: test_str = b"\x01\x02\xFF" await r.set("{foo}a", test_str) await r.bitop("and", "{foo}res1", "{foo}a") @@ -1600,7 +1605,7 @@ async def test_cluster_bitop_single_string(self, r: RedisCluster) -> None: assert await r.get("{foo}res3") == test_str @skip_if_server_version_lt("2.6.0") - async def test_cluster_bitop_string_operands(self, r: 
RedisCluster) -> None: + async def test_cluster_bitop_string_operands(self, r: ValkeyCluster) -> None: await r.set("{foo}a", b"\x01\x02\xFF\xFF") await r.set("{foo}b", b"\x01\x02\xFF") await r.bitop("and", "{foo}res1", "{foo}a", "{foo}b") @@ -1611,7 +1616,7 @@ async def test_cluster_bitop_string_operands(self, r: RedisCluster) -> None: assert int(binascii.hexlify(await r.get("{foo}res3")), 16) == 0x000000FF @skip_if_server_version_lt("6.2.0") - async def test_cluster_copy(self, r: RedisCluster) -> None: + async def test_cluster_copy(self, r: ValkeyCluster) -> None: assert await r.copy("{foo}a", "{foo}b") == 0 await r.set("{foo}a", "bar") assert await r.copy("{foo}a", "{foo}b") == 1 @@ -1619,25 +1624,25 @@ async def test_cluster_copy(self, r: RedisCluster) -> None: assert await r.get("{foo}b") == b"bar" @skip_if_server_version_lt("6.2.0") - async def test_cluster_copy_and_replace(self, r: RedisCluster) -> None: + async def test_cluster_copy_and_replace(self, r: ValkeyCluster) -> None: await r.set("{foo}a", "foo1") await r.set("{foo}b", "foo2") assert await r.copy("{foo}a", "{foo}b") == 0 assert await r.copy("{foo}a", "{foo}b", replace=True) == 1 @skip_if_server_version_lt("6.2.0") - async def test_cluster_lmove(self, r: RedisCluster) -> None: + async def test_cluster_lmove(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "one", "two", "three", "four") assert await r.lmove("{foo}a", "{foo}b") assert await r.lmove("{foo}a", "{foo}b", "right", "left") @skip_if_server_version_lt("6.2.0") - async def test_cluster_blmove(self, r: RedisCluster) -> None: + async def test_cluster_blmove(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "one", "two", "three", "four") assert await r.blmove("{foo}a", "{foo}b", 5) assert await r.blmove("{foo}a", "{foo}b", 1, "RIGHT", "LEFT") - async def test_cluster_msetnx(self, r: RedisCluster) -> None: + async def test_cluster_msetnx(self, r: ValkeyCluster) -> None: d = {"{foo}a": b"1", "{foo}b": b"2", "{foo}c": b"3"} assert 
await r.msetnx(d) d2 = {"{foo}a": b"x", "{foo}d": b"4"} @@ -1646,13 +1651,13 @@ async def test_cluster_msetnx(self, r: RedisCluster) -> None: assert await r.get(k) == v assert await r.get("{foo}d") is None - async def test_cluster_rename(self, r: RedisCluster) -> None: + async def test_cluster_rename(self, r: ValkeyCluster) -> None: await r.set("{foo}a", "1") assert await r.rename("{foo}a", "{foo}b") assert await r.get("{foo}a") is None assert await r.get("{foo}b") == b"1" - async def test_cluster_renamenx(self, r: RedisCluster) -> None: + async def test_cluster_renamenx(self, r: ValkeyCluster) -> None: await r.set("{foo}a", "1") await r.set("{foo}b", "2") assert not await r.renamenx("{foo}a", "{foo}b") @@ -1660,7 +1665,7 @@ async def test_cluster_renamenx(self, r: RedisCluster) -> None: assert await r.get("{foo}b") == b"2" # LIST COMMANDS - async def test_cluster_blpop(self, r: RedisCluster) -> None: + async def test_cluster_blpop(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "1", "2") await r.rpush("{foo}b", "3", "4") assert_resp_response( @@ -1693,7 +1698,7 @@ async def test_cluster_blpop(self, r: RedisCluster) -> None: r, await r.blpop("{foo}c", timeout=1), (b"{foo}c", b"1"), [b"{foo}c", b"1"] ) - async def test_cluster_brpop(self, r: RedisCluster) -> None: + async def test_cluster_brpop(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "1", "2") await r.rpush("{foo}b", "3", "4") assert_resp_response( @@ -1726,7 +1731,7 @@ async def test_cluster_brpop(self, r: RedisCluster) -> None: r, await r.brpop("{foo}c", timeout=1), (b"{foo}c", b"1"), [b"{foo}c", b"1"] ) - async def test_cluster_brpoplpush(self, r: RedisCluster) -> None: + async def test_cluster_brpoplpush(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "1", "2") await r.rpush("{foo}b", "3", "4") assert await r.brpoplpush("{foo}a", "{foo}b") == b"2" @@ -1735,24 +1740,24 @@ async def test_cluster_brpoplpush(self, r: RedisCluster) -> None: assert await r.lrange("{foo}a", 0, -1) == 
[] assert await r.lrange("{foo}b", 0, -1) == [b"1", b"2", b"3", b"4"] - async def test_cluster_brpoplpush_empty_string(self, r: RedisCluster) -> None: + async def test_cluster_brpoplpush_empty_string(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "") assert await r.brpoplpush("{foo}a", "{foo}b") == b"" - async def test_cluster_rpoplpush(self, r: RedisCluster) -> None: + async def test_cluster_rpoplpush(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "a1", "a2", "a3") await r.rpush("{foo}b", "b1", "b2", "b3") assert await r.rpoplpush("{foo}a", "{foo}b") == b"a3" assert await r.lrange("{foo}a", 0, -1) == [b"a1", b"a2"] assert await r.lrange("{foo}b", 0, -1) == [b"a3", b"b1", b"b2", b"b3"] - async def test_cluster_sdiff(self, r: RedisCluster) -> None: + async def test_cluster_sdiff(self, r: ValkeyCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") assert await r.sdiff("{foo}a", "{foo}b") == {b"1", b"2", b"3"} await r.sadd("{foo}b", "2", "3") assert await r.sdiff("{foo}a", "{foo}b") == {b"1"} - async def test_cluster_sdiffstore(self, r: RedisCluster) -> None: + async def test_cluster_sdiffstore(self, r: ValkeyCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") assert await r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 3 assert await r.smembers("{foo}c") == {b"1", b"2", b"3"} @@ -1760,13 +1765,13 @@ async def test_cluster_sdiffstore(self, r: RedisCluster) -> None: assert await r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 1 assert await r.smembers("{foo}c") == {b"1"} - async def test_cluster_sinter(self, r: RedisCluster) -> None: + async def test_cluster_sinter(self, r: ValkeyCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") assert await r.sinter("{foo}a", "{foo}b") == set() await r.sadd("{foo}b", "2", "3") assert await r.sinter("{foo}a", "{foo}b") == {b"2", b"3"} - async def test_cluster_sinterstore(self, r: RedisCluster) -> None: + async def test_cluster_sinterstore(self, r: ValkeyCluster) -> None: await r.sadd("{foo}a", "1", "2", "3") 
assert await r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 0 assert await r.smembers("{foo}c") == set() @@ -1774,26 +1779,26 @@ async def test_cluster_sinterstore(self, r: RedisCluster) -> None: assert await r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 2 assert await r.smembers("{foo}c") == {b"2", b"3"} - async def test_cluster_smove(self, r: RedisCluster) -> None: + async def test_cluster_smove(self, r: ValkeyCluster) -> None: await r.sadd("{foo}a", "a1", "a2") await r.sadd("{foo}b", "b1", "b2") assert await r.smove("{foo}a", "{foo}b", "a1") assert await r.smembers("{foo}a") == {b"a2"} assert await r.smembers("{foo}b") == {b"b1", b"b2", b"a1"} - async def test_cluster_sunion(self, r: RedisCluster) -> None: + async def test_cluster_sunion(self, r: ValkeyCluster) -> None: await r.sadd("{foo}a", "1", "2") await r.sadd("{foo}b", "2", "3") assert await r.sunion("{foo}a", "{foo}b") == {b"1", b"2", b"3"} - async def test_cluster_sunionstore(self, r: RedisCluster) -> None: + async def test_cluster_sunionstore(self, r: ValkeyCluster) -> None: await r.sadd("{foo}a", "1", "2") await r.sadd("{foo}b", "2", "3") assert await r.sunionstore("{foo}c", "{foo}a", "{foo}b") == 3 assert await r.smembers("{foo}c") == {b"1", b"2", b"3"} @skip_if_server_version_lt("6.2.0") - async def test_cluster_zdiff(self, r: RedisCluster) -> None: + async def test_cluster_zdiff(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("{foo}b", {"a1": 1, "a2": 2}) assert await r.zdiff(["{foo}a", "{foo}b"]) == [b"a3"] @@ -1801,7 +1806,7 @@ async def test_cluster_zdiff(self, r: RedisCluster) -> None: assert_resp_response(r, response, [b"a3", b"3"], [[b"a3", 3.0]]) @skip_if_server_version_lt("6.2.0") - async def test_cluster_zdiffstore(self, r: RedisCluster) -> None: + async def test_cluster_zdiffstore(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("{foo}b", {"a1": 1, "a2": 2}) assert await 
r.zdiffstore("{foo}out", ["{foo}a", "{foo}b"]) @@ -1810,7 +1815,7 @@ async def test_cluster_zdiffstore(self, r: RedisCluster) -> None: assert_resp_response(r, response, [(b"a3", 3.0)], [[b"a3", 3.0]]) @skip_if_server_version_lt("6.2.0") - async def test_cluster_zinter(self, r: RedisCluster) -> None: + async def test_cluster_zinter(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1845,7 +1850,7 @@ async def test_cluster_zinter(self, r: RedisCluster) -> None: r, res, [(b"a3", 20), (b"a1", 23)], [[b"a3", 20], [b"a1", 23]] ) - async def test_cluster_zinterstore_sum(self, r: RedisCluster) -> None: + async def test_cluster_zinterstore_sum(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1857,7 +1862,7 @@ async def test_cluster_zinterstore_sum(self, r: RedisCluster) -> None: [[b"a3", 8.0], [b"a1", 9.0]], ) - async def test_cluster_zinterstore_max(self, r: RedisCluster) -> None: + async def test_cluster_zinterstore_max(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1874,7 +1879,7 @@ async def test_cluster_zinterstore_max(self, r: RedisCluster) -> None: [[b"a3", 5.0], [b"a1", 6.0]], ) - async def test_cluster_zinterstore_min(self, r: RedisCluster) -> None: + async def test_cluster_zinterstore_min(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("{foo}b", {"a1": 2, "a2": 3, "a3": 5}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1891,7 +1896,7 @@ async def test_cluster_zinterstore_min(self, r: RedisCluster) -> None: [[b"a1", 1.0], [b"a3", 3.0]], ) - async def 
test_cluster_zinterstore_with_weight(self, r: RedisCluster) -> None: + async def test_cluster_zinterstore_with_weight(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1906,7 +1911,7 @@ async def test_cluster_zinterstore_with_weight(self, r: RedisCluster) -> None: ) @skip_if_server_version_lt("4.9.0") - async def test_cluster_bzpopmax(self, r: RedisCluster) -> None: + async def test_cluster_bzpopmax(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2}) await r.zadd("{foo}b", {"b1": 10, "b2": 20}) assert_resp_response( @@ -1943,7 +1948,7 @@ async def test_cluster_bzpopmax(self, r: RedisCluster) -> None: ) @skip_if_server_version_lt("4.9.0") - async def test_cluster_bzpopmin(self, r: RedisCluster) -> None: + async def test_cluster_bzpopmin(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2}) await r.zadd("{foo}b", {"b1": 10, "b2": 20}) assert_resp_response( @@ -1980,7 +1985,7 @@ async def test_cluster_bzpopmin(self, r: RedisCluster) -> None: ) @skip_if_server_version_lt("6.2.0") - async def test_cluster_zrangestore(self, r: RedisCluster) -> None: + async def test_cluster_zrangestore(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrangestore("{foo}b", "{foo}a", 0, 1) assert await r.zrange("{foo}b", 0, -1) == [b"a1", b"a2"] @@ -2007,7 +2012,7 @@ async def test_cluster_zrangestore(self, r: RedisCluster) -> None: assert await r.zrange("{foo}b", 0, -1) == [b"a2"] @skip_if_server_version_lt("6.2.0") - async def test_cluster_zunion(self, r: RedisCluster) -> None: + async def test_cluster_zunion(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -2050,7 +2055,7 @@ async def test_cluster_zunion(self, 
r: RedisCluster) -> None: [[b"a2", 5.0], [b"a4", 12.0], [b"a3", 20.0], [b"a1", 23.0]], ) - async def test_cluster_zunionstore_sum(self, r: RedisCluster) -> None: + async def test_cluster_zunionstore_sum(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -2062,7 +2067,7 @@ async def test_cluster_zunionstore_sum(self, r: RedisCluster) -> None: [[b"a2", 3.0], [b"a4", 4.0], [b"a3", 8.0], [b"a1", 9.0]], ) - async def test_cluster_zunionstore_max(self, r: RedisCluster) -> None: + async def test_cluster_zunionstore_max(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -2079,7 +2084,7 @@ async def test_cluster_zunionstore_max(self, r: RedisCluster) -> None: [[b"a2", 2.0], [b"a4", 4.0], [b"a3", 5.0], [b"a1", 6.0]], ) - async def test_cluster_zunionstore_min(self, r: RedisCluster) -> None: + async def test_cluster_zunionstore_min(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 4}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -2096,7 +2101,7 @@ async def test_cluster_zunionstore_min(self, r: RedisCluster) -> None: [[b"a1", 1.0], [b"a2", 2.0], [b"a3", 3.0], [b"a4", 4.0]], ) - async def test_cluster_zunionstore_with_weight(self, r: RedisCluster) -> None: + async def test_cluster_zunionstore_with_weight(self, r: ValkeyCluster) -> None: await r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4}) @@ -2111,7 +2116,7 @@ async def test_cluster_zunionstore_with_weight(self, r: RedisCluster) -> None: ) @skip_if_server_version_lt("2.8.9") - async def test_cluster_pfcount(self, r: RedisCluster) -> None: + async def 
test_cluster_pfcount(self, r: ValkeyCluster) -> None: members = {b"1", b"2", b"3"} await r.pfadd("{foo}a", *members) assert await r.pfcount("{foo}a") == len(members) @@ -2121,7 +2126,7 @@ async def test_cluster_pfcount(self, r: RedisCluster) -> None: assert await r.pfcount("{foo}a", "{foo}b") == len(members_b.union(members)) @skip_if_server_version_lt("2.8.9") - async def test_cluster_pfmerge(self, r: RedisCluster) -> None: + async def test_cluster_pfmerge(self, r: ValkeyCluster) -> None: mema = {b"1", b"2", b"3"} memb = {b"2", b"3", b"4"} memc = {b"5", b"6", b"7"} @@ -2133,14 +2138,14 @@ async def test_cluster_pfmerge(self, r: RedisCluster) -> None: await r.pfmerge("{foo}d", "{foo}b") assert await r.pfcount("{foo}d") == 7 - async def test_cluster_sort_store(self, r: RedisCluster) -> None: + async def test_cluster_sort_store(self, r: ValkeyCluster) -> None: await r.rpush("{foo}a", "2", "3", "1") assert await r.sort("{foo}a", store="{foo}sorted_values") == 3 assert await r.lrange("{foo}sorted_values", 0, -1) == [b"1", b"2", b"3"] # GEO COMMANDS @skip_if_server_version_lt("6.2.0") - async def test_cluster_geosearchstore(self, r: RedisCluster) -> None: + async def test_cluster_geosearchstore(self, r: ValkeyCluster) -> None: values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2159,7 +2164,7 @@ async def test_cluster_geosearchstore(self, r: RedisCluster) -> None: @skip_unless_arch_bits(64) @skip_if_server_version_lt("6.2.0") - async def test_geosearchstore_dist(self, r: RedisCluster) -> None: + async def test_geosearchstore_dist(self, r: ValkeyCluster) -> None: values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2173,13 +2178,13 @@ async def test_geosearchstore_dist(self, r: RedisCluster) -> None: longitude=2.191, latitude=41.433, radius=1000, - storedist=True, + storedist=True, ) # instead of save the geo score, the distance is saved. 
assert await r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301 @skip_if_server_version_lt("3.2.0") - async def test_cluster_georadius_store(self, r: RedisCluster) -> None: + async def test_cluster_georadius_store(self, r: ValkeyCluster) -> None: values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2194,7 +2199,7 @@ async def test_cluster_georadius_store(self, r: RedisCluster) -> None: @skip_unless_arch_bits(64) @skip_if_server_version_lt("3.2.0") - async def test_cluster_georadius_store_dist(self, r: RedisCluster) -> None: + async def test_cluster_georadius_store_dist(self, r: ValkeyCluster) -> None: values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2208,12 +2213,12 @@ async def test_cluster_georadius_store_dist(self, r: RedisCluster) -> None: # instead of save the geo score, the distance is saved. assert await r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301 - async def test_cluster_dbsize(self, r: RedisCluster) -> None: + async def test_cluster_dbsize(self, r: ValkeyCluster) -> None: d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} assert await r.mset_nonatomic(d) assert await r.dbsize(target_nodes="primaries") == len(d) - async def test_cluster_keys(self, r: RedisCluster) -> None: + async def test_cluster_keys(self, r: ValkeyCluster) -> None: assert await r.keys() == [] keys_with_underscores = {b"test_a", b"test_b"} keys = keys_with_underscores.union({b"testc"}) @@ -2227,7 +2232,7 @@ async def test_cluster_keys(self, r: RedisCluster) -> None: # SCAN COMMANDS @skip_if_server_version_lt("2.8.0") - async def test_cluster_scan(self, r: RedisCluster) -> None: + async def test_cluster_scan(self, r: ValkeyCluster) -> None: await r.set("a", 1) await r.set("b", 2) await r.set("c", 3) @@ -2246,7 +2251,7 @@ async def test_cluster_scan(self, r: RedisCluster) -> None: assert all(cursor == 0 for cursor in cursors.values()) @skip_if_server_version_lt("6.0.0") 
- async def test_cluster_scan_type(self, r: RedisCluster) -> None: + async def test_cluster_scan_type(self, r: ValkeyCluster) -> None: await r.sadd("a-set", 1) await r.sadd("b-set", 1) await r.sadd("c-set", 1) @@ -2269,7 +2274,7 @@ async def test_cluster_scan_type(self, r: RedisCluster) -> None: assert all(cursor == 0 for cursor in cursors.values()) @skip_if_server_version_lt("2.8.0") - async def test_cluster_scan_iter(self, r: RedisCluster) -> None: + async def test_cluster_scan_iter(self, r: ValkeyCluster) -> None: keys_all = [] keys_1 = [] for i in range(100): @@ -2290,7 +2295,7 @@ async def test_cluster_scan_iter(self, r: RedisCluster) -> None: ] assert sorted(keys) == keys_1 - async def test_cluster_randomkey(self, r: RedisCluster) -> None: + async def test_cluster_randomkey(self, r: ValkeyCluster) -> None: node = r.get_node_from_key("{foo}") assert await r.randomkey(target_nodes=node) is None for key in ("{foo}a", "{foo}b", "{foo}c"): @@ -2298,13 +2303,13 @@ async def test_cluster_randomkey(self, r: RedisCluster) -> None: assert await r.randomkey(target_nodes=node) in (b"{foo}a", b"{foo}b", b"{foo}c") @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_acl_log( - self, r: RedisCluster, create_redis: Callable[..., RedisCluster] + self, r: ValkeyCluster, create_valkey: Callable[..., ValkeyCluster] ) -> None: key = "{cache}:" node = r.get_node_from_key(key) - username = "redis-py-user" + username = "valkey-py-user" await r.acl_setuser( username, @@ -2317,8 +2322,8 @@ async def test_acl_log( ) await r.acl_log_reset(target_nodes=node) - user_client = await create_redis( - cls=RedisCluster, flushdb=False, username=username + user_client = await create_valkey( + cls=ValkeyCluster, flushdb=False, username=username ) # Valid operation and key @@ -2350,7 +2355,7 @@ class TestNodesManager: Tests for the NodesManager class """ - async def test_load_balancer(self, r: RedisCluster) -> None: + async def 
test_load_balancer(self, r: ValkeyCluster) -> None: n_manager = r.nodes_manager lb = n_manager.read_load_balancer slot_1 = 1257 @@ -2392,8 +2397,8 @@ async def test_init_slots_cache_not_all_slots_covered(self) -> None: [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]], [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]], ] - with pytest.raises(RedisClusterException) as ex: - rc = await get_mocked_redis_client( + with pytest.raises(ValkeyClusterException) as ex: + rc = await get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=cluster_slots, @@ -2416,7 +2421,7 @@ async def test_init_slots_cache_not_require_full_coverage_success(self) -> None: [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]], ] - rc = await get_mocked_redis_client( + rc = await get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=cluster_slots, @@ -2437,11 +2442,11 @@ async def test_init_slots_cache(self) -> None: [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.2", 7005]], ] - rc = await get_mocked_redis_client( + rc = await get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=good_slots_resp ) n_manager = rc.nodes_manager - assert len(n_manager.slots_cache) == REDIS_CLUSTER_HASH_SLOTS + assert len(n_manager.slots_cache) == VALKEY_CLUSTER_HASH_SLOTS for slot_info in good_slots_resp: all_hosts = ["127.0.0.1", "127.0.0.2"] all_ports = [7000, 7001, 7002, 7003, 7004, 7005] @@ -2460,11 +2465,11 @@ async def test_init_slots_cache(self) -> None: async def test_init_slots_cache_cluster_mode_disabled(self) -> None: """ - Test that creating a RedisCluster failes if one of the startup nodes + Test that creating a ValkeyCluster failes if one of the startup nodes has cluster mode disabled """ - with pytest.raises(RedisClusterException) as e: - rc = await get_mocked_redis_client( + with pytest.raises(ValkeyClusterException) as e: + rc = await get_mocked_valkey_client( cluster_slots_raise_error=True, host=default_host, 
port=default_port, @@ -2478,7 +2483,7 @@ async def test_empty_startup_nodes(self) -> None: It should not be possible to create a node manager with no nodes specified """ - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): await NodesManager([], False, {}).initialize() async def test_wrong_startup_nodes_type(self) -> None: @@ -2486,7 +2491,7 @@ async def test_wrong_startup_nodes_type(self) -> None: If something other then a list type itteratable is provided it should fail """ - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): await NodesManager({}, False, {}).initialize() async def test_init_slots_cache_slots_collision(self) -> None: @@ -2502,7 +2507,7 @@ async def test_init_slots_cache_slots_collision(self) -> None: async def mocked_execute_command(self, *args, **kwargs): """ Helper function to return custom slots cache data from - different redis nodes + different valkey nodes """ if self.port == 7000: result = [ @@ -2527,10 +2532,10 @@ async def mocked_execute_command(self, *args, **kwargs): execute_command.side_effect = mocked_execute_command - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: node_1 = ClusterNode("127.0.0.1", 7000) node_2 = ClusterNode("127.0.0.1", 7001) - async with RedisCluster(startup_nodes=[node_1, node_2]): + async with ValkeyCluster(startup_nodes=[node_1, node_2]): ... 
assert str(ex.value).startswith( "startup_nodes could not agree on a valid slots cache" @@ -2543,7 +2548,7 @@ async def test_cluster_one_instance(self) -> None: """ node = ClusterNode(default_host, default_port) cluster_slots = [[0, 16383, ["", default_port]]] - rc = await get_mocked_redis_client( + rc = await get_mocked_valkey_client( startup_nodes=[node], cluster_slots=cluster_slots ) @@ -2553,8 +2558,8 @@ async def test_cluster_one_instance(self) -> None: assert n_node is not None assert n_node == node assert n_node.server_type == PRIMARY - assert len(n.slots_cache) == REDIS_CLUSTER_HASH_SLOTS - for i in range(0, REDIS_CLUSTER_HASH_SLOTS): + assert len(n.slots_cache) == VALKEY_CLUSTER_HASH_SLOTS + for i in range(0, VALKEY_CLUSTER_HASH_SLOTS): assert n.slots_cache[i] == [n_node] await rc.aclose() @@ -2589,10 +2594,10 @@ async def mocked_execute_command(self, *args, **kwargs): # If all startup nodes fail to connect, connection error should be # thrown - with pytest.raises(RedisClusterException) as e: - async with RedisCluster(startup_nodes=[node_1]): + with pytest.raises(ValkeyClusterException) as e: + async with ValkeyCluster(startup_nodes=[node_1]): ... 
- assert "Redis Cluster cannot be connected" in str(e.value) + assert "Valkey Cluster cannot be connected" in str(e.value) with mock.patch.object( AsyncCommandsParser, "initialize", autospec=True @@ -2613,7 +2618,7 @@ def cmd_init_mock(self, r: ClusterNode) -> None: cmd_parser_initialize.side_effect = cmd_init_mock # When at least one startup node is reachable, the cluster # initialization should succeeds - async with RedisCluster(startup_nodes=[node_1, node_2]) as rc: + async with ValkeyCluster(startup_nodes=[node_1, node_2]) as rc: assert rc.get_node(host=default_host, port=7001) is not None assert rc.get_node(host=default_host, port=7002) is not None @@ -2621,19 +2626,19 @@ def cmd_init_mock(self, r: ClusterNode) -> None: class TestClusterPipeline: """Tests for the ClusterPipeline class.""" - async def test_blocked_arguments(self, r: RedisCluster) -> None: + async def test_blocked_arguments(self, r: ValkeyCluster) -> None: """Test handling for blocked pipeline arguments.""" - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: r.pipeline(transaction=True) assert str(ex.value) == "transaction is deprecated in cluster mode" - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: r.pipeline(shard_hint=True) assert str(ex.value) == "shard_hint is deprecated in cluster mode" - async def test_blocked_methods(self, r: RedisCluster) -> None: + async def test_blocked_methods(self, r: ValkeyCluster) -> None: """Test handling for blocked pipeline commands.""" pipeline = r.pipeline() for command in PIPELINE_BLOCKED_COMMANDS: @@ -2641,22 +2646,22 @@ async def test_blocked_methods(self, r: RedisCluster) -> None: if command == "mset_nonatomic": continue - with pytest.raises(RedisClusterException) as exc: + with pytest.raises(ValkeyClusterException) as exc: getattr(pipeline, command)() assert str(exc.value) == ( f"ERROR: Calling pipelined function {command} is blocked " - "when 
running redis in cluster mode..." + "when running valkey in cluster mode..." ) - async def test_empty_stack(self, r: RedisCluster) -> None: + async def test_empty_stack(self, r: ValkeyCluster) -> None: """If a pipeline is executed with no commands it should return a empty list.""" p = r.pipeline() result = await p.execute() assert result == [] - async def test_redis_cluster_pipeline(self, r: RedisCluster) -> None: - """Test that we can use a pipeline with the RedisCluster class""" + async def test_valkey_cluster_pipeline(self, r: ValkeyCluster) -> None: + """Test that we can use a pipeline with the ValkeyCluster class""" result = await ( r.pipeline() .set("A", 1) @@ -2672,7 +2677,7 @@ async def test_redis_cluster_pipeline(self, r: RedisCluster) -> None: assert result == [True, b"1", 1, {b"F": b"V"}, True, True, b"2", b"3", 1, 1, 1] async def test_multi_key_operation_with_a_single_slot( - self, r: RedisCluster + self, r: ValkeyCluster ) -> None: """Test multi key operation with a single slot.""" pipe = r.pipeline() @@ -2686,7 +2691,7 @@ async def test_multi_key_operation_with_a_single_slot( res = await pipe.execute() assert res == [True, True, True, b"1", b"2", b"3"] - async def test_multi_key_operation_with_multi_slots(self, r: RedisCluster) -> None: + async def test_multi_key_operation_with_multi_slots(self, r: ValkeyCluster) -> None: """Test multi key operation with more than one slot.""" pipe = r.pipeline() pipe.set("a{foo}", 1) @@ -2702,7 +2707,7 @@ async def test_multi_key_operation_with_multi_slots(self, r: RedisCluster) -> No res = await pipe.execute() assert res == [True, True, True, True, True, b"1", b"2", b"3", b"4", b"5"] - async def test_cluster_down_error(self, r: RedisCluster) -> None: + async def test_cluster_down_error(self, r: ValkeyCluster) -> None: """ Test that the pipeline retries cluster_error_retry_attempts times before raising an error. 
@@ -2734,7 +2739,7 @@ async def parse_response( == 3 * r.cluster_error_retry_attempts - 2 ) - async def test_connection_error_not_raised(self, r: RedisCluster) -> None: + async def test_connection_error_not_raised(self, r: ValkeyCluster) -> None: """Test ConnectionError handling with raise_on_error=False.""" key = "foo" node = r.get_node_from_key(key, False) @@ -2758,7 +2763,7 @@ async def parse_response( assert node.parse_response.await_count assert isinstance(res[0], ConnectionError) - async def test_connection_error_raised(self, r: RedisCluster) -> None: + async def test_connection_error_raised(self, r: ValkeyCluster) -> None: """Test ConnectionError handling with raise_on_error=True.""" key = "foo" node = r.get_node_from_key(key, False) @@ -2781,7 +2786,7 @@ async def parse_response( with pytest.raises(ConnectionError): await pipe.get(key).get(key).execute(raise_on_error=True) - async def test_asking_error(self, r: RedisCluster) -> None: + async def test_asking_error(self, r: ValkeyCluster) -> None: """Test AskError handling.""" key = "foo" first_node = r.get_node_from_key(key, False) @@ -2802,7 +2807,7 @@ async def test_asking_error(self, r: RedisCluster) -> None: @skip_if_server_version_gte("7.0.0") async def test_moved_redirection_on_slave_with_default( - self, r: RedisCluster + self, r: ValkeyCluster ) -> None: """Test MovedError handling.""" key = "foo" @@ -2838,7 +2843,7 @@ async def parse_response( assert await readwrite_pipe.execute() == [b"bar", b"bar"] async def test_readonly_pipeline_from_readonly_client( - self, r: RedisCluster + self, r: ValkeyCluster ) -> None: """Test that the pipeline uses replicas for read_from_replicas clients.""" # Create a cluster with reading from replications @@ -2858,20 +2863,20 @@ async def test_readonly_pipeline_from_readonly_client( break assert executed_on_replica - async def test_can_run_concurrent_pipelines(self, r: RedisCluster) -> None: + async def test_can_run_concurrent_pipelines(self, r: ValkeyCluster) -> None: 
"""Test that the pipeline can be used concurrently.""" await asyncio.gather( - *(self.test_redis_cluster_pipeline(r) for i in range(100)), + *(self.test_valkey_cluster_pipeline(r) for i in range(100)), *(self.test_multi_key_operation_with_a_single_slot(r) for i in range(100)), *(self.test_multi_key_operation_with_multi_slots(r) for i in range(100)), ) @pytest.mark.onlycluster - async def test_pipeline_with_default_node_error_command(self, create_redis): + async def test_pipeline_with_default_node_error_command(self, create_valkey): """ Test that the default node is being replaced when it raises a relevant exception """ - r = await create_redis(cls=RedisCluster, flushdb=False) + r = await create_valkey(cls=ValkeyCluster, flushdb=False) curr_default_node = r.get_default_node() err = ConnectionError("error") cmd_count = await r.command_count() @@ -2891,7 +2896,7 @@ class TestSSL: """ Tests for SSL connections. - This relies on the --redis-ssl-url for building the client and connecting to the + This relies on the --valkey-ssl-url for building the client and connecting to the appropriate port. 
""" @@ -2900,11 +2905,11 @@ class TestSSL: CLIENT_KEY = get_ssl_filename("client-key.pem") @pytest_asyncio.fixture() - def create_client(self, request: FixtureRequest) -> Callable[..., RedisCluster]: - ssl_url = request.config.option.redis_ssl_url + def create_client(self, request: FixtureRequest) -> Callable[..., ValkeyCluster]: + ssl_url = request.config.option.valkey_ssl_url ssl_host, ssl_port = urlparse(ssl_url)[1].split(":") - async def _create_client(mocked: bool = True, **kwargs: Any) -> RedisCluster: + async def _create_client(mocked: bool = True, **kwargs: Any) -> ValkeyCluster: if mocked: with mock.patch.object( ClusterNode, "execute_command", autospec=True @@ -2930,35 +2935,35 @@ async def execute_command(self, *args, **kwargs): execute_command_mock.side_effect = execute_command - rc = await RedisCluster(host=ssl_host, port=ssl_port, **kwargs) + rc = await ValkeyCluster(host=ssl_host, port=ssl_port, **kwargs) assert len(rc.get_nodes()) == 1 node = rc.get_default_node() assert node.port == int(ssl_port) return rc - return await RedisCluster(host=ssl_host, port=ssl_port, **kwargs) + return await ValkeyCluster(host=ssl_host, port=ssl_port, **kwargs) return _create_client async def test_ssl_connection_without_ssl( - self, create_client: Callable[..., Awaitable[RedisCluster]] + self, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: - with pytest.raises(RedisClusterException) as e: + with pytest.raises(ValkeyClusterException) as e: await create_client(mocked=False, ssl=False) e = e.value.__cause__ assert "Connection closed by server" in str(e) async def test_ssl_with_invalid_cert( - self, create_client: Callable[..., Awaitable[RedisCluster]] + self, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: - with pytest.raises(RedisClusterException) as e: + with pytest.raises(ValkeyClusterException) as e: await create_client(mocked=False, ssl=True) e = e.value.__cause__.__context__ assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e) 
async def test_ssl_connection( - self, create_client: Callable[..., Awaitable[RedisCluster]] + self, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: async with await create_client(ssl=True, ssl_cert_reqs="none") as rc: assert await rc.ping() @@ -2972,7 +2977,7 @@ async def test_ssl_connection( ], ) async def test_ssl_connection_tls12_custom_ciphers( - self, ssl_ciphers, create_client: Callable[..., Awaitable[RedisCluster]] + self, ssl_ciphers, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: async with await create_client( ssl=True, @@ -2983,7 +2988,7 @@ async def test_ssl_connection_tls12_custom_ciphers( assert await rc.ping() async def test_ssl_connection_tls12_custom_ciphers_invalid( - self, create_client: Callable[..., Awaitable[RedisCluster]] + self, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: async with await create_client( ssl=True, @@ -2991,9 +2996,9 @@ async def test_ssl_connection_tls12_custom_ciphers_invalid( ssl_min_version=ssl.TLSVersion.TLSv1_2, ssl_ciphers="foo:bar", ) as rc: - with pytest.raises(RedisClusterException) as e: + with pytest.raises(ValkeyClusterException) as e: assert await rc.ping() - assert "Redis Cluster cannot be connected" in str(e.value) + assert "Valkey Cluster cannot be connected" in str(e.value) @pytest.mark.parametrize( "ssl_ciphers", @@ -3003,7 +3008,7 @@ async def test_ssl_connection_tls12_custom_ciphers_invalid( ], ) async def test_ssl_connection_tls13_custom_ciphers( - self, ssl_ciphers, create_client: Callable[..., Awaitable[RedisCluster]] + self, ssl_ciphers, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: # TLSv1.3 does not support changing the ciphers async with await create_client( @@ -3012,12 +3017,12 @@ async def test_ssl_connection_tls13_custom_ciphers( ssl_min_version=ssl.TLSVersion.TLSv1_2, ssl_ciphers=ssl_ciphers, ) as rc: - with pytest.raises(RedisClusterException) as e: + with pytest.raises(ValkeyClusterException) as e: assert await 
rc.ping() - assert "Redis Cluster cannot be connected" in str(e.value) + assert "Valkey Cluster cannot be connected" in str(e.value) async def test_validating_self_signed_certificate( - self, create_client: Callable[..., Awaitable[RedisCluster]] + self, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: async with await create_client( ssl=True, @@ -3029,7 +3034,7 @@ async def test_validating_self_signed_certificate( assert await rc.ping() async def test_validating_self_signed_string_certificate( - self, create_client: Callable[..., Awaitable[RedisCluster]] + self, create_client: Callable[..., Awaitable[ValkeyCluster]] ) -> None: with open(self.CA_CERT) as f: cert_data = f.read() diff --git a/tests/test_asyncio/test_commands.py b/tests/test_asyncio/test_commands.py index b3e42bae..c1a03c11 100644 --- a/tests/test_asyncio/test_commands.py +++ b/tests/test_asyncio/test_commands.py @@ -11,15 +11,7 @@ import pytest import pytest_asyncio -import redis -from redis import exceptions -from redis._parsers.helpers import ( - _RedisCallbacks, - _RedisCallbacksRESP2, - _RedisCallbacksRESP3, - parse_info, -) -from redis.client import EMPTY_RESPONSE, NEVER_DECODE +import valkey from tests.conftest import ( assert_resp_response, assert_resp_response_in, @@ -28,17 +20,25 @@ skip_if_server_version_lt, skip_unless_arch_bits, ) +from valkey import exceptions +from valkey._parsers.helpers import ( + _ValkeyCallbacks, + _ValkeyCallbacksRESP2, + _ValkeyCallbacksRESP3, + parse_info, +) +from valkey.client import EMPTY_RESPONSE, NEVER_DECODE if sys.version_info >= (3, 11, 3): from asyncio import timeout as async_timeout else: from async_timeout import timeout as async_timeout -REDIS_6_VERSION = "5.9.0" +VALKEY_6_VERSION = "5.9.0" @pytest_asyncio.fixture() -async def r_teardown(r: redis.Redis): +async def r_teardown(r: valkey.Valkey): """ A special fixture which removes the provided names from the database after use """ @@ -54,7 +54,7 @@ def factory(username): 
@pytest_asyncio.fixture() -async def slowlog(r: redis.Redis): +async def slowlog(r: valkey.Valkey): current_config = await r.config_get() old_slower_than_value = current_config["slowlog-log-slower-than"] old_max_legnth_value = current_config["slowlog-max-len"] @@ -68,13 +68,13 @@ async def slowlog(r: redis.Redis): await r.config_set("slowlog-max-len", old_max_legnth_value) -async def redis_server_time(client: redis.Redis): +async def valkey_server_time(client: valkey.Valkey): seconds, milliseconds = await client.time() timestamp = float(f"{seconds}.{milliseconds}") return datetime.datetime.fromtimestamp(timestamp) -async def get_stream_message(client: redis.Redis, stream: str, message_id: str): +async def get_stream_message(client: valkey.Valkey, stream: str, message_id: str): """Fetch a stream message and format it as a (message_id, fields) pair""" response = await client.xrange(stream, min=message_id, max=message_id) assert len(response) == 1 @@ -86,58 +86,58 @@ async def get_stream_message(client: redis.Redis, stream: str, message_id: str): class TestResponseCallbacks: """Tests for the response callback system""" - async def test_response_callbacks(self, r: redis.Redis): - callbacks = _RedisCallbacks + async def test_response_callbacks(self, r: valkey.Valkey): + callbacks = _ValkeyCallbacks if is_resp2_connection(r): - callbacks.update(_RedisCallbacksRESP2) + callbacks.update(_ValkeyCallbacksRESP2) else: - callbacks.update(_RedisCallbacksRESP3) + callbacks.update(_ValkeyCallbacksRESP3) assert r.response_callbacks == callbacks - assert id(r.response_callbacks) != id(_RedisCallbacks) + assert id(r.response_callbacks) != id(_ValkeyCallbacks) r.set_response_callback("GET", lambda x: "static") await r.set("a", "foo") assert await r.get("a") == "static" - async def test_case_insensitive_command_names(self, r: redis.Redis): + async def test_case_insensitive_command_names(self, r: valkey.Valkey): assert r.response_callbacks["ping"] == r.response_callbacks["PING"] -class 
TestRedisCommands: - async def test_command_on_invalid_key_type(self, r: redis.Redis): +class TestValkeyCommands: + async def test_command_on_invalid_key_type(self, r: valkey.Valkey): await r.lpush("a", "1") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): await r.get("a") # SERVER INFORMATION - @skip_if_server_version_lt(REDIS_6_VERSION) - async def test_acl_cat_no_category(self, r: redis.Redis): + @skip_if_server_version_lt(VALKEY_6_VERSION) + async def test_acl_cat_no_category(self, r: valkey.Valkey): categories = await r.acl_cat() assert isinstance(categories, list) assert "read" in categories or b"read" in categories - @skip_if_server_version_lt(REDIS_6_VERSION) - async def test_acl_cat_with_category(self, r: redis.Redis): + @skip_if_server_version_lt(VALKEY_6_VERSION) + async def test_acl_cat_with_category(self, r: valkey.Valkey): commands = await r.acl_cat("read") assert isinstance(commands, list) assert "get" in commands or b"get" in commands - @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_if_server_version_lt(VALKEY_6_VERSION) async def test_acl_deluser(self, r_teardown): - username = "redis-py-user" + username = "valkey-py-user" r = r_teardown(username) assert await r.acl_deluser(username) == 0 assert await r.acl_setuser(username, enabled=False, reset=True) assert await r.acl_deluser(username) == 1 - @skip_if_server_version_lt(REDIS_6_VERSION) - async def test_acl_genpass(self, r: redis.Redis): + @skip_if_server_version_lt(VALKEY_6_VERSION) + async def test_acl_genpass(self, r: valkey.Valkey): password = await r.acl_genpass() assert isinstance(password, (str, bytes)) @skip_if_server_version_lt("7.0.0") async def test_acl_getuser_setuser(self, r_teardown): - username = "redis-py-user" + username = "valkey-py-user" r = r_teardown(username) # test enabled=False assert await r.acl_setuser(username, enabled=False, reset=True) @@ -235,19 +235,19 @@ async def test_acl_getuser_setuser(self, r_teardown): ) assert 
len((await r.acl_getuser(username))["passwords"]) == 1 - @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_if_server_version_lt(VALKEY_6_VERSION) async def test_acl_list(self, r_teardown): - username = "redis-py-user" + username = "valkey-py-user" r = r_teardown(username) start = await r.acl_list() assert await r.acl_setuser(username, enabled=False, reset=True) users = await r.acl_list() assert len(users) == len(start) + 1 - @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_if_server_version_lt(VALKEY_6_VERSION) @pytest.mark.onlynoncluster - async def test_acl_log(self, r_teardown, create_redis): - username = "redis-py-user" + async def test_acl_log(self, r_teardown, create_valkey): + username = "valkey-py-user" r = r_teardown(username) await r.acl_setuser( username, @@ -259,7 +259,7 @@ async def test_acl_log(self, r_teardown, create_redis): ) await r.acl_log_reset() - user_client = await create_redis(username=username) + user_client = await create_valkey(username=username) # Valid operation and key assert await user_client.set("cache:0", 1) @@ -281,50 +281,50 @@ async def test_acl_log(self, r_teardown, create_redis): assert_resp_response_in(r, "client-info", expected, expected.keys()) assert await r.acl_log_reset() - @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_if_server_version_lt(VALKEY_6_VERSION) async def test_acl_setuser_categories_without_prefix_fails(self, r_teardown): - username = "redis-py-user" + username = "valkey-py-user" r = r_teardown(username) with pytest.raises(exceptions.DataError): await r.acl_setuser(username, categories=["list"]) - @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_if_server_version_lt(VALKEY_6_VERSION) async def test_acl_setuser_commands_without_prefix_fails(self, r_teardown): - username = "redis-py-user" + username = "valkey-py-user" r = r_teardown(username) with pytest.raises(exceptions.DataError): await r.acl_setuser(username, commands=["get"]) - @skip_if_server_version_lt(REDIS_6_VERSION) + 
@skip_if_server_version_lt(VALKEY_6_VERSION) async def test_acl_setuser_add_passwords_and_nopass_fails(self, r_teardown): - username = "redis-py-user" + username = "valkey-py-user" r = r_teardown(username) with pytest.raises(exceptions.DataError): await r.acl_setuser(username, passwords="+mypass", nopass=True) - @skip_if_server_version_lt(REDIS_6_VERSION) - async def test_acl_users(self, r: redis.Redis): + @skip_if_server_version_lt(VALKEY_6_VERSION) + async def test_acl_users(self, r: valkey.Valkey): users = await r.acl_users() assert isinstance(users, list) assert len(users) > 0 - @skip_if_server_version_lt(REDIS_6_VERSION) - async def test_acl_whoami(self, r: redis.Redis): + @skip_if_server_version_lt(VALKEY_6_VERSION) + async def test_acl_whoami(self, r: valkey.Valkey): username = await r.acl_whoami() assert isinstance(username, (str, bytes)) @pytest.mark.onlynoncluster - async def test_client_list(self, r: redis.Redis): + async def test_client_list(self, r: valkey.Valkey): clients = await r.client_list() assert isinstance(clients[0], dict) assert "addr" in clients[0] @skip_if_server_version_lt("5.0.0") - async def test_client_list_type(self, r: redis.Redis): - with pytest.raises(exceptions.RedisError): + async def test_client_list_type(self, r: valkey.Valkey): + with pytest.raises(exceptions.ValkeyError): await r.client_list(_type="not a client type") for client_type in ["normal", "master", "replica", "pubsub"]: clients = await r.client_list(_type=client_type) @@ -332,12 +332,12 @@ async def test_client_list_type(self, r: redis.Redis): @skip_if_server_version_lt("5.0.0") @pytest.mark.onlynoncluster - async def test_client_id(self, r: redis.Redis): + async def test_client_id(self, r: valkey.Valkey): assert await r.client_id() > 0 @skip_if_server_version_lt("5.0.0") @pytest.mark.onlynoncluster - async def test_client_unblock(self, r: redis.Redis): + async def test_client_unblock(self, r: valkey.Valkey): myid = await r.client_id() assert not await 
r.client_unblock(myid) assert not await r.client_unblock(myid, error=True) @@ -345,34 +345,34 @@ async def test_client_unblock(self, r: redis.Redis): @skip_if_server_version_lt("2.6.9") @pytest.mark.onlynoncluster - async def test_client_getname(self, r: redis.Redis): + async def test_client_getname(self, r: valkey.Valkey): assert await r.client_getname() is None @skip_if_server_version_lt("2.6.9") @pytest.mark.onlynoncluster - async def test_client_setname(self, r: redis.Redis): - assert await r.client_setname("redis_py_test") + async def test_client_setname(self, r: valkey.Valkey): + assert await r.client_setname("valkey_py_test") assert_resp_response( - r, await r.client_getname(), "redis_py_test", b"redis_py_test" + r, await r.client_getname(), "valkey_py_test", b"valkey_py_test" ) @skip_if_server_version_lt("7.2.0") - async def test_client_setinfo(self, r: redis.Redis): + async def test_client_setinfo(self, r: valkey.Valkey): await r.ping() info = await r.client_info() - assert info["lib-name"] == "redis-py" - assert info["lib-ver"] == redis.__version__ + assert info["lib-name"] == "valkey-py" + assert info["lib-ver"] == valkey.__version__ assert await r.client_setinfo("lib-name", "test") assert await r.client_setinfo("lib-ver", "123") info = await r.client_info() assert info["lib-name"] == "test" assert info["lib-ver"] == "123" - r2 = redis.asyncio.Redis(lib_name="test2", lib_version="1234") + r2 = valkey.asyncio.Valkey(lib_name="test2", lib_version="1234") info = await r2.client_info() assert info["lib-name"] == "test2" assert info["lib-ver"] == "1234" await r2.aclose() - r3 = redis.asyncio.Redis(lib_name=None, lib_version=None) + r3 = valkey.asyncio.Valkey(lib_name=None, lib_version=None) info = await r3.client_info() assert info["lib-name"] == "" assert info["lib-ver"] == "" @@ -380,31 +380,31 @@ async def test_client_setinfo(self, r: redis.Redis): @skip_if_server_version_lt("2.6.9") @pytest.mark.onlynoncluster - async def test_client_kill(self, r: 
redis.Redis, r2): - await r.client_setname("redis-py-c1") - await r2.client_setname("redis-py-c2") + async def test_client_kill(self, r: valkey.Valkey, r2): + await r.client_setname("valkey-py-c1") + await r2.client_setname("valkey-py-c2") clients = [ client for client in await r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_addr = clients_by_name["redis-py-c2"].get("addr") + client_addr = clients_by_name["valkey-py-c2"].get("addr") assert await r.client_kill(client_addr) is True clients = [ client for client in await r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" @skip_if_server_version_lt("2.8.12") - async def test_client_kill_filter_invalid_params(self, r: redis.Redis): + async def test_client_kill_filter_invalid_params(self, r: valkey.Valkey): # empty with pytest.raises(exceptions.DataError): await r.client_kill_filter() @@ -419,86 +419,86 @@ async def test_client_kill_filter_invalid_params(self, r: redis.Redis): @skip_if_server_version_lt("2.8.12") @pytest.mark.onlynoncluster - async def test_client_kill_filter_by_id(self, r: redis.Redis, r2): - await r.client_setname("redis-py-c1") - await r2.client_setname("redis-py-c2") + async def test_client_kill_filter_by_id(self, r: valkey.Valkey, r2): + await r.client_setname("valkey-py-c1") + await r2.client_setname("valkey-py-c2") clients = [ client for client in await r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_2_id = 
clients_by_name["redis-py-c2"].get("id") + client_2_id = clients_by_name["valkey-py-c2"].get("id") resp = await r.client_kill_filter(_id=client_2_id) assert resp == 1 clients = [ client for client in await r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" @skip_if_server_version_lt("2.8.12") @pytest.mark.onlynoncluster - async def test_client_kill_filter_by_addr(self, r: redis.Redis, r2): - await r.client_setname("redis-py-c1") - await r2.client_setname("redis-py-c2") + async def test_client_kill_filter_by_addr(self, r: valkey.Valkey, r2): + await r.client_setname("valkey-py-c1") + await r2.client_setname("valkey-py-c2") clients = [ client for client in await r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_2_addr = clients_by_name["redis-py-c2"].get("addr") + client_2_addr = clients_by_name["valkey-py-c2"].get("addr") resp = await r.client_kill_filter(addr=client_2_addr) assert resp == 1 clients = [ client for client in await r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" @skip_if_server_version_lt("2.6.9") - async def test_client_list_after_client_setname(self, r: redis.Redis): - await r.client_setname("redis_py_test") + async def test_client_list_after_client_setname(self, r: valkey.Valkey): + await r.client_setname("valkey_py_test") clients = await r.client_list() # we don't know which client ours will be - assert "redis_py_test" in [c["name"] for c in 
clients] + assert "valkey_py_test" in [c["name"] for c in clients] @skip_if_server_version_lt("2.9.50") @pytest.mark.onlynoncluster - async def test_client_pause(self, r: redis.Redis): + async def test_client_pause(self, r: valkey.Valkey): assert await r.client_pause(1) assert await r.client_pause(timeout=1) - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): await r.client_pause(timeout="not an integer") @skip_if_server_version_lt("7.2.0") @pytest.mark.onlynoncluster - async def test_client_no_touch(self, r: redis.Redis): + async def test_client_no_touch(self, r: valkey.Valkey): assert await r.client_no_touch("ON") == b"OK" assert await r.client_no_touch("OFF") == b"OK" with pytest.raises(TypeError): await r.client_no_touch() - async def test_config_get(self, r: redis.Redis): + async def test_config_get(self, r: valkey.Valkey): data = await r.config_get() assert "maxmemory" in data assert data["maxmemory"].isdigit() @pytest.mark.onlynoncluster - async def test_config_resetstat(self, r: redis.Redis): + async def test_config_resetstat(self, r: valkey.Valkey): await r.ping() prior_commands_processed = int((await r.info())["total_commands_processed"]) assert prior_commands_processed >= 1 @@ -506,47 +506,47 @@ async def test_config_resetstat(self, r: redis.Redis): reset_commands_processed = int((await r.info())["total_commands_processed"]) assert reset_commands_processed < prior_commands_processed - async def test_config_set(self, r: redis.Redis): + async def test_config_set(self, r: valkey.Valkey): await r.config_set("timeout", 70) assert (await r.config_get())["timeout"] == "70" assert await r.config_set("timeout", 0) assert (await r.config_get())["timeout"] == "0" @pytest.mark.onlynoncluster - async def test_dbsize(self, r: redis.Redis): + async def test_dbsize(self, r: valkey.Valkey): await r.set("a", "foo") await r.set("b", "bar") assert await r.dbsize() == 2 @pytest.mark.onlynoncluster - async def test_echo(self, r: 
redis.Redis): + async def test_echo(self, r: valkey.Valkey): assert await r.echo("foo bar") == b"foo bar" @pytest.mark.onlynoncluster - async def test_info(self, r: redis.Redis): + async def test_info(self, r: valkey.Valkey): await r.set("a", "foo") await r.set("b", "bar") info = await r.info() assert isinstance(info, dict) assert "arch_bits" in info.keys() - assert "redis_version" in info.keys() + assert "valkey_version" in info.keys() @pytest.mark.onlynoncluster - async def test_lastsave(self, r: redis.Redis): + async def test_lastsave(self, r: valkey.Valkey): assert isinstance(await r.lastsave(), datetime.datetime) - async def test_object(self, r: redis.Redis): + async def test_object(self, r: valkey.Valkey): await r.set("a", "foo") assert isinstance(await r.object("refcount", "a"), int) assert isinstance(await r.object("idletime", "a"), int) assert await r.object("encoding", "a") in (b"raw", b"embstr") assert await r.object("idletime", "invalid-key") is None - async def test_ping(self, r: redis.Redis): + async def test_ping(self, r: valkey.Valkey): assert await r.ping() @pytest.mark.onlynoncluster - async def test_slowlog_get(self, r: redis.Redis, slowlog): + async def test_slowlog_get(self, r: valkey.Valkey, slowlog): assert await r.slowlog_reset() unicode_string = chr(3456) + "abcd" + chr(3421) await r.get(unicode_string) @@ -568,7 +568,7 @@ async def test_slowlog_get(self, r: redis.Redis, slowlog): assert isinstance(slowlog[0]["duration"], int) @pytest.mark.onlynoncluster - async def test_slowlog_get_limit(self, r: redis.Redis, slowlog): + async def test_slowlog_get_limit(self, r: valkey.Valkey, slowlog): assert await r.slowlog_reset() await r.get("foo") slowlog = await r.slowlog_get(1) @@ -577,36 +577,36 @@ async def test_slowlog_get_limit(self, r: redis.Redis, slowlog): assert len(slowlog) == 1 @pytest.mark.onlynoncluster - async def test_slowlog_length(self, r: redis.Redis, slowlog): + async def test_slowlog_length(self, r: valkey.Valkey, slowlog): await 
r.get("foo") assert isinstance(await r.slowlog_len(), int) @skip_if_server_version_lt("2.6.0") - async def test_time(self, r: redis.Redis): + async def test_time(self, r: valkey.Valkey): t = await r.time() assert len(t) == 2 assert isinstance(t[0], int) assert isinstance(t[1], int) - async def test_never_decode_option(self, r: redis.Redis): + async def test_never_decode_option(self, r: valkey.Valkey): opts = {NEVER_DECODE: []} await r.delete("a") assert await r.execute_command("EXISTS", "a", **opts) == 0 - async def test_empty_response_option(self, r: redis.Redis): + async def test_empty_response_option(self, r: valkey.Valkey): opts = {EMPTY_RESPONSE: []} await r.delete("a") assert await r.execute_command("EXISTS", "a", **opts) == 0 # BASIC KEY COMMANDS - async def test_append(self, r: redis.Redis): + async def test_append(self, r: valkey.Valkey): assert await r.append("a", "a1") == 2 assert await r.get("a") == b"a1" assert await r.append("a", "a2") == 4 assert await r.get("a") == b"a1a2" @skip_if_server_version_lt("2.6.0") - async def test_bitcount(self, r: redis.Redis): + async def test_bitcount(self, r: valkey.Valkey): await r.setbit("a", 5, True) assert await r.bitcount("a") == 1 await r.setbit("a", 6, True) @@ -626,14 +626,14 @@ async def test_bitcount(self, r: redis.Redis): @skip_if_server_version_lt("2.6.0") @pytest.mark.onlynoncluster - async def test_bitop_not_empty_string(self, r: redis.Redis): + async def test_bitop_not_empty_string(self, r: valkey.Valkey): await r.set("a", "") await r.bitop("not", "r", "a") assert await r.get("r") is None @skip_if_server_version_lt("2.6.0") @pytest.mark.onlynoncluster - async def test_bitop_not(self, r: redis.Redis): + async def test_bitop_not(self, r: valkey.Valkey): test_str = b"\xAA\x00\xFF\x55" correct = ~0xAA00FF55 & 0xFFFFFFFF await r.set("a", test_str) @@ -642,7 +642,7 @@ async def test_bitop_not(self, r: redis.Redis): @skip_if_server_version_lt("2.6.0") @pytest.mark.onlynoncluster - async def 
test_bitop_not_in_place(self, r: redis.Redis): + async def test_bitop_not_in_place(self, r: valkey.Valkey): test_str = b"\xAA\x00\xFF\x55" correct = ~0xAA00FF55 & 0xFFFFFFFF await r.set("a", test_str) @@ -651,7 +651,7 @@ async def test_bitop_not_in_place(self, r: redis.Redis): @skip_if_server_version_lt("2.6.0") @pytest.mark.onlynoncluster - async def test_bitop_single_string(self, r: redis.Redis): + async def test_bitop_single_string(self, r: valkey.Valkey): test_str = b"\x01\x02\xFF" await r.set("a", test_str) await r.bitop("and", "res1", "a") @@ -663,7 +663,7 @@ async def test_bitop_single_string(self, r: redis.Redis): @skip_if_server_version_lt("2.6.0") @pytest.mark.onlynoncluster - async def test_bitop_string_operands(self, r: redis.Redis): + async def test_bitop_string_operands(self, r: valkey.Valkey): await r.set("a", b"\x01\x02\xFF\xFF") await r.set("b", b"\x01\x02\xFF") await r.bitop("and", "res1", "a", "b") @@ -675,7 +675,7 @@ async def test_bitop_string_operands(self, r: redis.Redis): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.7") - async def test_bitpos(self, r: redis.Redis): + async def test_bitpos(self, r: valkey.Valkey): key = "key:bitpos" await r.set(key, b"\xff\xf0\x00") assert await r.bitpos(key, 0) == 12 @@ -688,15 +688,15 @@ async def test_bitpos(self, r: redis.Redis): assert await r.bitpos(key, 1) == -1 @skip_if_server_version_lt("2.8.7") - async def test_bitpos_wrong_arguments(self, r: redis.Redis): + async def test_bitpos_wrong_arguments(self, r: valkey.Valkey): key = "key:bitpos:wrong:args" await r.set(key, b"\xff\xf0\x00") - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): await r.bitpos(key, 0, end=1) == 12 - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): await r.bitpos(key, 7) == 12 - async def test_decr(self, r: redis.Redis): + async def test_decr(self, r: valkey.Valkey): assert await r.decr("a") == -1 assert await r.get("a") == b"-1" 
assert await r.decr("a") == -2 @@ -704,37 +704,37 @@ async def test_decr(self, r: redis.Redis): assert await r.decr("a", amount=5) == -7 assert await r.get("a") == b"-7" - async def test_decrby(self, r: redis.Redis): + async def test_decrby(self, r: valkey.Valkey): assert await r.decrby("a", amount=2) == -2 assert await r.decrby("a", amount=3) == -5 assert await r.get("a") == b"-5" - async def test_delete(self, r: redis.Redis): + async def test_delete(self, r: valkey.Valkey): assert await r.delete("a") == 0 await r.set("a", "foo") assert await r.delete("a") == 1 - async def test_delete_with_multiple_keys(self, r: redis.Redis): + async def test_delete_with_multiple_keys(self, r: valkey.Valkey): await r.set("a", "foo") await r.set("b", "bar") assert await r.delete("a", "b") == 2 assert await r.get("a") is None assert await r.get("b") is None - async def test_delitem(self, r: redis.Redis): + async def test_delitem(self, r: valkey.Valkey): await r.set("a", "foo") await r.delete("a") assert await r.get("a") is None @skip_if_server_version_lt("4.0.0") - async def test_unlink(self, r: redis.Redis): + async def test_unlink(self, r: valkey.Valkey): assert await r.unlink("a") == 0 await r.set("a", "foo") assert await r.unlink("a") == 1 assert await r.get("a") is None @skip_if_server_version_lt("4.0.0") - async def test_unlink_with_multiple_keys(self, r: redis.Redis): + async def test_unlink_with_multiple_keys(self, r: valkey.Valkey): await r.set("a", "foo") await r.set("b", "bar") assert await r.unlink("a", "b") == 2 @@ -742,7 +742,7 @@ async def test_unlink_with_multiple_keys(self, r: redis.Redis): assert await r.get("b") is None @skip_if_server_version_lt("2.6.0") - async def test_dump_and_restore(self, r: redis.Redis): + async def test_dump_and_restore(self, r: valkey.Valkey): await r.set("a", "foo") dumped = await r.dump("a") await r.delete("a") @@ -750,41 +750,41 @@ async def test_dump_and_restore(self, r: redis.Redis): assert await r.get("a") == b"foo" 
@skip_if_server_version_lt("3.0.0") - async def test_dump_and_restore_and_replace(self, r: redis.Redis): + async def test_dump_and_restore_and_replace(self, r: valkey.Valkey): await r.set("a", "bar") dumped = await r.dump("a") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): await r.restore("a", 0, dumped) await r.restore("a", 0, dumped, replace=True) assert await r.get("a") == b"bar" @skip_if_server_version_lt("5.0.0") - async def test_dump_and_restore_absttl(self, r: redis.Redis): + async def test_dump_and_restore_absttl(self, r: valkey.Valkey): await r.set("a", "foo") dumped = await r.dump("a") await r.delete("a") ttl = int( - (await redis_server_time(r) + datetime.timedelta(minutes=1)).timestamp() + (await valkey_server_time(r) + datetime.timedelta(minutes=1)).timestamp() * 1000 ) await r.restore("a", ttl, dumped, absttl=True) assert await r.get("a") == b"foo" assert 0 < await r.ttl("a") <= 61 - async def test_exists(self, r: redis.Redis): + async def test_exists(self, r: valkey.Valkey): assert await r.exists("a") == 0 await r.set("a", "foo") await r.set("b", "bar") assert await r.exists("a") == 1 assert await r.exists("a", "b") == 2 - async def test_exists_contains(self, r: redis.Redis): + async def test_exists_contains(self, r: valkey.Valkey): assert not await r.exists("a") await r.set("a", "foo") assert await r.exists("a") - async def test_expire(self, r: redis.Redis): + async def test_expire(self, r: valkey.Valkey): assert not await r.expire("a", 10) await r.set("a", "foo") assert await r.expire("a", 10) @@ -792,24 +792,24 @@ async def test_expire(self, r: redis.Redis): assert await r.persist("a") assert await r.ttl("a") == -1 - async def test_expireat_datetime(self, r: redis.Redis): - expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) + async def test_expireat_datetime(self, r: valkey.Valkey): + expire_at = await valkey_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") assert 
await r.expireat("a", expire_at) assert 0 < await r.ttl("a") <= 61 - async def test_expireat_no_key(self, r: redis.Redis): - expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) + async def test_expireat_no_key(self, r: valkey.Valkey): + expire_at = await valkey_server_time(r) + datetime.timedelta(minutes=1) assert not await r.expireat("a", expire_at) - async def test_expireat_unixtime(self, r: redis.Redis): - expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) + async def test_expireat_unixtime(self, r: valkey.Valkey): + expire_at = await valkey_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") expire_at_seconds = int(expire_at.timestamp()) assert await r.expireat("a", expire_at_seconds) assert 0 < await r.ttl("a") <= 61 - async def test_get_and_set(self, r: redis.Redis): + async def test_get_and_set(self, r: valkey.Valkey): # get and set can't be tested independently of each other assert await r.get("a") is None byte_string = b"value" @@ -822,7 +822,7 @@ async def test_get_and_set(self, r: redis.Redis): assert await r.get("integer") == str(integer).encode() assert (await r.get("unicode_string")).decode("utf-8") == unicode_string - async def test_get_set_bit(self, r: redis.Redis): + async def test_get_set_bit(self, r: valkey.Valkey): # no value assert not await r.getbit("a", 5) # set bit 5 @@ -838,18 +838,18 @@ async def test_get_set_bit(self, r: redis.Redis): assert await r.setbit("a", 5, True) assert await r.getbit("a", 5) - async def test_getrange(self, r: redis.Redis): + async def test_getrange(self, r: valkey.Valkey): await r.set("a", "foo") assert await r.getrange("a", 0, 0) == b"f" assert await r.getrange("a", 0, 2) == b"foo" assert await r.getrange("a", 3, 4) == b"" - async def test_getset(self, r: redis.Redis): + async def test_getset(self, r: valkey.Valkey): assert await r.getset("a", "foo") is None assert await r.getset("a", "bar") == b"foo" assert await r.get("a") == b"bar" - async def 
test_incr(self, r: redis.Redis): + async def test_incr(self, r: valkey.Valkey): assert await r.incr("a") == 1 assert await r.get("a") == b"1" assert await r.incr("a") == 2 @@ -857,20 +857,20 @@ async def test_incr(self, r: redis.Redis): assert await r.incr("a", amount=5) == 7 assert await r.get("a") == b"7" - async def test_incrby(self, r: redis.Redis): + async def test_incrby(self, r: valkey.Valkey): assert await r.incrby("a") == 1 assert await r.incrby("a", 4) == 5 assert await r.get("a") == b"5" @skip_if_server_version_lt("2.6.0") - async def test_incrbyfloat(self, r: redis.Redis): + async def test_incrbyfloat(self, r: valkey.Valkey): assert await r.incrbyfloat("a") == 1.0 assert await r.get("a") == b"1" assert await r.incrbyfloat("a", 1.1) == 2.1 assert float(await r.get("a")) == float(2.1) @pytest.mark.onlynoncluster - async def test_keys(self, r: redis.Redis): + async def test_keys(self, r: valkey.Valkey): assert await r.keys() == [] keys_with_underscores = {b"test_a", b"test_b"} keys = keys_with_underscores.union({b"testc"}) @@ -880,7 +880,7 @@ async def test_keys(self, r: redis.Redis): assert set(await r.keys(pattern="test*")) == keys @pytest.mark.onlynoncluster - async def test_mget(self, r: redis.Redis): + async def test_mget(self, r: valkey.Valkey): assert await r.mget([]) == [] assert await r.mget(["a", "b"]) == [None, None] await r.set("a", "1") @@ -889,14 +889,14 @@ async def test_mget(self, r: redis.Redis): assert await r.mget("a", "other", "b", "c") == [b"1", None, b"2", b"3"] @pytest.mark.onlynoncluster - async def test_mset(self, r: redis.Redis): + async def test_mset(self, r: valkey.Valkey): d = {"a": b"1", "b": b"2", "c": b"3"} assert await r.mset(d) for k, v in d.items(): assert await r.get(k) == v @pytest.mark.onlynoncluster - async def test_msetnx(self, r: redis.Redis): + async def test_msetnx(self, r: valkey.Valkey): d = {"a": b"1", "b": b"2", "c": b"3"} assert await r.msetnx(d) d2 = {"a": b"x", "d": b"4"} @@ -906,7 +906,7 @@ async def 
test_msetnx(self, r: redis.Redis): assert await r.get("d") is None @skip_if_server_version_lt("2.6.0") - async def test_pexpire(self, r: redis.Redis): + async def test_pexpire(self, r: valkey.Valkey): assert not await r.pexpire("a", 60000) await r.set("a", "foo") assert await r.pexpire("a", 60000) @@ -915,40 +915,40 @@ async def test_pexpire(self, r: redis.Redis): assert await r.pttl("a") == -1 @skip_if_server_version_lt("2.6.0") - async def test_pexpireat_datetime(self, r: redis.Redis): - expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) + async def test_pexpireat_datetime(self, r: valkey.Valkey): + expire_at = await valkey_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") assert await r.pexpireat("a", expire_at) assert 0 < await r.pttl("a") <= 61000 @skip_if_server_version_lt("2.6.0") - async def test_pexpireat_no_key(self, r: redis.Redis): - expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) + async def test_pexpireat_no_key(self, r: valkey.Valkey): + expire_at = await valkey_server_time(r) + datetime.timedelta(minutes=1) assert not await r.pexpireat("a", expire_at) @skip_if_server_version_lt("2.6.0") - async def test_pexpireat_unixtime(self, r: redis.Redis): - expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) + async def test_pexpireat_unixtime(self, r: valkey.Valkey): + expire_at = await valkey_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") expire_at_milliseconds = int(expire_at.timestamp() * 1000) assert await r.pexpireat("a", expire_at_milliseconds) assert 0 < await r.pttl("a") <= 61000 @skip_if_server_version_lt("2.6.0") - async def test_psetex(self, r: redis.Redis): + async def test_psetex(self, r: valkey.Valkey): assert await r.psetex("a", 1000, "value") assert await r.get("a") == b"value" assert 0 < await r.pttl("a") <= 1000 @skip_if_server_version_lt("2.6.0") - async def test_psetex_timedelta(self, r: redis.Redis): + async def 
test_psetex_timedelta(self, r: valkey.Valkey): expire_at = datetime.timedelta(milliseconds=1000) assert await r.psetex("a", expire_at, "value") assert await r.get("a") == b"value" assert 0 < await r.pttl("a") <= 1000 @skip_if_server_version_lt("2.6.0") - async def test_pttl(self, r: redis.Redis): + async def test_pttl(self, r: valkey.Valkey): assert not await r.pexpire("a", 10000) await r.set("a", "1") assert await r.pexpire("a", 10000) @@ -957,7 +957,7 @@ async def test_pttl(self, r: redis.Redis): assert await r.pttl("a") == -1 @skip_if_server_version_lt("2.8.0") - async def test_pttl_no_key(self, r: redis.Redis): + async def test_pttl_no_key(self, r: valkey.Valkey): """PTTL on servers 2.8 and after return -2 when the key doesn't exist""" assert await r.pttl("a") == -2 @@ -975,21 +975,21 @@ async def test_hrandfield(self, r): assert len(await r.hrandfield("key", -10)) == 10 @pytest.mark.onlynoncluster - async def test_randomkey(self, r: redis.Redis): + async def test_randomkey(self, r: valkey.Valkey): assert await r.randomkey() is None for key in ("a", "b", "c"): await r.set(key, 1) assert await r.randomkey() in (b"a", b"b", b"c") @pytest.mark.onlynoncluster - async def test_rename(self, r: redis.Redis): + async def test_rename(self, r: valkey.Valkey): await r.set("a", "1") assert await r.rename("a", "b") assert await r.get("a") is None assert await r.get("b") == b"1" @pytest.mark.onlynoncluster - async def test_renamenx(self, r: redis.Redis): + async def test_renamenx(self, r: valkey.Valkey): await r.set("a", "1") await r.set("b", "2") assert not await r.renamenx("a", "b") @@ -997,13 +997,13 @@ async def test_renamenx(self, r: redis.Redis): assert await r.get("b") == b"2" @skip_if_server_version_lt("2.6.0") - async def test_set_nx(self, r: redis.Redis): + async def test_set_nx(self, r: valkey.Valkey): assert await r.set("a", "1", nx=True) assert not await r.set("a", "2", nx=True) assert await r.get("a") == b"1" @skip_if_server_version_lt("2.6.0") - async def 
test_set_xx(self, r: redis.Redis): + async def test_set_xx(self, r: valkey.Valkey): assert not await r.set("a", "1", xx=True) assert await r.get("a") is None await r.set("a", "bar") @@ -1011,38 +1011,38 @@ async def test_set_xx(self, r: redis.Redis): assert await r.get("a") == b"2" @skip_if_server_version_lt("2.6.0") - async def test_set_px(self, r: redis.Redis): + async def test_set_px(self, r: valkey.Valkey): assert await r.set("a", "1", px=10000) assert await r.get("a") == b"1" assert 0 < await r.pttl("a") <= 10000 assert 0 < await r.ttl("a") <= 10 @skip_if_server_version_lt("2.6.0") - async def test_set_px_timedelta(self, r: redis.Redis): + async def test_set_px_timedelta(self, r: valkey.Valkey): expire_at = datetime.timedelta(milliseconds=1000) assert await r.set("a", "1", px=expire_at) assert 0 < await r.pttl("a") <= 1000 assert 0 < await r.ttl("a") <= 1 @skip_if_server_version_lt("2.6.0") - async def test_set_ex(self, r: redis.Redis): + async def test_set_ex(self, r: valkey.Valkey): assert await r.set("a", "1", ex=10) assert 0 < await r.ttl("a") <= 10 @skip_if_server_version_lt("2.6.0") - async def test_set_ex_timedelta(self, r: redis.Redis): + async def test_set_ex_timedelta(self, r: valkey.Valkey): expire_at = datetime.timedelta(seconds=60) assert await r.set("a", "1", ex=expire_at) assert 0 < await r.ttl("a") <= 60 @skip_if_server_version_lt("2.6.0") - async def test_set_multipleoptions(self, r: redis.Redis): + async def test_set_multipleoptions(self, r: valkey.Valkey): await r.set("a", "val") assert await r.set("a", "1", xx=True, px=10000) assert 0 < await r.ttl("a") <= 10 - @skip_if_server_version_lt(REDIS_6_VERSION) - async def test_set_keepttl(self, r: redis.Redis): + @skip_if_server_version_lt(VALKEY_6_VERSION) + async def test_set_keepttl(self, r: valkey.Valkey): await r.set("a", "val") assert await r.set("a", "1", xx=True, px=10000) assert 0 < await r.ttl("a") <= 10 @@ -1050,36 +1050,36 @@ async def test_set_keepttl(self, r: redis.Redis): assert 
await r.get("a") == b"2" assert 0 < await r.ttl("a") <= 10 - async def test_setex(self, r: redis.Redis): + async def test_setex(self, r: valkey.Valkey): assert await r.setex("a", 60, "1") assert await r.get("a") == b"1" assert 0 < await r.ttl("a") <= 60 - async def test_setnx(self, r: redis.Redis): + async def test_setnx(self, r: valkey.Valkey): assert await r.setnx("a", "1") assert await r.get("a") == b"1" assert not await r.setnx("a", "2") assert await r.get("a") == b"1" - async def test_setrange(self, r: redis.Redis): + async def test_setrange(self, r: valkey.Valkey): assert await r.setrange("a", 5, "foo") == 8 assert await r.get("a") == b"\0\0\0\0\0foo" await r.set("a", "abcdefghijh") assert await r.setrange("a", 6, "12345") == 11 assert await r.get("a") == b"abcdef12345" - async def test_strlen(self, r: redis.Redis): + async def test_strlen(self, r: valkey.Valkey): await r.set("a", "foo") assert await r.strlen("a") == 3 - async def test_substr(self, r: redis.Redis): + async def test_substr(self, r: valkey.Valkey): await r.set("a", "0123456789") assert await r.substr("a", 0) == b"0123456789" assert await r.substr("a", 2) == b"23456789" assert await r.substr("a", 3, 5) == b"345" assert await r.substr("a", 3, -2) == b"345678" - async def test_ttl(self, r: redis.Redis): + async def test_ttl(self, r: valkey.Valkey): await r.set("a", "1") assert await r.expire("a", 10) assert 0 < await r.ttl("a") <= 10 @@ -1087,11 +1087,11 @@ async def test_ttl(self, r: redis.Redis): assert await r.ttl("a") == -1 @skip_if_server_version_lt("2.8.0") - async def test_ttl_nokey(self, r: redis.Redis): + async def test_ttl_nokey(self, r: valkey.Valkey): """TTL on servers 2.8 and after return -2 when the key doesn't exist""" assert await r.ttl("a") == -2 - async def test_type(self, r: redis.Redis): + async def test_type(self, r: valkey.Valkey): assert await r.type("a") == b"none" await r.set("a", "1") assert await r.type("a") == b"string" @@ -1107,7 +1107,7 @@ async def test_type(self, r: 
redis.Redis): # LIST COMMANDS @pytest.mark.onlynoncluster - async def test_blpop(self, r: redis.Redis): + async def test_blpop(self, r: valkey.Valkey): await r.rpush("a", "1", "2") await r.rpush("b", "3", "4") assert_resp_response( @@ -1129,7 +1129,7 @@ async def test_blpop(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_brpop(self, r: redis.Redis): + async def test_brpop(self, r: valkey.Valkey): await r.rpush("a", "1", "2") await r.rpush("b", "3", "4") assert_resp_response( @@ -1151,7 +1151,7 @@ async def test_brpop(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_brpoplpush(self, r: redis.Redis): + async def test_brpoplpush(self, r: valkey.Valkey): await r.rpush("a", "1", "2") await r.rpush("b", "3", "4") assert await r.brpoplpush("a", "b") == b"2" @@ -1161,54 +1161,54 @@ async def test_brpoplpush(self, r: redis.Redis): assert await r.lrange("b", 0, -1) == [b"1", b"2", b"3", b"4"] @pytest.mark.onlynoncluster - async def test_brpoplpush_empty_string(self, r: redis.Redis): + async def test_brpoplpush_empty_string(self, r: valkey.Valkey): await r.rpush("a", "") assert await r.brpoplpush("a", "b") == b"" - async def test_lindex(self, r: redis.Redis): + async def test_lindex(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3") assert await r.lindex("a", "0") == b"1" assert await r.lindex("a", "1") == b"2" assert await r.lindex("a", "2") == b"3" - async def test_linsert(self, r: redis.Redis): + async def test_linsert(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3") assert await r.linsert("a", "after", "2", "2.5") == 4 assert await r.lrange("a", 0, -1) == [b"1", b"2", b"2.5", b"3"] assert await r.linsert("a", "before", "2", "1.5") == 5 assert await r.lrange("a", 0, -1) == [b"1", b"1.5", b"2", b"2.5", b"3"] - async def test_llen(self, r: redis.Redis): + async def test_llen(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3") assert await r.llen("a") == 3 - async def test_lpop(self, r: redis.Redis): + async 
def test_lpop(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3") assert await r.lpop("a") == b"1" assert await r.lpop("a") == b"2" assert await r.lpop("a") == b"3" assert await r.lpop("a") is None - async def test_lpush(self, r: redis.Redis): + async def test_lpush(self, r: valkey.Valkey): assert await r.lpush("a", "1") == 1 assert await r.lpush("a", "2") == 2 assert await r.lpush("a", "3", "4") == 4 assert await r.lrange("a", 0, -1) == [b"4", b"3", b"2", b"1"] - async def test_lpushx(self, r: redis.Redis): + async def test_lpushx(self, r: valkey.Valkey): assert await r.lpushx("a", "1") == 0 assert await r.lrange("a", 0, -1) == [] await r.rpush("a", "1", "2", "3") assert await r.lpushx("a", "4") == 4 assert await r.lrange("a", 0, -1) == [b"4", b"1", b"2", b"3"] - async def test_lrange(self, r: redis.Redis): + async def test_lrange(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3", "4", "5") assert await r.lrange("a", 0, 2) == [b"1", b"2", b"3"] assert await r.lrange("a", 2, 10) == [b"3", b"4", b"5"] assert await r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4", b"5"] - async def test_lrem(self, r: redis.Redis): + async def test_lrem(self, r: valkey.Valkey): await r.rpush("a", "Z", "b", "Z", "Z", "c", "Z", "Z") # remove the first 'Z' item assert await r.lrem("a", 1, "Z") == 1 @@ -1220,18 +1220,18 @@ async def test_lrem(self, r: redis.Redis): assert await r.lrem("a", 0, "Z") == 2 assert await r.lrange("a", 0, -1) == [b"b", b"c"] - async def test_lset(self, r: redis.Redis): + async def test_lset(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3") assert await r.lrange("a", 0, -1) == [b"1", b"2", b"3"] assert await r.lset("a", 1, "4") assert await r.lrange("a", 0, 2) == [b"1", b"4", b"3"] - async def test_ltrim(self, r: redis.Redis): + async def test_ltrim(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3") assert await r.ltrim("a", 0, 1) assert await r.lrange("a", 0, -1) == [b"1", b"2"] - async def test_rpop(self, r: redis.Redis): + async 
def test_rpop(self, r: valkey.Valkey): await r.rpush("a", "1", "2", "3") assert await r.rpop("a") == b"3" assert await r.rpop("a") == b"2" @@ -1239,21 +1239,21 @@ async def test_rpop(self, r: redis.Redis): assert await r.rpop("a") is None @pytest.mark.onlynoncluster - async def test_rpoplpush(self, r: redis.Redis): + async def test_rpoplpush(self, r: valkey.Valkey): await r.rpush("a", "a1", "a2", "a3") await r.rpush("b", "b1", "b2", "b3") assert await r.rpoplpush("a", "b") == b"a3" assert await r.lrange("a", 0, -1) == [b"a1", b"a2"] assert await r.lrange("b", 0, -1) == [b"a3", b"b1", b"b2", b"b3"] - async def test_rpush(self, r: redis.Redis): + async def test_rpush(self, r: valkey.Valkey): assert await r.rpush("a", "1") == 1 assert await r.rpush("a", "2") == 2 assert await r.rpush("a", "3", "4") == 4 assert await r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4"] @skip_if_server_version_lt("6.0.6") - async def test_lpos(self, r: redis.Redis): + async def test_lpos(self, r: valkey.Valkey): assert await r.rpush("a", "a", "b", "c", "1", "2", "3", "c", "c") == 8 assert await r.lpos("a", "a") == 0 assert await r.lpos("a", "c") == 2 @@ -1284,7 +1284,7 @@ async def test_lpos(self, r: redis.Redis): assert await r.lpos("a", "c", count=0, maxlen=3, rank=-1) == [7, 6] assert await r.lpos("a", "c", count=0, maxlen=7, rank=2) == [6] - async def test_rpushx(self, r: redis.Redis): + async def test_rpushx(self, r: valkey.Valkey): assert await r.rpushx("a", "b") == 0 assert await r.lrange("a", 0, -1) == [] await r.rpush("a", "1", "2", "3") @@ -1294,7 +1294,7 @@ async def test_rpushx(self, r: redis.Redis): # SCAN COMMANDS @skip_if_server_version_lt("2.8.0") @pytest.mark.onlynoncluster - async def test_scan(self, r: redis.Redis): + async def test_scan(self, r: valkey.Valkey): await r.set("a", 1) await r.set("b", 2) await r.set("c", 3) @@ -1304,9 +1304,9 @@ async def test_scan(self, r: redis.Redis): _, keys = await r.scan(match="a") assert set(keys) == {b"a"} - 
@skip_if_server_version_lt(REDIS_6_VERSION) + @skip_if_server_version_lt(VALKEY_6_VERSION) @pytest.mark.onlynoncluster - async def test_scan_type(self, r: redis.Redis): + async def test_scan_type(self, r: valkey.Valkey): await r.sadd("a-set", 1) await r.hset("a-hash", "foo", 2) await r.lpush("a-list", "aux", 3) @@ -1315,7 +1315,7 @@ async def test_scan_type(self, r: redis.Redis): @skip_if_server_version_lt("2.8.0") @pytest.mark.onlynoncluster - async def test_scan_iter(self, r: redis.Redis): + async def test_scan_iter(self, r: valkey.Valkey): await r.set("a", 1) await r.set("b", 2) await r.set("c", 3) @@ -1325,7 +1325,7 @@ async def test_scan_iter(self, r: redis.Redis): assert set(keys) == {b"a"} @skip_if_server_version_lt("2.8.0") - async def test_sscan(self, r: redis.Redis): + async def test_sscan(self, r: valkey.Valkey): await r.sadd("a", 1, 2, 3) cursor, members = await r.sscan("a") assert cursor == 0 @@ -1334,7 +1334,7 @@ async def test_sscan(self, r: redis.Redis): assert set(members) == {b"1"} @skip_if_server_version_lt("2.8.0") - async def test_sscan_iter(self, r: redis.Redis): + async def test_sscan_iter(self, r: valkey.Valkey): await r.sadd("a", 1, 2, 3) members = [k async for k in r.sscan_iter("a")] assert set(members) == {b"1", b"2", b"3"} @@ -1342,7 +1342,7 @@ async def test_sscan_iter(self, r: redis.Redis): assert set(members) == {b"1"} @skip_if_server_version_lt("2.8.0") - async def test_hscan(self, r: redis.Redis): + async def test_hscan(self, r: valkey.Valkey): await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) cursor, dic = await r.hscan("a") assert cursor == 0 @@ -1353,7 +1353,7 @@ async def test_hscan(self, r: redis.Redis): assert dic == {} @skip_if_server_version_lt("7.4.0") - async def test_hscan_novalues(self, r: redis.Redis): + async def test_hscan_novalues(self, r: valkey.Valkey): await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) cursor, keys = await r.hscan("a", no_values=True) assert cursor == 0 @@ -1364,7 +1364,7 @@ async def 
test_hscan_novalues(self, r: redis.Redis): assert keys == [] @skip_if_server_version_lt("2.8.0") - async def test_hscan_iter(self, r: redis.Redis): + async def test_hscan_iter(self, r: valkey.Valkey): await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) dic = {k: v async for k, v in r.hscan_iter("a")} assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"} @@ -1374,7 +1374,7 @@ async def test_hscan_iter(self, r: redis.Redis): assert dic == {} @skip_if_server_version_lt("7.4.0") - async def test_hscan_iter_novalues(self, r: redis.Redis): + async def test_hscan_iter_novalues(self, r: valkey.Valkey): await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) keys = list([k async for k in r.hscan_iter("a", no_values=True)]) assert sorted(keys) == [b"a", b"b", b"c"] @@ -1386,7 +1386,7 @@ async def test_hscan_iter_novalues(self, r: redis.Redis): assert keys == [] @skip_if_server_version_lt("2.8.0") - async def test_zscan(self, r: redis.Redis): + async def test_zscan(self, r: valkey.Valkey): await r.zadd("a", {"a": 1, "b": 2, "c": 3}) cursor, pairs = await r.zscan("a") assert cursor == 0 @@ -1395,7 +1395,7 @@ async def test_zscan(self, r: redis.Redis): assert set(pairs) == {(b"a", 1)} @skip_if_server_version_lt("2.8.0") - async def test_zscan_iter(self, r: redis.Redis): + async def test_zscan_iter(self, r: valkey.Valkey): await r.zadd("a", {"a": 1, "b": 2, "c": 3}) pairs = [k async for k in r.zscan_iter("a")] assert set(pairs) == {(b"a", 1), (b"b", 2), (b"c", 3)} @@ -1403,24 +1403,24 @@ async def test_zscan_iter(self, r: redis.Redis): assert set(pairs) == {(b"a", 1)} # SET COMMANDS - async def test_sadd(self, r: redis.Redis): + async def test_sadd(self, r: valkey.Valkey): members = {b"1", b"2", b"3"} await r.sadd("a", *members) assert await r.smembers("a") == members - async def test_scard(self, r: redis.Redis): + async def test_scard(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3") assert await r.scard("a") == 3 @pytest.mark.onlynoncluster - async def test_sdiff(self, r: 
redis.Redis): + async def test_sdiff(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3") assert await r.sdiff("a", "b") == {b"1", b"2", b"3"} await r.sadd("b", "2", "3") assert await r.sdiff("a", "b") == {b"1"} @pytest.mark.onlynoncluster - async def test_sdiffstore(self, r: redis.Redis): + async def test_sdiffstore(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3") assert await r.sdiffstore("c", "a", "b") == 3 assert await r.smembers("c") == {b"1", b"2", b"3"} @@ -1429,14 +1429,14 @@ async def test_sdiffstore(self, r: redis.Redis): assert await r.smembers("c") == {b"1"} @pytest.mark.onlynoncluster - async def test_sinter(self, r: redis.Redis): + async def test_sinter(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3") assert await r.sinter("a", "b") == set() await r.sadd("b", "2", "3") assert await r.sinter("a", "b") == {b"2", b"3"} @pytest.mark.onlynoncluster - async def test_sinterstore(self, r: redis.Redis): + async def test_sinterstore(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3") assert await r.sinterstore("c", "a", "b") == 0 assert await r.smembers("c") == set() @@ -1444,26 +1444,26 @@ async def test_sinterstore(self, r: redis.Redis): assert await r.sinterstore("c", "a", "b") == 2 assert await r.smembers("c") == {b"2", b"3"} - async def test_sismember(self, r: redis.Redis): + async def test_sismember(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3") assert await r.sismember("a", "1") assert await r.sismember("a", "2") assert await r.sismember("a", "3") assert not await r.sismember("a", "4") - async def test_smembers(self, r: redis.Redis): + async def test_smembers(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3") assert await r.smembers("a") == {b"1", b"2", b"3"} @pytest.mark.onlynoncluster - async def test_smove(self, r: redis.Redis): + async def test_smove(self, r: valkey.Valkey): await r.sadd("a", "a1", "a2") await r.sadd("b", "b1", "b2") assert await r.smove("a", "b", "a1") assert await r.smembers("a") == 
{b"a2"} assert await r.smembers("b") == {b"b1", b"b2", b"a1"} - async def test_spop(self, r: redis.Redis): + async def test_spop(self, r: valkey.Valkey): s = [b"1", b"2", b"3"] await r.sadd("a", *s) value = await r.spop("a") @@ -1471,7 +1471,7 @@ async def test_spop(self, r: redis.Redis): assert await r.smembers("a") == set(s) - {value} @skip_if_server_version_lt("3.2.0") - async def test_spop_multi_value(self, r: redis.Redis): + async def test_spop_multi_value(self, r: valkey.Valkey): s = [b"1", b"2", b"3"] await r.sadd("a", *s) values = await r.spop("a", 2) @@ -1485,40 +1485,40 @@ async def test_spop_multi_value(self, r: redis.Redis): r, response, list(set(s) - set(values)), set(s) - set(values) ) - async def test_srandmember(self, r: redis.Redis): + async def test_srandmember(self, r: valkey.Valkey): s = [b"1", b"2", b"3"] await r.sadd("a", *s) assert await r.srandmember("a") in s @skip_if_server_version_lt("2.6.0") - async def test_srandmember_multi_value(self, r: redis.Redis): + async def test_srandmember_multi_value(self, r: valkey.Valkey): s = [b"1", b"2", b"3"] await r.sadd("a", *s) randoms = await r.srandmember("a", number=2) assert len(randoms) == 2 assert set(randoms).intersection(s) == set(randoms) - async def test_srem(self, r: redis.Redis): + async def test_srem(self, r: valkey.Valkey): await r.sadd("a", "1", "2", "3", "4") assert await r.srem("a", "5") == 0 assert await r.srem("a", "2", "4") == 2 assert await r.smembers("a") == {b"1", b"3"} @pytest.mark.onlynoncluster - async def test_sunion(self, r: redis.Redis): + async def test_sunion(self, r: valkey.Valkey): await r.sadd("a", "1", "2") await r.sadd("b", "2", "3") assert await r.sunion("a", "b") == {b"1", b"2", b"3"} @pytest.mark.onlynoncluster - async def test_sunionstore(self, r: redis.Redis): + async def test_sunionstore(self, r: valkey.Valkey): await r.sadd("a", "1", "2") await r.sadd("b", "2", "3") assert await r.sunionstore("c", "a", "b") == 3 assert await r.smembers("c") == {b"1", b"2", 
b"3"} # SORTED SET COMMANDS - async def test_zadd(self, r: redis.Redis): + async def test_zadd(self, r: valkey.Valkey): mapping = {"a1": 1.0, "a2": 2.0, "a3": 3.0} await r.zadd("a", mapping) response = await r.zrange("a", 0, -1, withscores=True) @@ -1541,7 +1541,7 @@ async def test_zadd(self, r: redis.Redis): with pytest.raises(exceptions.DataError): await r.zadd("a", mapping, incr=True) - async def test_zadd_nx(self, r: redis.Redis): + async def test_zadd_nx(self, r: valkey.Valkey): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 99, "a2": 2}, nx=True) == 1 response = await r.zrange("a", 0, -1, withscores=True) @@ -1549,13 +1549,13 @@ async def test_zadd_nx(self, r: redis.Redis): r, response, [(b"a1", 1.0), (b"a2", 2.0)], [[b"a1", 1.0], [b"a2", 2.0]] ) - async def test_zadd_xx(self, r: redis.Redis): + async def test_zadd_xx(self, r: valkey.Valkey): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 99, "a2": 2}, xx=True) == 0 response = await r.zrange("a", 0, -1, withscores=True) assert_resp_response(r, response, [(b"a1", 99.0)], [[b"a1", 99.0]]) - async def test_zadd_ch(self, r: redis.Redis): + async def test_zadd_ch(self, r: valkey.Valkey): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 99, "a2": 2}, ch=True) == 2 response = await r.zrange("a", 0, -1, withscores=True) @@ -1563,21 +1563,21 @@ async def test_zadd_ch(self, r: redis.Redis): r, response, [(b"a2", 2.0), (b"a1", 99.0)], [[b"a2", 2.0], [b"a1", 99.0]] ) - async def test_zadd_incr(self, r: redis.Redis): + async def test_zadd_incr(self, r: valkey.Valkey): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 4.5}, incr=True) == 5.5 - async def test_zadd_incr_with_xx(self, r: redis.Redis): + async def test_zadd_incr_with_xx(self, r: valkey.Valkey): # this asks zadd to incr 'a1' only if it exists, but it clearly - # doesn't. Redis returns a null value in this case and so should - # redis-py + # doesn't. 
Valkey returns a null value in this case and so should + # valkey-py assert await r.zadd("a", {"a1": 1}, xx=True, incr=True) is None - async def test_zcard(self, r: redis.Redis): + async def test_zcard(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zcard("a") == 3 - async def test_zcount(self, r: redis.Redis): + async def test_zcount(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zcount("a", "-inf", "+inf") == 3 assert await r.zcount("a", 1, 2) == 2 @@ -1604,7 +1604,7 @@ async def test_zdiffstore(self, r): response = await r.zrange("out", 0, -1, withscores=True) assert_resp_response(r, response, [(b"a3", 3.0)], [[b"a3", 3.0]]) - async def test_zincrby(self, r: redis.Redis): + async def test_zincrby(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zincrby("a", 1, "a2") == 3.0 assert await r.zincrby("a", 5, "a3") == 8.0 @@ -1612,13 +1612,13 @@ async def test_zincrby(self, r: redis.Redis): assert await r.zscore("a", "a3") == 8.0 @skip_if_server_version_lt("2.8.9") - async def test_zlexcount(self, r: redis.Redis): + async def test_zlexcount(self, r: valkey.Valkey): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zlexcount("a", "-", "+") == 7 assert await r.zlexcount("a", "[b", "[f") == 5 @pytest.mark.onlynoncluster - async def test_zinterstore_sum(self, r: redis.Redis): + async def test_zinterstore_sum(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1629,7 +1629,7 @@ async def test_zinterstore_sum(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_zinterstore_max(self, r: redis.Redis): + async def test_zinterstore_max(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, 
"a4": 4}) @@ -1640,7 +1640,7 @@ async def test_zinterstore_max(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_zinterstore_min(self, r: redis.Redis): + async def test_zinterstore_min(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("b", {"a1": 2, "a2": 3, "a3": 5}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1651,7 +1651,7 @@ async def test_zinterstore_min(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_zinterstore_with_weight(self, r: redis.Redis): + async def test_zinterstore_with_weight(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1662,7 +1662,7 @@ async def test_zinterstore_with_weight(self, r: redis.Redis): ) @skip_if_server_version_lt("4.9.0") - async def test_zpopmax(self, r: redis.Redis): + async def test_zpopmax(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) response = await r.zpopmax("a") assert_resp_response(r, response, [(b"a3", 3)], [b"a3", 3.0]) @@ -1674,7 +1674,7 @@ async def test_zpopmax(self, r: redis.Redis): ) @skip_if_server_version_lt("4.9.0") - async def test_zpopmin(self, r: redis.Redis): + async def test_zpopmin(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) response = await r.zpopmin("a") assert_resp_response(r, response, [(b"a1", 1)], [b"a1", 1.0]) @@ -1687,7 +1687,7 @@ async def test_zpopmin(self, r: redis.Redis): @skip_if_server_version_lt("4.9.0") @pytest.mark.onlynoncluster - async def test_bzpopmax(self, r: redis.Redis): + async def test_bzpopmax(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2}) await r.zadd("b", {"b1": 10, "b2": 20}) assert_resp_response( @@ -1722,7 +1722,7 @@ async def test_bzpopmax(self, r: redis.Redis): @skip_if_server_version_lt("4.9.0") @pytest.mark.onlynoncluster - async def test_bzpopmin(self, r: redis.Redis): + async def test_bzpopmin(self, 
r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2}) await r.zadd("b", {"b1": 10, "b2": 20}) assert_resp_response( @@ -1755,7 +1755,7 @@ async def test_bzpopmin(self, r: redis.Redis): r, await r.bzpopmin("c", timeout=1), (b"c", b"c1", 100), [b"c", b"c1", 100] ) - async def test_zrange(self, r: redis.Redis): + async def test_zrange(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrange("a", 0, 1) == [b"a1", b"a2"] assert await r.zrange("a", 1, 2) == [b"a2", b"a3"] @@ -1777,7 +1777,7 @@ async def test_zrange(self, r: redis.Redis): # ] @skip_if_server_version_lt("2.8.9") - async def test_zrangebylex(self, r: redis.Redis): + async def test_zrangebylex(self, r: valkey.Valkey): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zrangebylex("a", "-", "[c") == [b"a", b"b", b"c"] assert await r.zrangebylex("a", "-", "(c") == [b"a", b"b"] @@ -1786,7 +1786,7 @@ async def test_zrangebylex(self, r: redis.Redis): assert await r.zrangebylex("a", "-", "+", start=3, num=2) == [b"d", b"e"] @skip_if_server_version_lt("2.9.9") - async def test_zrevrangebylex(self, r: redis.Redis): + async def test_zrevrangebylex(self, r: valkey.Valkey): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zrevrangebylex("a", "[c", "-") == [b"c", b"b", b"a"] assert await r.zrevrangebylex("a", "(c", "-") == [b"b", b"a"] @@ -1800,7 +1800,7 @@ async def test_zrevrangebylex(self, r: redis.Redis): assert await r.zrevrangebylex("a", "+", "[f") == [b"g", b"f"] assert await r.zrevrangebylex("a", "+", "-", start=3, num=2) == [b"d", b"c"] - async def test_zrangebyscore(self, r: redis.Redis): + async def test_zrangebyscore(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrangebyscore("a", 2, 4) == [b"a2", b"a3", b"a4"] @@ -1827,14 +1827,14 @@ async def test_zrangebyscore(self, r: redis.Redis): [[b"a2", 2], [b"a3", 3], [b"a4", 4]], ) - 
async def test_zrank(self, r: redis.Redis): + async def test_zrank(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrank("a", "a1") == 0 assert await r.zrank("a", "a2") == 1 assert await r.zrank("a", "a6") is None @skip_if_server_version_lt("7.2.0") - async def test_zrank_withscore(self, r: redis.Redis): + async def test_zrank_withscore(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrank("a", "a1") == 0 assert await r.zrank("a", "a2") == 1 @@ -1844,20 +1844,20 @@ async def test_zrank_withscore(self, r: redis.Redis): ) assert await r.zrank("a", "a6", withscore=True) is None - async def test_zrem(self, r: redis.Redis): + async def test_zrem(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrem("a", "a2") == 1 assert await r.zrange("a", 0, -1) == [b"a1", b"a3"] assert await r.zrem("a", "b") == 0 assert await r.zrange("a", 0, -1) == [b"a1", b"a3"] - async def test_zrem_multiple_keys(self, r: redis.Redis): + async def test_zrem_multiple_keys(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrem("a", "a1", "a2") == 2 assert await r.zrange("a", 0, 5) == [b"a3"] @skip_if_server_version_lt("2.8.9") - async def test_zremrangebylex(self, r: redis.Redis): + async def test_zremrangebylex(self, r: valkey.Valkey): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zremrangebylex("a", "-", "[c") == 3 assert await r.zrange("a", 0, -1) == [b"d", b"e", b"f", b"g"] @@ -1866,19 +1866,19 @@ async def test_zremrangebylex(self, r: redis.Redis): assert await r.zremrangebylex("a", "[h", "+") == 0 assert await r.zrange("a", 0, -1) == [b"d", b"e"] - async def test_zremrangebyrank(self, r: redis.Redis): + async def test_zremrangebyrank(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zremrangebyrank("a", 1, 3) 
== 3 assert await r.zrange("a", 0, 5) == [b"a1", b"a5"] - async def test_zremrangebyscore(self, r: redis.Redis): + async def test_zremrangebyscore(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zremrangebyscore("a", 2, 4) == 3 assert await r.zrange("a", 0, -1) == [b"a1", b"a5"] assert await r.zremrangebyscore("a", 2, 4) == 0 assert await r.zrange("a", 0, -1) == [b"a1", b"a5"] - async def test_zrevrange(self, r: redis.Redis): + async def test_zrevrange(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrevrange("a", 0, 1) == [b"a3", b"a2"] assert await r.zrevrange("a", 1, 2) == [b"a2", b"a1"] @@ -1899,7 +1899,7 @@ async def test_zrevrange(self, r: redis.Redis): r, response, [(b"a3", 3), (b"a2", 2)], [[b"a3", 3], [b"a2", 2]] ) - async def test_zrevrangebyscore(self, r: redis.Redis): + async def test_zrevrangebyscore(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrevrangebyscore("a", 4, 2) == [b"a4", b"a3", b"a2"] @@ -1926,14 +1926,14 @@ async def test_zrevrangebyscore(self, r: redis.Redis): [[b"a4", 4], [b"a3", 3], [b"a2", 2]], ) - async def test_zrevrank(self, r: redis.Redis): + async def test_zrevrank(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrevrank("a", "a1") == 4 assert await r.zrevrank("a", "a2") == 3 assert await r.zrevrank("a", "a6") is None @skip_if_server_version_lt("7.2.0") - async def test_zrevrank_withscore(self, r: redis.Redis): + async def test_zrevrank_withscore(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrevrank("a", "a1") == 4 assert await r.zrevrank("a", "a2") == 3 @@ -1943,14 +1943,14 @@ async def test_zrevrank_withscore(self, r: redis.Redis): ) assert await r.zrevrank("a", "a6", withscore=True) is None - async def test_zscore(self, r: redis.Redis): + async def 
test_zscore(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zscore("a", "a1") == 1.0 assert await r.zscore("a", "a2") == 2.0 assert await r.zscore("a", "a4") is None @pytest.mark.onlynoncluster - async def test_zunionstore_sum(self, r: redis.Redis): + async def test_zunionstore_sum(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1964,7 +1964,7 @@ async def test_zunionstore_sum(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_zunionstore_max(self, r: redis.Redis): + async def test_zunionstore_max(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1978,7 +1978,7 @@ async def test_zunionstore_max(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_zunionstore_min(self, r: redis.Redis): + async def test_zunionstore_min(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 4}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) @@ -1992,7 +1992,7 @@ async def test_zunionstore_min(self, r: redis.Redis): ) @pytest.mark.onlynoncluster - async def test_zunionstore_with_weight(self, r: redis.Redis): + async def test_zunionstore_with_weight(self, r: valkey.Valkey): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) @@ -2007,7 +2007,7 @@ async def test_zunionstore_with_weight(self, r: redis.Redis): # HYPERLOGLOG TESTS @skip_if_server_version_lt("2.8.9") - async def test_pfadd(self, r: redis.Redis): + async def test_pfadd(self, r: valkey.Valkey): members = {b"1", b"2", b"3"} assert await r.pfadd("a", *members) == 1 assert await r.pfadd("a", *members) == 0 @@ -2015,7 +2015,7 @@ async def test_pfadd(self, r: redis.Redis): 
@skip_if_server_version_lt("2.8.9") @pytest.mark.onlynoncluster - async def test_pfcount(self, r: redis.Redis): + async def test_pfcount(self, r: valkey.Valkey): members = {b"1", b"2", b"3"} await r.pfadd("a", *members) assert await r.pfcount("a") == len(members) @@ -2026,7 +2026,7 @@ async def test_pfcount(self, r: redis.Redis): @skip_if_server_version_lt("2.8.9") @pytest.mark.onlynoncluster - async def test_pfmerge(self, r: redis.Redis): + async def test_pfmerge(self, r: valkey.Valkey): mema = {b"1", b"2", b"3"} memb = {b"2", b"3", b"4"} memc = {b"5", b"6", b"7"} @@ -2039,17 +2039,17 @@ async def test_pfmerge(self, r: redis.Redis): assert await r.pfcount("d") == 7 # HASH COMMANDS - async def test_hget_and_hset(self, r: redis.Redis): + async def test_hget_and_hset(self, r: valkey.Valkey): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hget("a", "1") == b"1" assert await r.hget("a", "2") == b"2" assert await r.hget("a", "3") == b"3" - # field was updated, redis returns 0 + # field was updated, valkey returns 0 assert await r.hset("a", "2", 5) == 0 assert await r.hget("a", "2") == b"5" - # field is new, redis returns 1 + # field is new, valkey returns 1 assert await r.hset("a", "4", 4) == 1 assert await r.hget("a", "4") == b"4" @@ -2060,7 +2060,7 @@ async def test_hget_and_hset(self, r: redis.Redis): assert await r.hset("a", 0, 10) == 1 assert await r.hset("a", "", 10) == 1 - async def test_hset_with_multi_key_values(self, r: redis.Redis): + async def test_hset_with_multi_key_values(self, r: valkey.Valkey): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hget("a", "1") == b"1" assert await r.hget("a", "2") == b"2" @@ -2071,71 +2071,71 @@ async def test_hset_with_multi_key_values(self, r: redis.Redis): assert await r.hget("b", "2") == b"2" assert await r.hget("b", "foo") == b"bar" - async def test_hset_without_data(self, r: redis.Redis): + async def test_hset_without_data(self, r: valkey.Valkey): with 
pytest.raises(exceptions.DataError): await r.hset("x") - async def test_hdel(self, r: redis.Redis): + async def test_hdel(self, r: valkey.Valkey): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hdel("a", "2") == 1 assert await r.hget("a", "2") is None assert await r.hdel("a", "1", "3") == 2 assert await r.hlen("a") == 0 - async def test_hexists(self, r: redis.Redis): + async def test_hexists(self, r: valkey.Valkey): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hexists("a", "1") assert not await r.hexists("a", "4") - async def test_hgetall(self, r: redis.Redis): + async def test_hgetall(self, r: valkey.Valkey): h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"} await r.hset("a", mapping=h) assert await r.hgetall("a") == h - async def test_hincrby(self, r: redis.Redis): + async def test_hincrby(self, r: valkey.Valkey): assert await r.hincrby("a", "1") == 1 assert await r.hincrby("a", "1", amount=2) == 3 assert await r.hincrby("a", "1", amount=-2) == 1 @skip_if_server_version_lt("2.6.0") - async def test_hincrbyfloat(self, r: redis.Redis): + async def test_hincrbyfloat(self, r: valkey.Valkey): assert await r.hincrbyfloat("a", "1") == 1.0 assert await r.hincrbyfloat("a", "1") == 2.0 assert await r.hincrbyfloat("a", "1", 1.2) == 3.2 - async def test_hkeys(self, r: redis.Redis): + async def test_hkeys(self, r: valkey.Valkey): h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"} await r.hset("a", mapping=h) local_keys = list(h.keys()) remote_keys = await r.hkeys("a") assert sorted(local_keys) == sorted(remote_keys) - async def test_hlen(self, r: redis.Redis): + async def test_hlen(self, r: valkey.Valkey): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hlen("a") == 3 - async def test_hmget(self, r: redis.Redis): + async def test_hmget(self, r: valkey.Valkey): assert await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) assert await r.hmget("a", "a", "b", "c") == [b"1", b"2", b"3"] - async def test_hmset(self, r: redis.Redis): + 
async def test_hmset(self, r: valkey.Valkey): warning_message = ( - r"^Redis(?:Cluster)*\.hmset\(\) is deprecated\. " - r"Use Redis(?:Cluster)*\.hset\(\) instead\.$" + r"^Valkey(?:Cluster)*\.hmset\(\) is deprecated\. " + r"Use Valkey(?:Cluster)*\.hset\(\) instead\.$" ) h = {b"a": b"1", b"b": b"2", b"c": b"3"} with pytest.warns(DeprecationWarning, match=warning_message): assert await r.hmset("a", h) assert await r.hgetall("a") == h - async def test_hsetnx(self, r: redis.Redis): + async def test_hsetnx(self, r: valkey.Valkey): # Initially set the hash field assert await r.hsetnx("a", "1", 1) assert await r.hget("a", "1") == b"1" assert not await r.hsetnx("a", "1", 2) assert await r.hget("a", "1") == b"1" - async def test_hvals(self, r: redis.Redis): + async def test_hvals(self, r: valkey.Valkey): h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"} await r.hset("a", mapping=h) local_vals = list(h.values()) @@ -2143,22 +2143,22 @@ async def test_hvals(self, r: redis.Redis): assert sorted(local_vals) == sorted(remote_vals) @skip_if_server_version_lt("3.2.0") - async def test_hstrlen(self, r: redis.Redis): + async def test_hstrlen(self, r: valkey.Valkey): await r.hset("a", mapping={"1": "22", "2": "333"}) assert await r.hstrlen("a", "1") == 2 assert await r.hstrlen("a", "2") == 3 # SORT - async def test_sort_basic(self, r: redis.Redis): + async def test_sort_basic(self, r: valkey.Valkey): await r.rpush("a", "3", "2", "1", "4") assert await r.sort("a") == [b"1", b"2", b"3", b"4"] - async def test_sort_limited(self, r: redis.Redis): + async def test_sort_limited(self, r: valkey.Valkey): await r.rpush("a", "3", "2", "1", "4") assert await r.sort("a", start=1, num=2) == [b"2", b"3"] @pytest.mark.onlynoncluster - async def test_sort_by(self, r: redis.Redis): + async def test_sort_by(self, r: valkey.Valkey): await r.set("score:1", 8) await r.set("score:2", 3) await r.set("score:3", 5) @@ -2166,7 +2166,7 @@ async def test_sort_by(self, r: redis.Redis): assert await r.sort("a", 
by="score:*") == [b"2", b"3", b"1"] @pytest.mark.onlynoncluster - async def test_sort_get(self, r: redis.Redis): + async def test_sort_get(self, r: valkey.Valkey): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") @@ -2174,7 +2174,7 @@ async def test_sort_get(self, r: redis.Redis): assert await r.sort("a", get="user:*") == [b"u1", b"u2", b"u3"] @pytest.mark.onlynoncluster - async def test_sort_get_multi(self, r: redis.Redis): + async def test_sort_get_multi(self, r: valkey.Valkey): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") @@ -2189,7 +2189,7 @@ async def test_sort_get_multi(self, r: redis.Redis): ] @pytest.mark.onlynoncluster - async def test_sort_get_groups_two(self, r: redis.Redis): + async def test_sort_get_groups_two(self, r: valkey.Valkey): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") @@ -2201,7 +2201,7 @@ async def test_sort_get_groups_two(self, r: redis.Redis): ] @pytest.mark.onlynoncluster - async def test_sort_groups_string_get(self, r: redis.Redis): + async def test_sort_groups_string_get(self, r: valkey.Valkey): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") @@ -2210,7 +2210,7 @@ async def test_sort_groups_string_get(self, r: redis.Redis): await r.sort("a", get="user:*", groups=True) @pytest.mark.onlynoncluster - async def test_sort_groups_just_one_get(self, r: redis.Redis): + async def test_sort_groups_just_one_get(self, r: valkey.Valkey): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") @@ -2218,7 +2218,7 @@ async def test_sort_groups_just_one_get(self, r: redis.Redis): with pytest.raises(exceptions.DataError): await r.sort("a", get=["user:*"], groups=True) - async def test_sort_groups_no_get(self, r: redis.Redis): + async def test_sort_groups_no_get(self, r: valkey.Valkey): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") @@ -2227,7 
+2227,7 @@ async def test_sort_groups_no_get(self, r: redis.Redis): await r.sort("a", groups=True) @pytest.mark.onlynoncluster - async def test_sort_groups_three_gets(self, r: redis.Redis): + async def test_sort_groups_three_gets(self, r: valkey.Valkey): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") @@ -2241,22 +2241,22 @@ async def test_sort_groups_three_gets(self, r: redis.Redis): (b"u3", b"d3", b"3"), ] - async def test_sort_desc(self, r: redis.Redis): + async def test_sort_desc(self, r: valkey.Valkey): await r.rpush("a", "2", "3", "1") assert await r.sort("a", desc=True) == [b"3", b"2", b"1"] - async def test_sort_alpha(self, r: redis.Redis): + async def test_sort_alpha(self, r: valkey.Valkey): await r.rpush("a", "e", "c", "b", "d", "a") assert await r.sort("a", alpha=True) == [b"a", b"b", b"c", b"d", b"e"] @pytest.mark.onlynoncluster - async def test_sort_store(self, r: redis.Redis): + async def test_sort_store(self, r: valkey.Valkey): await r.rpush("a", "2", "3", "1") assert await r.sort("a", store="sorted_values") == 3 assert await r.lrange("sorted_values", 0, -1) == [b"1", b"2", b"3"] @pytest.mark.onlynoncluster - async def test_sort_all_options(self, r: redis.Redis): + async def test_sort_all_options(self, r: valkey.Valkey): await r.set("user:1:username", "zeus") await r.set("user:2:username", "titan") await r.set("user:3:username", "hermes") @@ -2294,8 +2294,8 @@ async def test_sort_all_options(self, r: redis.Redis): b"apple juice", ] - async def test_sort_issue_924(self, r: redis.Redis): - # Tests for issue https://github.com/andymccurdy/redis-py/issues/924 + async def test_sort_issue_924(self, r: valkey.Valkey): + # Tests for issue https://github.com/andymccurdy/redis-py/issues/924 await r.execute_command("SADD", "issue#924", 1) await r.execute_command("SORT", "issue#924") @@ -2371,13 +2371,13 @@ async def test_cluster_slaves(self, mock_cluster_resp_slaves): @skip_if_server_version_lt("3.0.0")
@skip_if_server_version_gte("7.0.0") @pytest.mark.onlynoncluster - async def test_readwrite(self, r: redis.Redis): + async def test_readwrite(self, r: valkey.Valkey): assert await r.readwrite() @skip_if_server_version_lt("3.0.0") @pytest.mark.onlynoncluster - async def test_readonly_invalid_cluster_state(self, r: redis.Redis): - with pytest.raises(exceptions.RedisError): + async def test_readonly_invalid_cluster_state(self, r: valkey.Valkey): + with pytest.raises(exceptions.ValkeyError): await r.readonly() @skip_if_server_version_lt("3.0.0") @@ -2387,7 +2387,7 @@ async def test_readonly(self, mock_cluster_resp_ok): # GEO COMMANDS @skip_if_server_version_lt("3.2.0") - async def test_geoadd(self, r: redis.Redis): + async def test_geoadd(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2398,12 +2398,12 @@ async def test_geoadd(self, r: redis.Redis): assert await r.zcard("barcelona") == 2 @skip_if_server_version_lt("3.2.0") - async def test_geoadd_invalid_params(self, r: redis.Redis): - with pytest.raises(exceptions.RedisError): + async def test_geoadd_invalid_params(self, r: valkey.Valkey): + with pytest.raises(exceptions.ValkeyError): await r.geoadd("barcelona", (1, 2)) @skip_if_server_version_lt("3.2.0") - async def test_geodist(self, r: redis.Redis): + async def test_geodist(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2414,7 +2414,7 @@ async def test_geodist(self, r: redis.Redis): assert await r.geodist("barcelona", "place1", "place2") == 3067.4157 @skip_if_server_version_lt("3.2.0") - async def test_geodist_units(self, r: redis.Redis): + async def test_geodist_units(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2425,18 +2425,18 @@ async def test_geodist_units(self, r: redis.Redis): assert await r.geodist("barcelona", "place1", "place2", "km") 
== 3.0674 @skip_if_server_version_lt("3.2.0") - async def test_geodist_missing_one_member(self, r: redis.Redis): + async def test_geodist_missing_one_member(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") await r.geoadd("barcelona", values) assert await r.geodist("barcelona", "place1", "missing_member", "km") is None @skip_if_server_version_lt("3.2.0") - async def test_geodist_invalid_units(self, r: redis.Redis): - with pytest.raises(exceptions.RedisError): + async def test_geodist_invalid_units(self, r: valkey.Valkey): + with pytest.raises(exceptions.ValkeyError): assert await r.geodist("x", "y", "z", "inches") @skip_if_server_version_lt("3.2.0") - async def test_geohash(self, r: redis.Redis): + async def test_geohash(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2452,7 +2452,7 @@ async def test_geohash(self, r: redis.Redis): ) @skip_if_server_version_lt("3.2.0") - async def test_geopos(self, r: redis.Redis): + async def test_geopos(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2460,7 +2460,7 @@ async def test_geopos(self, r: redis.Redis): ) await r.geoadd("barcelona", values) - # redis uses 52 bits precision, hereby small errors may be introduced. + # valkey uses 52 bits precision, hereby small errors may be introduced. 
assert_resp_response( r, await r.geopos("barcelona", "place1", "place2"), @@ -2475,16 +2475,16 @@ async def test_geopos(self, r: redis.Redis): ) @skip_if_server_version_lt("4.0.0") - async def test_geopos_no_value(self, r: redis.Redis): + async def test_geopos_no_value(self, r: valkey.Valkey): assert await r.geopos("barcelona", "place1", "place2") == [None, None] @skip_if_server_version_lt("3.2.0") @skip_if_server_version_gte("4.0.0") - async def test_old_geopos_no_value(self, r: redis.Redis): + async def test_old_geopos_no_value(self, r: valkey.Valkey): assert await r.geopos("barcelona", "place1", "place2") == [] @skip_if_server_version_lt("3.2.0") - async def test_georadius(self, r: redis.Redis): + async def test_georadius(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2496,7 +2496,7 @@ async def test_georadius(self, r: redis.Redis): assert await r.georadius("barcelona", 2.187, 41.406, 1000) == [b"\x80place2"] @skip_if_server_version_lt("3.2.0") - async def test_georadius_no_values(self, r: redis.Redis): + async def test_georadius_no_values(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2507,7 +2507,7 @@ async def test_georadius_no_values(self, r: redis.Redis): assert await r.georadius("barcelona", 1, 2, 1000) == [] @skip_if_server_version_lt("3.2.0") - async def test_georadius_units(self, r: redis.Redis): + async def test_georadius_units(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2521,7 +2521,7 @@ async def test_georadius_units(self, r: redis.Redis): @skip_unless_arch_bits(64) @skip_if_server_version_lt("3.2.0") - async def test_georadius_with(self, r: redis.Redis): + async def test_georadius_with(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2576,7 +2576,7 @@ async 
def test_georadius_with(self, r: redis.Redis): ) @skip_if_server_version_lt("3.2.0") - async def test_georadius_count(self, r: redis.Redis): + async def test_georadius_count(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2589,7 +2589,7 @@ async def test_georadius_count(self, r: redis.Redis): ] @skip_if_server_version_lt("3.2.0") - async def test_georadius_sort(self, r: redis.Redis): + async def test_georadius_sort(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2608,7 +2608,7 @@ async def test_georadius_sort(self, r: redis.Redis): @skip_if_server_version_lt("3.2.0") @pytest.mark.onlynoncluster - async def test_georadius_store(self, r: redis.Redis): + async def test_georadius_store(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2622,7 +2622,7 @@ async def test_georadius_store(self, r: redis.Redis): @skip_unless_arch_bits(64) @skip_if_server_version_lt("3.2.0") @pytest.mark.onlynoncluster - async def test_georadius_store_dist(self, r: redis.Redis): + async def test_georadius_store_dist(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2638,7 +2638,7 @@ async def test_georadius_store_dist(self, r: redis.Redis): @skip_unless_arch_bits(64) @skip_if_server_version_lt("3.2.0") - async def test_georadiusmember(self, r: redis.Redis): + async def test_georadiusmember(self, r: valkey.Valkey): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, @@ -2670,7 +2670,7 @@ async def test_georadiusmember(self, r: redis.Redis): ] @skip_if_server_version_lt("5.0.0") - async def test_xack(self, r: redis.Redis): + async def test_xack(self, r: valkey.Valkey): stream = "stream" group = "group" consumer = "consumer" @@ -2691,7 +2691,7 @@ async def test_xack(self, r: 
redis.Redis): assert await r.xack(stream, group, m2, m3) == 2 @skip_if_server_version_lt("5.0.0") - async def test_xadd(self, r: redis.Redis): + async def test_xadd(self, r: valkey.Valkey): stream = "stream" message_id = await r.xadd(stream, {"foo": "bar"}) assert re.match(rb"[0-9]+\-[0-9]+", message_id) @@ -2705,7 +2705,7 @@ async def test_xadd(self, r: redis.Redis): assert await r.xlen(stream) == 2 @skip_if_server_version_lt("5.0.0") - async def test_xclaim(self, r: redis.Redis): + async def test_xclaim(self, r: valkey.Valkey): stream = "stream" group = "group" consumer1 = "consumer1" @@ -2743,7 +2743,7 @@ async def test_xclaim(self, r: redis.Redis): ) == [message_id] @skip_if_server_version_lt("7.0.0") - async def test_xclaim_trimmed(self, r: redis.Redis): + async def test_xclaim_trimmed(self, r: valkey.Valkey): # xclaim should not raise an exception if the item is not there stream = "stream" group = "group" @@ -2767,7 +2767,7 @@ async def test_xclaim_trimmed(self, r: redis.Redis): assert item[0][0] == sid2 @skip_if_server_version_lt("5.0.0") - async def test_xdel(self, r: redis.Redis): + async def test_xdel(self, r: valkey.Valkey): stream = "stream" # deleting from an empty stream doesn't do anything @@ -2782,7 +2782,7 @@ async def test_xdel(self, r: redis.Redis): assert await r.xdel(stream, m2, m3) == 2 @skip_if_server_version_lt("7.0.0") - async def test_xgroup_create(self, r: redis.Redis): + async def test_xgroup_create(self, r: valkey.Valkey): # tests xgroup_create and xinfo_groups stream = "stream" group = "group" @@ -2805,7 +2805,7 @@ async def test_xgroup_create(self, r: redis.Redis): assert await r.xinfo_groups(stream) == expected @skip_if_server_version_lt("7.0.0") - async def test_xgroup_create_mkstream(self, r: redis.Redis): + async def test_xgroup_create_mkstream(self, r: valkey.Valkey): # tests xgroup_create and xinfo_groups stream = "stream" group = "group" @@ -2831,7 +2831,7 @@ async def test_xgroup_create_mkstream(self, r: redis.Redis): assert 
await r.xinfo_groups(stream) == expected @skip_if_server_version_lt("5.0.0") - async def test_xgroup_delconsumer(self, r: redis.Redis): + async def test_xgroup_delconsumer(self, r: valkey.Valkey): stream = "stream" group = "group" consumer = "consumer" @@ -2849,7 +2849,7 @@ async def test_xgroup_delconsumer(self, r: redis.Redis): assert await r.xgroup_delconsumer(stream, group, consumer) == 2 @skip_if_server_version_lt("5.0.0") - async def test_xgroup_destroy(self, r: redis.Redis): + async def test_xgroup_destroy(self, r: valkey.Valkey): stream = "stream" group = "group" await r.xadd(stream, {"foo": "bar"}) @@ -2861,7 +2861,7 @@ async def test_xgroup_destroy(self, r: redis.Redis): assert await r.xgroup_destroy(stream, group) @skip_if_server_version_lt("7.0.0") - async def test_xgroup_setid(self, r: redis.Redis): + async def test_xgroup_setid(self, r: valkey.Valkey): stream = "stream" group = "group" message_id = await r.xadd(stream, {"foo": "bar"}) @@ -2882,7 +2882,7 @@ async def test_xgroup_setid(self, r: redis.Redis): assert await r.xinfo_groups(stream) == expected @skip_if_server_version_lt("7.2.0") - async def test_xinfo_consumers(self, r: redis.Redis): + async def test_xinfo_consumers(self, r: valkey.Valkey): stream = "stream" group = "group" consumer1 = "consumer1" @@ -2909,7 +2909,7 @@ async def test_xinfo_consumers(self, r: redis.Redis): assert info == expected @skip_if_server_version_lt("5.0.0") - async def test_xinfo_stream(self, r: redis.Redis): + async def test_xinfo_stream(self, r: valkey.Valkey): stream = "stream" m1 = await r.xadd(stream, {"foo": "bar"}) m2 = await r.xadd(stream, {"foo": "bar"}) @@ -2920,7 +2920,7 @@ async def test_xinfo_stream(self, r: redis.Redis): assert info["last-entry"] == await get_stream_message(r, stream, m2) @skip_if_server_version_lt("5.0.0") - async def test_xlen(self, r: redis.Redis): + async def test_xlen(self, r: valkey.Valkey): stream = "stream" assert await r.xlen(stream) == 0 await r.xadd(stream, {"foo": "bar"}) @@ 
-2928,7 +2928,7 @@ async def test_xlen(self, r: redis.Redis): assert await r.xlen(stream) == 2 @skip_if_server_version_lt("5.0.0") - async def test_xpending(self, r: redis.Redis): + async def test_xpending(self, r: valkey.Valkey): stream = "stream" group = "group" consumer1 = "consumer1" @@ -2957,7 +2957,7 @@ async def test_xpending(self, r: redis.Redis): assert await r.xpending(stream, group) == expected @skip_if_server_version_lt("5.0.0") - async def test_xpending_range(self, r: redis.Redis): + async def test_xpending_range(self, r: valkey.Valkey): stream = "stream" group = "group" consumer1 = "consumer1" @@ -2981,7 +2981,7 @@ async def test_xpending_range(self, r: redis.Redis): assert response[1]["consumer"] == consumer2.encode() @skip_if_server_version_lt("5.0.0") - async def test_xrange(self, r: redis.Redis): + async def test_xrange(self, r: valkey.Valkey): stream = "stream" m1 = await r.xadd(stream, {"foo": "bar"}) m2 = await r.xadd(stream, {"foo": "bar"}) @@ -3004,7 +3004,7 @@ def get_ids(results): assert get_ids(results) == [m1] @skip_if_server_version_lt("5.0.0") - async def test_xread(self, r: redis.Redis): + async def test_xread(self, r: valkey.Valkey): stream = "stream" m1 = await r.xadd(stream, {"foo": "bar"}) m2 = await r.xadd(stream, {"bing": "baz"}) @@ -3035,7 +3035,7 @@ async def test_xread(self, r: redis.Redis): ) @skip_if_server_version_lt("5.0.0") - async def test_xreadgroup(self, r: redis.Redis): + async def test_xreadgroup(self, r: valkey.Valkey): stream = "stream" group = "group" consumer = "consumer" @@ -3102,7 +3102,7 @@ async def test_xreadgroup(self, r: redis.Redis): ) @skip_if_server_version_lt("5.0.0") - async def test_xrevrange(self, r: redis.Redis): + async def test_xrevrange(self, r: valkey.Valkey): stream = "stream" m1 = await r.xadd(stream, {"foo": "bar"}) m2 = await r.xadd(stream, {"foo": "bar"}) @@ -3125,7 +3125,7 @@ def get_ids(results): assert get_ids(results) == [m4] @skip_if_server_version_lt("5.0.0") - async def 
test_xtrim(self, r: redis.Redis): + async def test_xtrim(self, r: valkey.Valkey): stream = "stream" # trimming an empty key doesn't do anything @@ -3144,7 +3144,7 @@ async def test_xtrim(self, r: redis.Redis): assert await r.xtrim(stream, 3, approximate=False) == 1 @pytest.mark.onlynoncluster - async def test_bitfield_operations(self, r: redis.Redis): + async def test_bitfield_operations(self, r: valkey.Valkey): # comments show affected bits await r.execute_command("SELECT", 10) bf = r.bitfield("a") @@ -3214,7 +3214,7 @@ async def test_bitfield_operations(self, r: redis.Redis): assert resp == [0, None, 255] @skip_if_server_version_lt("6.0.0") - async def test_bitfield_ro(self, r: redis.Redis): + async def test_bitfield_ro(self, r: valkey.Valkey): bf = r.bitfield("a") resp = await bf.set("u8", 8, 255).execute() assert resp == [0] @@ -3227,7 +3227,7 @@ async def test_bitfield_ro(self, r: redis.Redis): assert resp == [0, 15, 15, 14] @skip_if_server_version_lt("4.0.0") - async def test_memory_stats(self, r: redis.Redis): + async def test_memory_stats(self, r: valkey.Valkey): # put a key into the current db to make sure that "db." 
# has data await r.set("foo", "bar") @@ -3238,18 +3238,18 @@ async def test_memory_stats(self, r: redis.Redis): assert isinstance(value, dict) @skip_if_server_version_lt("4.0.0") - async def test_memory_usage(self, r: redis.Redis): + async def test_memory_usage(self, r: valkey.Valkey): await r.set("foo", "bar") assert isinstance(await r.memory_usage("foo"), int) @skip_if_server_version_lt("4.0.0") - async def test_module_list(self, r: redis.Redis): + async def test_module_list(self, r: valkey.Valkey): assert isinstance(await r.module_list(), list) for x in await r.module_list(): assert isinstance(x, dict) @pytest.mark.onlynoncluster - async def test_interrupted_command(self, r: redis.Redis): + async def test_interrupted_command(self, r: valkey.Valkey): """ Regression test for issue #1128: An Un-handled BaseException will leave the socket with un-read response to a previous @@ -3282,7 +3282,7 @@ async def helper(): @pytest.mark.onlynoncluster class TestBinarySave: - async def test_binary_get_set(self, r: redis.Redis): + async def test_binary_get_set(self, r: valkey.Valkey): assert await r.set(" foo bar ", "123") assert await r.get(" foo bar ") == b"123" @@ -3302,7 +3302,7 @@ async def test_binary_get_set(self, r: redis.Redis): assert await r.delete(" foo\r\nbar\r\n ") assert await r.delete(" \r\n\t\x07\x13 ") - async def test_binary_lists(self, r: redis.Redis): + async def test_binary_lists(self, r: valkey.Valkey): mapping = { b"foo bar": [b"1", b"2", b"3"], b"foo\r\nbar\r\n": [b"4", b"5", b"6"], @@ -3319,9 +3319,9 @@ async def test_binary_lists(self, r: redis.Redis): for key, value in mapping.items(): assert await r.lrange(key, 0, -1) == value - async def test_22_info(self, r: redis.Redis): + async def test_22_info(self, r: valkey.Valkey): """ - Older Redis versions contained 'allocation_stats' in INFO that + Older Valkey versions contained 'allocation_stats' in INFO that was the cause of a number of bugs when parsing. 
""" info = ( @@ -3355,14 +3355,14 @@ async def test_22_info(self, r: redis.Redis): assert "6" in parsed["allocation_stats"] assert ">=256" in parsed["allocation_stats"] - async def test_large_responses(self, r: redis.Redis): + async def test_large_responses(self, r: valkey.Valkey): """The PythonParser has some special cases for return values > 1MB""" # load up 5MB of data into a key data = "".join([ascii_letters] * (5000000 // len(ascii_letters))) await r.set("a", data) assert await r.get("a") == data.encode() - async def test_floating_point_encoding(self, r: redis.Redis): + async def test_floating_point_encoding(self, r: valkey.Valkey): """ High precision floating point values sent to the server should keep precision. diff --git a/tests/test_asyncio/test_connect.py b/tests/test_asyncio/test_connect.py index 0df7ebb4..d0f0f693 100644 --- a/tests/test_asyncio/test_connect.py +++ b/tests/test_asyncio/test_connect.py @@ -5,12 +5,12 @@ import ssl import pytest -from redis.asyncio.connection import ( +from valkey.asyncio.connection import ( Connection, SSLConnection, UnixDomainSocketConnection, ) -from redis.exceptions import ConnectionError +from valkey.exceptions import ConnectionError from ..ssl_utils import get_ssl_filename @@ -142,7 +142,7 @@ async def _assert_connect( async def _handler(reader, writer): try: - return await _redis_request_handler(reader, writer, stop_event) + return await _valkey_request_handler(reader, writer, stop_event) finally: writer.close() await writer.wait_closed() @@ -177,7 +177,7 @@ async def _handler(reader, writer): await finished.wait() -async def _redis_request_handler(reader, writer, stop_event): +async def _valkey_request_handler(reader, writer, stop_event): buffer = b"" command = None command_ptr = None diff --git a/tests/test_asyncio/test_connection.py b/tests/test_asyncio/test_connection.py index 4ff38086..4da68b43 100644 --- a/tests/test_asyncio/test_connection.py +++ b/tests/test_asyncio/test_connection.py @@ -4,28 +4,28 @@ 
from unittest.mock import patch import pytest -import redis -from redis._parsers import ( +import valkey +from tests.conftest import skip_if_server_version_lt +from valkey._parsers import ( _AsyncHiredisParser, _AsyncRESP2Parser, _AsyncRESP3Parser, _AsyncRESPBase, ) -from redis.asyncio import ConnectionPool, Redis -from redis.asyncio.connection import Connection, UnixDomainSocketConnection, parse_url -from redis.asyncio.retry import Retry -from redis.backoff import NoBackoff -from redis.exceptions import ConnectionError, InvalidResponse, TimeoutError -from redis.utils import HIREDIS_AVAILABLE -from tests.conftest import skip_if_server_version_lt +from valkey.asyncio import ConnectionPool, Valkey +from valkey.asyncio.connection import Connection, UnixDomainSocketConnection, parse_url +from valkey.asyncio.retry import Retry +from valkey.backoff import NoBackoff +from valkey.exceptions import ConnectionError, InvalidResponse, TimeoutError +from valkey.utils import HIREDIS_AVAILABLE from .compat import mock from .mocks import MockStream @pytest.mark.onlynoncluster -async def test_invalid_response(create_redis): - r = await create_redis(single_connection_client=True) +async def test_invalid_response(create_valkey): + r = await create_valkey(single_connection_client=True) raw = b"x" fake_stream = MockStream(raw + b"\r\n") @@ -46,7 +46,7 @@ async def test_invalid_response(create_redis): @pytest.mark.onlynoncluster async def test_single_connection(): """Test that concurrent requests on a single client are synchronised.""" - r = Redis(single_connection_client=True) + r = Valkey(single_connection_client=True) init_call_count = 0 command_call_count = 0 @@ -91,7 +91,7 @@ async def get_conn(_): @skip_if_server_version_lt("4.0.0") -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster async def test_loading_external_modules(r): def inner(): @@ -102,9 +102,9 @@ def inner(): assert isinstance(getattr(r, "myfuncname"), types.FunctionType) # and call it - from 
redis.commands import RedisModuleCommands + from valkey.commands import ValkeyModuleCommands - j = RedisModuleCommands.json + j = ValkeyModuleCommands.json r.load_external_module("sometestfuncname", j) # d = {'hello': 'world!'} @@ -173,7 +173,7 @@ async def test_connect_timeout_error_without_retry(): @pytest.mark.onlynoncluster -async def test_connection_parse_response_resume(r: redis.Redis): +async def test_connection_parse_response_resume(r: valkey.Valkey): """ This test verifies that the Connection parser, be that PythonParser or HiredisParser, @@ -289,23 +289,25 @@ async def dummy_method(*args, **kwargs): @pytest.mark.onlynoncluster def test_create_single_connection_client_from_url(): - client = Redis.from_url("redis://localhost:6379/0?", single_connection_client=True) + client = Valkey.from_url( + "valkey://localhost:6379/0?", single_connection_client=True + ) assert client.single_connection_client is True @pytest.mark.parametrize("from_url", (True, False), ids=("from_url", "from_args")) async def test_pool_auto_close(request, from_url): - """Verify that basic Redis instances have auto_close_connection_pool set to True""" + """Verify that basic Valkey instances have auto_close_connection_pool set to True""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") url_args = parse_url(url) - async def get_redis_connection(): + async def get_valkey_connection(): if from_url: - return Redis.from_url(url) - return Redis(**url_args) + return Valkey.from_url(url) + return Valkey(**url_args) - r1 = await get_redis_connection() + r1 = await get_valkey_connection() assert r1.auto_close_connection_pool is True await r1.aclose() @@ -318,8 +320,8 @@ async def mock_aclose(self): nonlocal calls calls += 1 - url: str = request.config.getoption("--redis-url") - r1 = await Redis.from_url(url) + url: str = request.config.getoption("--valkey-url") + r1 = await Valkey.from_url(url) with patch.object(r1, "aclose", mock_aclose): 
with pytest.deprecated_call(): await r1.close() @@ -330,46 +332,46 @@ async def mock_aclose(self): async def test_pool_from_url_deprecation(request): - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") with pytest.deprecated_call(): - return Redis.from_url(url, auto_close_connection_pool=False) + return Valkey.from_url(url, auto_close_connection_pool=False) async def test_pool_auto_close_disable(request): """Verify that auto_close_connection_pool can be disabled (deprecated)""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") url_args = parse_url(url) - async def get_redis_connection(): + async def get_valkey_connection(): url_args["auto_close_connection_pool"] = False with pytest.deprecated_call(): - return Redis(**url_args) + return Valkey(**url_args) - r1 = await get_redis_connection() + r1 = await get_valkey_connection() assert r1.auto_close_connection_pool is False await r1.connection_pool.disconnect() await r1.aclose() @pytest.mark.parametrize("from_url", (True, False), ids=("from_url", "from_args")) -async def test_redis_connection_pool(request, from_url): - """Verify that basic Redis instances using `connection_pool` +async def test_valkey_connection_pool(request, from_url): + """Verify that basic Valkey instances using `connection_pool` have auto_close_connection_pool set to False""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") url_args = parse_url(url) pool = None - async def get_redis_connection(): + async def get_valkey_connection(): nonlocal pool if from_url: pool = ConnectionPool.from_url(url) else: pool = ConnectionPool(**url_args) - return Redis(connection_pool=pool) + return Valkey(connection_pool=pool) called = 0 @@ -378,7 +380,7 @@ async def mock_disconnect(_): called += 1 with patch.object(ConnectionPool, "disconnect", mock_disconnect): - async with await 
get_redis_connection() as r1: + async with await get_valkey_connection() as r1: assert r1.auto_close_connection_pool is False assert called == 0 @@ -386,22 +388,22 @@ async def mock_disconnect(_): @pytest.mark.parametrize("from_url", (True, False), ids=("from_url", "from_args")) -async def test_redis_from_pool(request, from_url): - """Verify that basic Redis instances created using `from_pool()` +async def test_valkey_from_pool(request, from_url): + """Verify that basic Valkey instances created using `from_pool()` have auto_close_connection_pool set to True""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") url_args = parse_url(url) pool = None - async def get_redis_connection(): + async def get_valkey_connection(): nonlocal pool if from_url: pool = ConnectionPool.from_url(url) else: pool = ConnectionPool(**url_args) - return Redis.from_pool(pool) + return Valkey.from_pool(pool) called = 0 @@ -410,7 +412,7 @@ async def mock_disconnect(_): called += 1 with patch.object(ConnectionPool, "disconnect", mock_disconnect): - async with await get_redis_connection() as r1: + async with await get_valkey_connection() as r1: assert r1.auto_close_connection_pool is True assert called == 1 @@ -418,16 +420,16 @@ async def mock_disconnect(_): @pytest.mark.parametrize("auto_close", (True, False)) -async def test_redis_pool_auto_close_arg(request, auto_close): - """test that redis instance where pool is provided have +async def test_valkey_pool_auto_close_arg(request, auto_close): + """test that valkey instance where pool is provided have auto_close_connection_pool set to False, regardless of arg""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") pool = ConnectionPool.from_url(url) - async def get_redis_connection(): + async def get_valkey_connection(): with pytest.deprecated_call(): - client = Redis(connection_pool=pool, auto_close_connection_pool=auto_close) + 
client = Valkey(connection_pool=pool, auto_close_connection_pool=auto_close) return client called = 0 @@ -437,7 +439,7 @@ async def mock_disconnect(_): called += 1 with patch.object(ConnectionPool, "disconnect", mock_disconnect): - async with await get_redis_connection() as r1: + async with await get_valkey_connection() as r1: assert r1.auto_close_connection_pool is False assert called == 0 @@ -446,15 +448,15 @@ async def mock_disconnect(_): async def test_client_garbage_collection(request): """ - Test that a Redis client will call _close() on any + Test that a Valkey client will call _close() on any connection that it holds at time of destruction """ - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") pool = ConnectionPool.from_url(url) # create a client with a connection from the pool - client = Redis(connection_pool=pool, single_connection_client=True) + client = Valkey(connection_pool=pool, single_connection_client=True) await client.initialize() with mock.patch.object(client, "connection") as a: # we cannot, in unittests, or from asyncio, reliably trigger garbage collection @@ -473,11 +475,11 @@ async def test_connection_garbage_collection(request): stream that it holds. 
""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") pool = ConnectionPool.from_url(url) # create a client with a connection from the pool - client = Redis(connection_pool=pool, single_connection_client=True) + client = Valkey(connection_pool=pool, single_connection_client=True) await client.initialize() conn = client.connection diff --git a/tests/test_asyncio/test_connection_pool.py b/tests/test_asyncio/test_connection_pool.py index 5e4d3f20..c227c749 100644 --- a/tests/test_asyncio/test_connection_pool.py +++ b/tests/test_asyncio/test_connection_pool.py @@ -3,9 +3,9 @@ import pytest import pytest_asyncio -import redis.asyncio as redis -from redis.asyncio.connection import Connection, to_bool -from tests.conftest import skip_if_redis_enterprise, skip_if_server_version_lt +import valkey.asyncio as valkey +from tests.conftest import skip_if_server_version_lt, skip_if_valkey_enterprise +from valkey.asyncio.connection import Connection, to_bool from .compat import aclosing, mock from .conftest import asynccontextmanager @@ -13,11 +13,11 @@ @pytest.mark.onlynoncluster -class TestRedisAutoReleaseConnectionPool: +class TestValkeyAutoReleaseConnectionPool: @pytest_asyncio.fixture - async def r(self, create_redis) -> redis.Redis: + async def r(self, create_valkey) -> valkey.Valkey: """This is necessary since r and r2 create ConnectionPools behind the scenes""" - r = await create_redis() + r = await create_valkey() r.auto_close_connection_pool = True yield r @@ -26,26 +26,26 @@ def get_total_connected_connections(pool): return len(pool._available_connections) + len(pool._in_use_connections) @staticmethod - async def create_two_conn(r: redis.Redis): + async def create_two_conn(r: valkey.Valkey): if not r.single_connection_client: # Single already initialized connection r.connection = await r.connection_pool.get_connection("_") return await r.connection_pool.get_connection("_") @staticmethod - def 
has_no_connected_connections(pool: redis.ConnectionPool): + def has_no_connected_connections(pool: valkey.ConnectionPool): return not any( x.is_connected for x in pool._available_connections + list(pool._in_use_connections) ) - async def test_auto_disconnect_redis_created_pool(self, r: redis.Redis): + async def test_auto_disconnect_valkey_created_pool(self, r: valkey.Valkey): new_conn = await self.create_two_conn(r) assert new_conn != r.connection assert self.get_total_connected_connections(r.connection_pool) == 2 await r.aclose() assert self.has_no_connected_connections(r.connection_pool) - async def test_do_not_auto_disconnect_redis_created_pool(self, r2: redis.Redis): + async def test_do_not_auto_disconnect_valkey_created_pool(self, r2: valkey.Valkey): assert r2.auto_close_connection_pool is False, ( "The connection pool should not be disconnected as a manually created " "connection pool was passed in in conftest.py" @@ -58,7 +58,9 @@ async def test_do_not_auto_disconnect_redis_created_pool(self, r2: redis.Redis): assert len(r2.connection_pool._available_connections) == 1 assert r2.connection_pool._available_connections[0].is_connected - async def test_auto_release_override_true_manual_created_pool(self, r: redis.Redis): + async def test_auto_release_override_true_manual_created_pool( + self, r: valkey.Valkey + ): assert r.auto_close_connection_pool is True, "This is from the class fixture" await self.create_two_conn(r) await r.aclose() @@ -69,7 +71,7 @@ async def test_auto_release_override_true_manual_created_pool(self, r: redis.Red assert self.has_no_connected_connections(r.connection_pool) @pytest.mark.parametrize("auto_close_conn_pool", [True, False]) - async def test_close_override(self, r: redis.Redis, auto_close_conn_pool): + async def test_close_override(self, r: valkey.Valkey, auto_close_conn_pool): r.auto_close_connection_pool = auto_close_conn_pool await self.create_two_conn(r) await r.aclose(close_connection_pool=True) @@ -77,7 +79,7 @@ async def 
test_close_override(self, r: redis.Redis, auto_close_conn_pool): @pytest.mark.parametrize("auto_close_conn_pool", [True, False]) async def test_negate_auto_close_client_pool( - self, r: redis.Redis, auto_close_conn_pool + self, r: valkey.Valkey, auto_close_conn_pool ): r.auto_close_connection_pool = auto_close_conn_pool new_conn = await self.create_two_conn(r) @@ -113,10 +115,10 @@ async def get_pool( self, connection_kwargs=None, max_connections=None, - connection_class=redis.Connection, + connection_class=valkey.Connection, ): connection_kwargs = connection_kwargs or {} - pool = redis.ConnectionPool( + pool = valkey.ConnectionPool( connection_class=connection_class, max_connections=max_connections, **connection_kwargs, @@ -137,7 +139,7 @@ async def test_connection_creation(self): async def test_aclosing(self): connection_kwargs = {"foo": "bar", "biz": "baz"} - pool = redis.ConnectionPool( + pool = valkey.ConnectionPool( connection_class=DummyConnection, max_connections=None, **connection_kwargs, @@ -159,7 +161,7 @@ async def test_max_connections(self, master_host): ) as pool: await pool.get_connection("_") await pool.get_connection("_") - with pytest.raises(redis.ConnectionError): + with pytest.raises(valkey.ConnectionError): await pool.get_connection("_") async def test_reuse_previously_released_connection(self, master_host): @@ -178,7 +180,7 @@ async def test_repr_contains_db_info_tcp(self): "client_name": "test-client", } async with self.get_pool( - connection_kwargs=connection_kwargs, connection_class=redis.Connection + connection_kwargs=connection_kwargs, connection_class=valkey.Connection ) as pool: expected = "host=localhost,port=6379,db=1,client_name=test-client" assert expected in repr(pool) @@ -187,7 +189,7 @@ async def test_repr_contains_db_info_unix(self): connection_kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"} async with self.get_pool( connection_kwargs=connection_kwargs, - connection_class=redis.UnixDomainSocketConnection, + 
connection_class=valkey.UnixDomainSocketConnection, ) as pool: expected = "path=/abc,db=1,client_name=test-client" assert expected in repr(pool) @@ -197,7 +199,7 @@ class TestBlockingConnectionPool: @asynccontextmanager async def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): connection_kwargs = connection_kwargs or {} - pool = redis.BlockingConnectionPool( + pool = valkey.BlockingConnectionPool( connection_class=DummyConnection, max_connections=max_connections, timeout=timeout, @@ -248,7 +250,7 @@ async def test_connection_pool_blocks_until_timeout(self, master_host): c1 = await pool.get_connection("_") start = asyncio.get_running_loop().time() - with pytest.raises(redis.ConnectionError): + with pytest.raises(valkey.ConnectionError): await pool.get_connection("_") # we should have waited at least some period of time @@ -284,15 +286,15 @@ async def test_reuse_previously_released_connection(self, master_host): assert c1 == c2 def test_repr_contains_db_info_tcp(self): - pool = redis.ConnectionPool( + pool = valkey.ConnectionPool( host="localhost", port=6379, client_name="test-client" ) expected = "host=localhost,port=6379,db=0,client_name=test-client" assert expected in repr(pool) def test_repr_contains_db_info_unix(self): - pool = redis.ConnectionPool( - connection_class=redis.UnixDomainSocketConnection, + pool = valkey.ConnectionPool( + connection_class=valkey.UnixDomainSocketConnection, path="abc", client_name="test-client", ) @@ -302,47 +304,47 @@ def test_repr_contains_db_info_unix(self): class TestConnectionPoolURLParsing: def test_hostname(self): - pool = redis.ConnectionPool.from_url("redis://my.host") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://my.host") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "my.host"} def test_quoted_hostname(self): - pool = redis.ConnectionPool.from_url("redis://my %2F host %2B%3D+") - assert 
pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://my %2F host %2B%3D+") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "my / host +=+"} def test_port(self): - pool = redis.ConnectionPool.from_url("redis://localhost:6380") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost:6380") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "port": 6380} @skip_if_server_version_lt("6.0.0") def test_username(self): - pool = redis.ConnectionPool.from_url("redis://myuser:@localhost") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://myuser:@localhost") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "username": "myuser"} @skip_if_server_version_lt("6.0.0") def test_quoted_username(self): - pool = redis.ConnectionPool.from_url( - "redis://%2Fmyuser%2F%2B name%3D%24+:@localhost" + pool = valkey.ConnectionPool.from_url( + "valkey://%2Fmyuser%2F%2B name%3D%24+:@localhost" ) - assert pool.connection_class == redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "username": "/myuser/+ name=$+", } def test_password(self): - pool = redis.ConnectionPool.from_url("redis://:mypassword@localhost") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://:mypassword@localhost") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "password": "mypassword"} def test_quoted_password(self): - pool = redis.ConnectionPool.from_url( - "redis://:%2Fmypass%2F%2B word%3D%24+@localhost" + pool = valkey.ConnectionPool.from_url( + "valkey://:%2Fmypass%2F%2B word%3D%24+@localhost" ) - assert pool.connection_class == 
redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "password": "/mypass/+ word=$+", @@ -350,8 +352,8 @@ def test_quoted_password(self): @skip_if_server_version_lt("6.0.0") def test_username_and_password(self): - pool = redis.ConnectionPool.from_url("redis://myuser:mypass@localhost") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://myuser:mypass@localhost") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "username": "myuser", @@ -359,27 +361,27 @@ def test_username_and_password(self): } def test_db_as_argument(self): - pool = redis.ConnectionPool.from_url("redis://localhost", db=1) - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost", db=1) + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "db": 1} def test_db_in_path(self): - pool = redis.ConnectionPool.from_url("redis://localhost/2", db=1) - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost/2", db=1) + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "db": 2} def test_db_in_querystring(self): - pool = redis.ConnectionPool.from_url("redis://localhost/2?db=3", db=1) - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost/2?db=3", db=1) + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "db": 3} def test_extra_typed_querystring_options(self): - pool = redis.ConnectionPool.from_url( - "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10" + pool = valkey.ConnectionPool.from_url( + "valkey://localhost/2?socket_timeout=20&socket_connect_timeout=10" 
"&socket_keepalive=&retry_on_timeout=Yes&max_connections=10" ) - assert pool.connection_class == redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "db": 2, @@ -410,46 +412,48 @@ def test_boolean_parsing(self): assert expected is to_bool(value) def test_client_name_in_querystring(self): - pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client") + pool = valkey.ConnectionPool.from_url( + "valkey://location?client_name=test-client" + ) assert pool.connection_kwargs["client_name"] == "test-client" def test_invalid_extra_typed_querystring_options(self): with pytest.raises(ValueError): - redis.ConnectionPool.from_url( - "redis://localhost/2?socket_timeout=_&socket_connect_timeout=abc" + valkey.ConnectionPool.from_url( + "valkey://localhost/2?socket_timeout=_&socket_connect_timeout=abc" ) def test_extra_querystring_options(self): - pool = redis.ConnectionPool.from_url("redis://localhost?a=1&b=2") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost?a=1&b=2") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "a": "1", "b": "2"} def test_calling_from_subclass_returns_correct_instance(self): - pool = redis.BlockingConnectionPool.from_url("redis://localhost") - assert isinstance(pool, redis.BlockingConnectionPool) + pool = valkey.BlockingConnectionPool.from_url("valkey://localhost") + assert isinstance(pool, valkey.BlockingConnectionPool) def test_client_creates_connection_pool(self): - r = redis.Redis.from_url("redis://myhost") - assert r.connection_pool.connection_class == redis.Connection + r = valkey.Valkey.from_url("valkey://myhost") + assert r.connection_pool.connection_class == valkey.Connection assert r.connection_pool.connection_kwargs == {"host": "myhost"} def test_invalid_scheme_raises_error(self): with pytest.raises(ValueError) as cm: - 
redis.ConnectionPool.from_url("localhost") + valkey.ConnectionPool.from_url("localhost") assert str(cm.value) == ( - "Redis URL must specify one of the following schemes " - "(redis://, rediss://, unix://)" + "Valkey URL must specify one of the following schemes " + "(valkey://, valkeys://, unix://)" ) class TestBlockingConnectionPoolURLParsing: def test_extra_typed_querystring_options(self): - pool = redis.BlockingConnectionPool.from_url( - "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10" + pool = valkey.BlockingConnectionPool.from_url( + "valkey://localhost/2?socket_timeout=20&socket_connect_timeout=10" "&socket_keepalive=&retry_on_timeout=Yes&max_connections=10&timeout=13.37" ) - assert pool.connection_class == redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "db": 2, @@ -462,105 +466,107 @@ def test_extra_typed_querystring_options(self): def test_invalid_extra_typed_querystring_options(self): with pytest.raises(ValueError): - redis.BlockingConnectionPool.from_url( - "redis://localhost/2?timeout=_not_a_float_" + valkey.BlockingConnectionPool.from_url( + "valkey://localhost/2?timeout=_not_a_float_" ) class TestConnectionPoolUnixSocketURLParsing: def test_defaults(self): - pool = redis.ConnectionPool.from_url("unix:///socket") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket"} @skip_if_server_version_lt("6.0.0") def test_username(self): - pool = redis.ConnectionPool.from_url("unix://myuser:@/socket") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix://myuser:@/socket") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "username": "myuser"} 
@skip_if_server_version_lt("6.0.0") def test_quoted_username(self): - pool = redis.ConnectionPool.from_url( + pool = valkey.ConnectionPool.from_url( "unix://%2Fmyuser%2F%2B name%3D%24+:@/socket" ) - assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == { "path": "/socket", "username": "/myuser/+ name=$+", } def test_password(self): - pool = redis.ConnectionPool.from_url("unix://:mypassword@/socket") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix://:mypassword@/socket") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "password": "mypassword"} def test_quoted_password(self): - pool = redis.ConnectionPool.from_url( + pool = valkey.ConnectionPool.from_url( "unix://:%2Fmypass%2F%2B word%3D%24+@/socket" ) - assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == { "path": "/socket", "password": "/mypass/+ word=$+", } def test_quoted_path(self): - pool = redis.ConnectionPool.from_url( + pool = valkey.ConnectionPool.from_url( "unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket" ) - assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == { "path": "/my/path/to/../+_+=$ocket", "password": "mypassword", } def test_db_as_argument(self): - pool = redis.ConnectionPool.from_url("unix:///socket", db=1) - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket", db=1) + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "db": 1} def test_db_in_querystring(self): - pool = 
redis.ConnectionPool.from_url("unix:///socket?db=2", db=1) - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket?db=2", db=1) + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "db": 2} def test_client_name_in_querystring(self): - pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client") + pool = valkey.ConnectionPool.from_url( + "valkey://location?client_name=test-client" + ) assert pool.connection_kwargs["client_name"] == "test-client" def test_extra_querystring_options(self): - pool = redis.ConnectionPool.from_url("unix:///socket?a=1&b=2") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket?a=1&b=2") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "a": "1", "b": "2"} class TestSSLConnectionURLParsing: def test_host(self): - pool = redis.ConnectionPool.from_url("rediss://my.host") - assert pool.connection_class == redis.SSLConnection + pool = valkey.ConnectionPool.from_url("valkeys://my.host") + assert pool.connection_class == valkey.SSLConnection assert pool.connection_kwargs == {"host": "my.host"} def test_cert_reqs_options(self): import ssl - class DummyConnectionPool(redis.ConnectionPool): + class DummyConnectionPool(valkey.ConnectionPool): def get_connection(self, *args, **kwargs): return self.make_connection() - pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=none") + pool = DummyConnectionPool.from_url("valkeys://?ssl_cert_reqs=none") assert pool.get_connection("_").cert_reqs == ssl.CERT_NONE - pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=optional") + pool = DummyConnectionPool.from_url("valkeys://?ssl_cert_reqs=optional") assert pool.get_connection("_").cert_reqs == ssl.CERT_OPTIONAL - pool = 
DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=required") + pool = DummyConnectionPool.from_url("valkeys://?ssl_cert_reqs=required") assert pool.get_connection("_").cert_reqs == ssl.CERT_REQUIRED - pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=False") + pool = DummyConnectionPool.from_url("valkeys://?ssl_check_hostname=False") assert pool.get_connection("_").check_hostname is False - pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=True") + pool = DummyConnectionPool.from_url("valkeys://?ssl_check_hostname=True") assert pool.get_connection("_").check_hostname is True @@ -568,13 +574,13 @@ class TestConnection: async def test_on_connect_error(self): """ An error in Connection.on_connect should disconnect from the server - see for details: https://github.com/andymccurdy/redis-py/issues/368 + see for details: https://github.com/andymccurdy/redis-py/issues/368 """ - # this assumes the Redis server being tested against doesn't have + # this assumes the Valkey server being tested against doesn't have # 9999 databases ;) - bad_connection = redis.Redis(db=9999) + bad_connection = valkey.Valkey(db=9999) # an error should be raised on connect - with pytest.raises(redis.RedisError): + with pytest.raises(valkey.ValkeyError): await bad_connection.info() pool = bad_connection.connection_pool assert len(pool._available_connections) == 1 @@ -582,27 +588,27 @@ @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_busy_loading_disconnects_socket(self, r): """ - If Redis raises a LOADING error, the connection should be + If Valkey raises a LOADING error, the connection should be disconnected and a BusyLoadingError raised """ - with pytest.raises(redis.BusyLoadingError): + with pytest.raises(valkey.BusyLoadingError): await r.execute_command("DEBUG", "ERROR", "LOADING fake message") if r.connection: assert not
r.connection._reader @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_busy_loading_from_pipeline_immediate_command(self, r): """ BusyLoadingErrors should raise from Pipelines that execute a command immediately, like WATCH does. """ pipe = r.pipeline() - with pytest.raises(redis.BusyLoadingError): + with pytest.raises(valkey.BusyLoadingError): await pipe.immediate_execute_command( "DEBUG", "ERROR", "LOADING fake message" ) @@ -613,7 +619,7 @@ async def test_busy_loading_from_pipeline_immediate_command(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_busy_loading_from_pipeline(self, r): """ BusyLoadingErrors should be raised from a pipeline execution @@ -621,7 +627,7 @@ async def test_busy_loading_from_pipeline(self, r): """ pipe = r.pipeline() pipe.execute_command("DEBUG", "ERROR", "LOADING fake message") - with pytest.raises(redis.BusyLoadingError): + with pytest.raises(valkey.BusyLoadingError): await pipe.execute() pool = r.connection_pool assert not pipe.connection @@ -629,22 +635,22 @@ async def test_busy_loading_from_pipeline(self, r): assert not pool._available_connections[0]._reader @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_read_only_error(self, r): """READONLY errors get turned into ReadOnlyError exceptions""" - with pytest.raises(redis.ReadOnlyError): + with pytest.raises(valkey.ReadOnlyError): await r.execute_command("DEBUG", "ERROR", "READONLY blah blah") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_oom_error(self, r): """OOM errors get turned into OutOfMemoryError exceptions""" - with pytest.raises(redis.OutOfMemoryError): + with pytest.raises(valkey.OutOfMemoryError): # note: don't use the DEBUG OOM command since it's not the same # as the db being full await 
r.execute_command("DEBUG", "ERROR", "OOM blah blah") def test_connect_from_url_tcp(self): - connection = redis.Redis.from_url("redis://localhost") + connection = valkey.Valkey.from_url("valkey://localhost") pool = connection.connection_pool print(repr(pool)) @@ -657,7 +663,7 @@ def test_connect_from_url_tcp(self): ) def test_connect_from_url_unix(self): - connection = redis.Redis.from_url("unix:///path/to/socket") + connection = valkey.Valkey.from_url("unix:///path/to/socket") pool = connection.connection_pool assert re.match( @@ -668,31 +674,31 @@ def test_connect_from_url_unix(self): "path=/path/to/socket,db=0", ) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_connect_no_auth_supplied_when_required(self, r): """ AuthenticationError should be raised when the server requires a password but one isn't supplied. """ - with pytest.raises(redis.AuthenticationError): + with pytest.raises(valkey.AuthenticationError): await r.execute_command( "DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set" ) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_connect_invalid_password_supplied(self, r): """AuthenticationError should be raised when sending the wrong password""" - with pytest.raises(redis.AuthenticationError): + with pytest.raises(valkey.AuthenticationError): await r.execute_command("DEBUG", "ERROR", "ERR invalid password") @pytest.mark.onlynoncluster class TestMultiConnectionClient: @pytest_asyncio.fixture() - async def r(self, create_redis, server): - redis = await create_redis(single_connection_client=False) - yield redis - await redis.flushall() + async def r(self, create_valkey, server): + valkey = await create_valkey(single_connection_client=False) + yield valkey + await valkey.flushall() @pytest.mark.onlynoncluster @@ -701,10 +707,10 @@ class TestHealthCheck: interval = 60 @pytest_asyncio.fixture() - async def r(self, create_redis): - redis = await create_redis(health_check_interval=self.interval) - 
yield redis - await redis.flushall() + async def r(self, create_valkey): + valkey = await create_valkey(health_check_interval=self.interval) + yield valkey + await valkey.flushall() def assert_interval_advanced(self, connection): diff = connection.next_health_check - asyncio.get_running_loop().time() diff --git a/tests/test_asyncio/test_credentials.py b/tests/test_asyncio/test_credentials.py index 4429f745..7d51bbc6 100644 --- a/tests/test_asyncio/test_credentials.py +++ b/tests/test_asyncio/test_credentials.py @@ -5,15 +5,15 @@ import pytest import pytest_asyncio -import redis -from redis import AuthenticationError, DataError, ResponseError -from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider -from redis.utils import str_if_bytes -from tests.conftest import skip_if_redis_enterprise +import valkey +from tests.conftest import skip_if_valkey_enterprise +from valkey import AuthenticationError, DataError, ResponseError +from valkey.credentials import CredentialProvider, UsernamePasswordCredentialProvider +from valkey.utils import str_if_bytes @pytest_asyncio.fixture() -async def r_acl_teardown(r: redis.Redis): +async def r_acl_teardown(r: valkey.Valkey): """ A special fixture which removes the provided names from the database after use """ @@ -29,7 +29,7 @@ def factory(username): @pytest_asyncio.fixture() -async def r_required_pass_teardown(r: redis.Redis): +async def r_required_pass_teardown(r: valkey.Valkey): """ A special fixture which removes the provided password from the database after use """ @@ -120,9 +120,9 @@ async def init_required_pass(r, password): @pytest.mark.asyncio class TestCredentialsProvider: - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_only_pass_without_creds_provider( - self, r_required_pass_teardown, create_redis + self, r_required_pass_teardown, create_valkey ): # test for default user (`username` is supposed to be optional) password = "password" @@ -130,13 +130,13 @@ async def 
test_only_pass_without_creds_provider( await init_required_pass(r, password) assert await r.auth(password) is True - r2 = await create_redis(flushdb=False, password=password) + r2 = await create_valkey(flushdb=False, password=password) assert await r2.ping() is True - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_user_and_pass_without_creds_provider( - self, r_acl_teardown, create_redis + self, r_acl_teardown, create_valkey ): """ Test backward compatibility with username and password @@ -146,15 +146,15 @@ async def test_user_and_pass_without_creds_provider( password = "password" r = r_acl_teardown(username) await init_acl_user(r, username, password) - r2 = await create_redis(flushdb=False, username=username, password=password) + r2 = await create_valkey(flushdb=False, username=username, password=password) assert await r2.ping() is True @pytest.mark.parametrize("username", ["username", None]) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() @pytest.mark.onlynoncluster async def test_credential_provider_with_supplier( - self, r_acl_teardown, r_required_pass_teardown, create_redis, username + self, r_acl_teardown, r_required_pass_teardown, create_valkey, username ): creds_provider = AsyncRandomAuthCredProvider( user=username, @@ -171,17 +171,17 @@ async def test_credential_provider_with_supplier( r = r_required_pass_teardown(password) await init_required_pass(r, password) - r2 = await create_redis(flushdb=False, credential_provider=creds_provider) + r2 = await create_valkey(flushdb=False, credential_provider=creds_provider) assert await r2.ping() is True async def test_async_credential_provider_no_password_success( - self, r_acl_teardown, create_redis + self, r_acl_teardown, create_valkey ): username = "username" r = r_acl_teardown(username) await init_acl_user(r, username, "") - r2 = await create_redis( + r2 = await create_valkey( flushdb=False, credential_provider=NoPassCredProvider(), ) @@ -189,13 +189,13 @@ async def 
test_async_credential_provider_no_password_success( @pytest.mark.onlynoncluster async def test_credential_provider_no_password_error( - self, r_acl_teardown, create_redis + self, r_acl_teardown, create_valkey ): username = "username" r = r_acl_teardown(username) await init_acl_user(r, username, "password") with pytest.raises(AuthenticationError) as e: - await create_redis( + await create_valkey( flushdb=False, credential_provider=NoPassCredProvider(), single_connection_client=True, @@ -205,7 +205,7 @@ async def test_credential_provider_no_password_error( @pytest.mark.onlynoncluster async def test_password_and_username_together_with_cred_provider_raise_error( - self, r_acl_teardown, create_redis + self, r_acl_teardown, create_valkey ): username = "username" r = r_acl_teardown(username) @@ -214,7 +214,7 @@ async def test_password_and_username_together_with_cred_provider_raise_error( username="username", password="password" ) with pytest.raises(DataError) as e: - await create_redis( + await create_valkey( flushdb=False, username="username", password="password", @@ -228,7 +228,7 @@ async def test_password_and_username_together_with_cred_provider_raise_error( @pytest.mark.onlynoncluster async def test_change_username_password_on_existing_connection( - self, r_acl_teardown, create_redis + self, r_acl_teardown, create_valkey ): username = "origin_username" password = "origin_password" @@ -236,7 +236,7 @@ async def test_change_username_password_on_existing_connection( new_password = "new_password" r = r_acl_teardown(username) await init_acl_user(r, username, password) - r2 = await create_redis(flushdb=False, username=username, password=password) + r2 = await create_valkey(flushdb=False, username=username, password=password) assert await r2.ping() is True conn = await r2.connection_pool.get_connection("_") await conn.send_command("PING") @@ -253,7 +253,7 @@ async def test_change_username_password_on_existing_connection( @pytest.mark.asyncio class 
TestUsernamePasswordCredentialProvider: async def test_user_pass_credential_provider_acl_user_and_pass( - self, r_acl_teardown, create_redis + self, r_acl_teardown, create_valkey ): username = "username" password = "password" @@ -263,11 +263,11 @@ async def test_user_pass_credential_provider_acl_user_and_pass( assert provider.password == password assert provider.get_credentials() == (username, password) await init_acl_user(r, provider.username, provider.password) - r2 = await create_redis(flushdb=False, credential_provider=provider) + r2 = await create_valkey(flushdb=False, credential_provider=provider) assert await r2.ping() is True async def test_user_pass_provider_only_password( - self, r_required_pass_teardown, create_redis + self, r_required_pass_teardown, create_valkey ): password = "password" provider = UsernamePasswordCredentialProvider(password=password) @@ -278,6 +278,6 @@ async def test_user_pass_provider_only_password( await init_required_pass(r, password) - r2 = await create_redis(flushdb=False, credential_provider=provider) + r2 = await create_valkey(flushdb=False, credential_provider=provider) assert await r2.auth(provider.password) is True assert await r2.ping() is True diff --git a/tests/test_asyncio/test_cwe_404.py b/tests/test_asyncio/test_cwe_404.py index df46cabc..37c835c9 100644 --- a/tests/test_asyncio/test_cwe_404.py +++ b/tests/test_asyncio/test_cwe_404.py @@ -2,15 +2,15 @@ import contextlib import pytest -from redis.asyncio import Redis -from redis.asyncio.cluster import RedisCluster -from redis.asyncio.connection import async_timeout +from valkey.asyncio import Valkey +from valkey.asyncio.cluster import ValkeyCluster +from valkey.asyncio.connection import async_timeout class DelayProxy: - def __init__(self, addr, redis_addr, delay: float = 0.0): + def __init__(self, addr, valkey_addr, delay: float = 0.0): self.addr = addr - self.redis_addr = redis_addr + self.valkey_addr = valkey_addr self.delay = delay self.send_event = asyncio.Event() 
self.server = None @@ -26,10 +26,10 @@ async def __aexit__(self, *args): await self.stop() async def start(self): - # test that we can connect to redis + # test that we can connect to valkey async with async_timeout(2): - _, redis_writer = await asyncio.open_connection(*self.redis_addr) - redis_writer.close() + _, valkey_writer = await asyncio.open_connection(*self.valkey_addr) + valkey_writer.close() self.server = await asyncio.start_server( self.handle, *self.addr, reuse_address=True ) @@ -49,12 +49,12 @@ def set_delay(self, delay: float = 0.0): self.delay = old_delay async def handle(self, reader, writer): - # establish connection to redis - redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr) + # establish connection to valkey + valkey_reader, valkey_writer = await asyncio.open_connection(*self.valkey_addr) pipe1 = asyncio.create_task( - self.pipe(reader, redis_writer, "to redis:", self.send_event) + self.pipe(reader, valkey_writer, "to valkey:", self.send_event) ) - pipe2 = asyncio.create_task(self.pipe(redis_reader, writer, "from redis:")) + pipe2 = asyncio.create_task(self.pipe(valkey_reader, writer, "from valkey:")) await asyncio.gather(pipe1, pipe2) async def stop(self): @@ -106,12 +106,12 @@ async def pipe( @pytest.mark.onlynoncluster @pytest.mark.parametrize("delay", argvalues=[0.05, 0.5, 1, 2]) async def test_standalone(delay, master_host): - # create a tcp socket proxy that relays data to Redis and back, + # create a tcp socket proxy that relays data to Valkey and back, # inserting 0.1 seconds of delay - async with DelayProxy(addr=("127.0.0.1", 5380), redis_addr=master_host) as dp: + async with DelayProxy(addr=("127.0.0.1", 5380), valkey_addr=master_host) as dp: for b in [True, False]: - # note that we connect to proxy, rather than to Redis directly - async with Redis( + # note that we connect to proxy, rather than to Valkey directly + async with Valkey( host="127.0.0.1", port=5380, single_connection_client=b ) as r: await 
r.set("foo", "foo") @@ -143,9 +143,9 @@ async def op(r): @pytest.mark.onlynoncluster @pytest.mark.parametrize("delay", argvalues=[0.05, 0.5, 1, 2]) async def test_standalone_pipeline(delay, master_host): - async with DelayProxy(addr=("127.0.0.1", 5380), redis_addr=master_host) as dp: + async with DelayProxy(addr=("127.0.0.1", 5380), valkey_addr=master_host) as dp: for b in [True, False]: - async with Redis( + async with Valkey( host="127.0.0.1", port=5380, single_connection_client=b ) as r: await r.set("foo", "foo") @@ -208,7 +208,7 @@ def remap(address): port = cluster_port + i remapped = remap_base + i forward_addr = hostname, port - proxy = DelayProxy(addr=("127.0.0.1", remapped), redis_addr=forward_addr) + proxy = DelayProxy(addr=("127.0.0.1", remapped), valkey_addr=forward_addr) proxies.append(proxy) def all_clear(): @@ -232,8 +232,8 @@ def set_delay(delay: float): for p in proxies: await stack.enter_async_context(p) - r = RedisCluster.from_url( - f"redis://127.0.0.1:{remap_base}", address_remap=remap + r = ValkeyCluster.from_url( + f"valkey://127.0.0.1:{remap_base}", address_remap=remap ) try: await r.initialize() diff --git a/tests/test_asyncio/test_encoding.py b/tests/test_asyncio/test_encoding.py index 162ccb36..1cde34ad 100644 --- a/tests/test_asyncio/test_encoding.py +++ b/tests/test_asyncio/test_encoding.py @@ -1,47 +1,47 @@ import pytest import pytest_asyncio -import redis.asyncio as redis -from redis.exceptions import DataError +import valkey.asyncio as valkey +from valkey.exceptions import DataError @pytest.mark.onlynoncluster class TestEncoding: @pytest_asyncio.fixture() - async def r(self, create_redis): - redis = await create_redis(decode_responses=True) - yield redis - await redis.flushall() + async def r(self, create_valkey): + valkey = await create_valkey(decode_responses=True) + yield valkey + await valkey.flushall() @pytest_asyncio.fixture() - async def r_no_decode(self, create_redis): - redis = await create_redis(decode_responses=False) - 
yield redis - await redis.flushall() + async def r_no_decode(self, create_valkey): + valkey = await create_valkey(decode_responses=False) + yield valkey + await valkey.flushall() - async def test_simple_encoding(self, r_no_decode: redis.Redis): + async def test_simple_encoding(self, r_no_decode: valkey.Valkey): unicode_string = chr(3456) + "abcd" + chr(3421) await r_no_decode.set("unicode-string", unicode_string.encode("utf-8")) cached_val = await r_no_decode.get("unicode-string") assert isinstance(cached_val, bytes) assert unicode_string == cached_val.decode("utf-8") - async def test_simple_encoding_and_decoding(self, r: redis.Redis): + async def test_simple_encoding_and_decoding(self, r: valkey.Valkey): unicode_string = chr(3456) + "abcd" + chr(3421) await r.set("unicode-string", unicode_string) cached_val = await r.get("unicode-string") assert isinstance(cached_val, str) assert unicode_string == cached_val - async def test_memoryview_encoding(self, r_no_decode: redis.Redis): + async def test_memoryview_encoding(self, r_no_decode: valkey.Valkey): unicode_string = chr(3456) + "abcd" + chr(3421) unicode_string_view = memoryview(unicode_string.encode("utf-8")) await r_no_decode.set("unicode-string-memoryview", unicode_string_view) cached_val = await r_no_decode.get("unicode-string-memoryview") - # The cached value won't be a memoryview because it's a copy from Redis + # The cached value won't be a memoryview because it's a copy from Valkey assert isinstance(cached_val, bytes) assert unicode_string == cached_val.decode("utf-8") - async def test_memoryview_encoding_and_decoding(self, r: redis.Redis): + async def test_memoryview_encoding_and_decoding(self, r: valkey.Valkey): unicode_string = chr(3456) + "abcd" + chr(3421) unicode_string_view = memoryview(unicode_string.encode("utf-8")) await r.set("unicode-string-memoryview", unicode_string_view) @@ -49,7 +49,7 @@ async def test_memoryview_encoding_and_decoding(self, r: redis.Redis): assert isinstance(cached_val, str) 
assert unicode_string == cached_val - async def test_list_encoding(self, r: redis.Redis): + async def test_list_encoding(self, r: valkey.Valkey): unicode_string = chr(3456) + "abcd" + chr(3421) result = [unicode_string, unicode_string, unicode_string] await r.rpush("a", *result) @@ -58,13 +58,13 @@ async def test_list_encoding(self, r: redis.Redis): @pytest.mark.onlynoncluster class TestEncodingErrors: - async def test_ignore(self, create_redis): - r = await create_redis(decode_responses=True, encoding_errors="ignore") + async def test_ignore(self, create_valkey): + r = await create_valkey(decode_responses=True, encoding_errors="ignore") await r.set("a", b"foo\xff") assert await r.get("a") == "foo" - async def test_replace(self, create_redis): - r = await create_redis(decode_responses=True, encoding_errors="replace") + async def test_replace(self, create_valkey): + r = await create_valkey(decode_responses=True, encoding_errors="replace") await r.set("a", b"foo\xff") assert await r.get("a") == "foo\ufffd" @@ -84,26 +84,26 @@ async def test_memoryviews_are_not_packed(self, r): class TestCommandsAreNotEncoded: @pytest_asyncio.fixture() - async def r(self, create_redis): - redis = await create_redis(encoding="utf-16") - yield redis - await redis.flushall() + async def r(self, create_valkey): + valkey = await create_valkey(encoding="utf-16") + yield valkey + await valkey.flushall() @pytest.mark.xfail - async def test_basic_command(self, r: redis.Redis): + async def test_basic_command(self, r: valkey.Valkey): await r.set("hello", "world") class TestInvalidUserInput: - async def test_boolean_fails(self, r: redis.Redis): + async def test_boolean_fails(self, r: valkey.Valkey): with pytest.raises(DataError): await r.set("a", True) # type: ignore - async def test_none_fails(self, r: redis.Redis): + async def test_none_fails(self, r: valkey.Valkey): with pytest.raises(DataError): await r.set("a", None) # type: ignore - async def test_user_type_fails(self, r: redis.Redis): + 
async def test_user_type_fails(self, r: valkey.Valkey): class Foo: def __str__(self): return "Foo" diff --git a/tests/test_asyncio/test_graph.py b/tests/test_asyncio/test_graph.py index 4caf7947..eb019e8c 100644 --- a/tests/test_asyncio/test_graph.py +++ b/tests/test_asyncio/test_graph.py @@ -1,9 +1,11 @@ import pytest -import redis.asyncio as redis -from redis.commands.graph import Edge, Node, Path -from redis.commands.graph.execution_plan import Operation -from redis.exceptions import ResponseError -from tests.conftest import skip_if_redis_enterprise +import valkey.asyncio as valkey +from tests.conftest import skip_if_valkey_enterprise +from valkey.commands.graph import Edge, Node, Path +from valkey.commands.graph.execution_plan import Operation +from valkey.exceptions import ResponseError + +pytestmark = pytest.mark.skip async def test_bulk(decoded_r): @@ -12,7 +14,7 @@ async def test_bulk(decoded_r): await decoded_r.graph().bulk(foo="bar!") -async def test_graph_creation(decoded_r: redis.Redis): +async def test_graph_creation(decoded_r: valkey.Valkey): graph = decoded_r.graph() john = Node( @@ -56,7 +58,7 @@ async def test_graph_creation(decoded_r: redis.Redis): await graph.delete() -async def test_array_functions(decoded_r: redis.Redis): +async def test_array_functions(decoded_r: valkey.Valkey): graph = decoded_r.graph() query = """CREATE (p:person{name:'a',age:32, array:[0,1,2]})""" @@ -78,7 +80,7 @@ async def test_array_functions(decoded_r: redis.Redis): assert [a] == result.result_set[0][0] -async def test_path(decoded_r: redis.Redis): +async def test_path(decoded_r: valkey.Valkey): node0 = Node(node_id=0, label="L1") node1 = Node(node_id=1, label="L1") edge01 = Edge(node0, "R1", node1, edge_id=0, properties={"value": 1}) @@ -97,7 +99,7 @@ async def test_path(decoded_r: redis.Redis): assert expected_results == result.result_set -async def test_param(decoded_r: redis.Redis): +async def test_param(decoded_r: valkey.Valkey): params = [1, 2.3, "str", True, 
False, None, [0, 1, 2]] query = "RETURN $param" for param in params: @@ -106,7 +108,7 @@ async def test_param(decoded_r: redis.Redis): assert expected_results == result.result_set -async def test_map(decoded_r: redis.Redis): +async def test_map(decoded_r: valkey.Valkey): query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}" actual = (await decoded_r.graph().query(query)).result_set[0][0] @@ -122,7 +124,7 @@ async def test_map(decoded_r: redis.Redis): assert actual == expected -async def test_point(decoded_r: redis.Redis): +async def test_point(decoded_r: valkey.Valkey): query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})" expected_lat = 32.070794860 expected_lon = 34.820751118 @@ -138,7 +140,7 @@ async def test_point(decoded_r: redis.Redis): assert abs(actual["longitude"] - expected_lon) < 0.001 -async def test_index_response(decoded_r: redis.Redis): +async def test_index_response(decoded_r: valkey.Valkey): result_set = await decoded_r.graph().query("CREATE INDEX ON :person(age)") assert 1 == result_set.indices_created @@ -152,7 +154,7 @@ async def test_index_response(decoded_r: redis.Redis): await decoded_r.graph().query("DROP INDEX ON :person(age)") -async def test_stringify_query_result(decoded_r: redis.Redis): +async def test_stringify_query_result(decoded_r: valkey.Valkey): graph = decoded_r.graph() john = Node( @@ -205,7 +207,7 @@ async def test_stringify_query_result(decoded_r: redis.Redis): await graph.delete() -async def test_optional_match(decoded_r: redis.Redis): +async def test_optional_match(decoded_r: valkey.Valkey): # Build a graph of form (a)-[R]->(b) node0 = Node(node_id=0, label="L1", properties={"value": "a"}) node1 = Node(node_id=1, label="L1", properties={"value": "b"}) @@ -229,7 +231,7 @@ async def test_optional_match(decoded_r: redis.Redis): await graph.delete() -async def test_cached_execution(decoded_r: redis.Redis): +async def test_cached_execution(decoded_r: valkey.Valkey): await 
decoded_r.graph().query("CREATE ()") uncached_result = await decoded_r.graph().query( @@ -248,7 +250,7 @@ async def test_cached_execution(decoded_r: redis.Redis): assert cached_result.cached_execution -async def test_slowlog(decoded_r: redis.Redis): +async def test_slowlog(decoded_r: valkey.Valkey): create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), @@ -261,7 +263,7 @@ async def test_slowlog(decoded_r: redis.Redis): @pytest.mark.xfail(strict=False) -async def test_query_timeout(decoded_r: redis.Redis): +async def test_query_timeout(decoded_r: valkey.Valkey): # Build a sample graph with 1000 nodes. await decoded_r.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})") # Issue a long-running query with a 1-millisecond timeout. @@ -274,7 +276,7 @@ async def test_query_timeout(decoded_r: redis.Redis): assert False is False -async def test_read_only_query(decoded_r: redis.Redis): +async def test_read_only_query(decoded_r: valkey.Valkey): with pytest.raises(Exception): # Issue a write query, specifying read-only true, # this call should fail. 
@@ -282,7 +284,7 @@ async def test_read_only_query(decoded_r: redis.Redis): assert False is False -async def test_profile(decoded_r: redis.Redis): +async def test_profile(decoded_r: valkey.Valkey): q = """UNWIND range(1, 3) AS x CREATE (p:Person {v:x})""" profile = (await decoded_r.graph().profile(q)).result_set assert "Create | Records produced: 3" in profile @@ -296,8 +298,8 @@ async def test_profile(decoded_r: redis.Redis): assert "Node By Label Scan | (p:Person) | Records produced: 3" in profile -@skip_if_redis_enterprise() -async def test_config(decoded_r: redis.Redis): +@skip_if_valkey_enterprise() +async def test_config(decoded_r: valkey.Valkey): config_name = "RESULTSET_SIZE" config_value = 3 @@ -328,7 +330,7 @@ async def test_config(decoded_r: redis.Redis): @pytest.mark.onlynoncluster -async def test_list_keys(decoded_r: redis.Redis): +async def test_list_keys(decoded_r: valkey.Valkey): result = await decoded_r.graph().list_keys() assert result == [] @@ -350,15 +352,15 @@ async def test_list_keys(decoded_r: redis.Redis): assert result == [] -async def test_multi_label(decoded_r: redis.Redis): - redis_graph = decoded_r.graph("g") +async def test_multi_label(decoded_r: valkey.Valkey): + valkey_graph = decoded_r.graph("g") node = Node(label=["l", "ll"]) - redis_graph.add_node(node) - await redis_graph.commit() + valkey_graph.add_node(node) + await valkey_graph.commit() query = "MATCH (n) RETURN n" - result = await redis_graph.query(query) + result = await valkey_graph.query(query) result_node = result.result_set[0][0] assert result_node == node @@ -375,34 +377,34 @@ async def test_multi_label(decoded_r: redis.Redis): assert True -async def test_execution_plan(decoded_r: redis.Redis): - redis_graph = decoded_r.graph("execution_plan") +async def test_execution_plan(decoded_r: valkey.Valkey): + valkey_graph = decoded_r.graph("execution_plan") create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), (:Rider {name:'Dani 
Pedrosa'})-[:rides]->(:Team {name:'Honda'}), (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" - await redis_graph.query(create_query) + await valkey_graph.query(create_query) - result = await redis_graph.execution_plan( + result = await valkey_graph.execution_plan( "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name, $params", # noqa {"name": "Yehuda"}, ) expected = "Results\n Project\n Conditional Traverse | (t)->(r:Rider)\n Filter\n Node By Label Scan | (t:Team)" # noqa assert result == expected - await redis_graph.delete() + await valkey_graph.delete() -async def test_explain(decoded_r: redis.Redis): - redis_graph = decoded_r.graph("execution_plan") +async def test_explain(decoded_r: valkey.Valkey): + valkey_graph = decoded_r.graph("execution_plan") # graph creation / population create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" - await redis_graph.query(create_query) + await valkey_graph.query(create_query) - result = await redis_graph.explain( + result = await valkey_graph.explain( """MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name @@ -454,7 +456,7 @@ async def test_explain(decoded_r: redis.Redis): assert result.structured_plan == expected - result = await redis_graph.explain( + result = await valkey_graph.explain( """MATCH (r:Rider), (t:Team) RETURN r.name, t.name""" ) @@ -478,4 +480,4 @@ async def test_explain(decoded_r: redis.Redis): assert result.structured_plan == expected - await redis_graph.delete() + await valkey_graph.delete() diff --git a/tests/test_asyncio/test_json.py b/tests/test_asyncio/test_json.py index 920ec71d..e5227aa6 100644 --- a/tests/test_asyncio/test_json.py +++ b/tests/test_asyncio/test_json.py @@ -1,18 +1,20 @@ import pytest -import redis.asyncio as redis -from redis import 
exceptions -from redis.commands.json.path import Path +import valkey.asyncio as valkey from tests.conftest import assert_resp_response, skip_ifmodversion_lt +from valkey import exceptions +from valkey.commands.json.path import Path +pytestmark = pytest.mark.skip -async def test_json_setbinarykey(decoded_r: redis.Redis): + +async def test_json_setbinarykey(decoded_r: valkey.Valkey): d = {"hello": "world", b"some": "value"} with pytest.raises(TypeError): decoded_r.json().set("somekey", Path.root_path(), d) assert await decoded_r.json().set("somekey", Path.root_path(), d, decode_keys=True) -async def test_json_setgetdeleteforget(decoded_r: redis.Redis): +async def test_json_setgetdeleteforget(decoded_r: valkey.Valkey): assert await decoded_r.json().set("foo", Path.root_path(), "bar") assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]]) assert await decoded_r.json().get("baz") is None @@ -21,12 +23,12 @@ async def test_json_setgetdeleteforget(decoded_r: redis.Redis): assert await decoded_r.exists("foo") == 0 -async def test_jsonget(decoded_r: redis.Redis): +async def test_jsonget(decoded_r: valkey.Valkey): await decoded_r.json().set("foo", Path.root_path(), "bar") assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]]) -async def test_json_get_jset(decoded_r: redis.Redis): +async def test_json_get_jset(decoded_r: valkey.Valkey): assert await decoded_r.json().set("foo", Path.root_path(), "bar") assert_resp_response(decoded_r, await decoded_r.json().get("foo"), "bar", [["bar"]]) assert await decoded_r.json().get("baz") is None @@ -34,7 +36,7 @@ async def test_json_get_jset(decoded_r: redis.Redis): assert await decoded_r.exists("foo") == 0 -async def test_nonascii_setgetdelete(decoded_r: redis.Redis): +async def test_nonascii_setgetdelete(decoded_r: valkey.Valkey): assert await decoded_r.json().set("notascii", Path.root_path(), "hyvää-élève") res = "hyvää-élève" assert_resp_response( @@ -45,7 +47,7 @@ async def 
test_nonascii_setgetdelete(decoded_r: redis.Redis): @skip_ifmodversion_lt("2.6.0", "ReJSON") -async def test_json_merge(decoded_r: redis.Redis): +async def test_json_merge(decoded_r: valkey.Valkey): # Test with root path $ assert await decoded_r.json().set( "person_data", @@ -78,7 +80,7 @@ async def test_json_merge(decoded_r: redis.Redis): } -async def test_jsonsetexistentialmodifiersshouldsucceed(decoded_r: redis.Redis): +async def test_jsonsetexistentialmodifiersshouldsucceed(decoded_r: valkey.Valkey): obj = {"foo": "bar"} assert await decoded_r.json().set("obj", Path.root_path(), obj) @@ -95,7 +97,7 @@ async def test_jsonsetexistentialmodifiersshouldsucceed(decoded_r: redis.Redis): await decoded_r.json().set("obj", Path("foo"), "baz", nx=True, xx=True) -async def test_mgetshouldsucceed(decoded_r: redis.Redis): +async def test_mgetshouldsucceed(decoded_r: valkey.Valkey): await decoded_r.json().set("1", Path.root_path(), 1) await decoded_r.json().set("2", Path.root_path(), 2) assert await decoded_r.json().mget(["1"], Path.root_path()) == [1] @@ -105,7 +107,7 @@ async def test_mgetshouldsucceed(decoded_r: redis.Redis): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("2.6.0", "ReJSON") -async def test_mset(decoded_r: redis.Redis): +async def test_mset(decoded_r: valkey.Valkey): await decoded_r.json().mset( [("1", Path.root_path(), 1), ("2", Path.root_path(), 2)] ) @@ -115,13 +117,13 @@ async def test_mset(decoded_r: redis.Redis): @skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release -async def test_clear(decoded_r: redis.Redis): +async def test_clear(decoded_r: valkey.Valkey): await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4]) assert 1 == await decoded_r.json().clear("arr", Path.root_path()) assert_resp_response(decoded_r, await decoded_r.json().get("arr"), [], [[[]]]) -async def test_type(decoded_r: redis.Redis): +async def test_type(decoded_r: valkey.Valkey): await decoded_r.json().set("1", Path.root_path(), 1) 
assert_resp_response( decoded_r, @@ -145,7 +147,7 @@ async def test_numincrby(decoded_r): assert_resp_response(decoded_r, res, 1.25, [1.25]) -async def test_nummultby(decoded_r: redis.Redis): +async def test_nummultby(decoded_r: valkey.Valkey): await decoded_r.json().set("num", Path.root_path(), 1) with pytest.deprecated_call(): @@ -158,7 +160,7 @@ async def test_nummultby(decoded_r: redis.Redis): @skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release -async def test_toggle(decoded_r: redis.Redis): +async def test_toggle(decoded_r: valkey.Valkey): await decoded_r.json().set("bool", Path.root_path(), False) assert await decoded_r.json().toggle("bool", Path.root_path()) assert await decoded_r.json().toggle("bool", Path.root_path()) is False @@ -168,14 +170,14 @@ async def test_toggle(decoded_r: redis.Redis): await decoded_r.json().toggle("num", Path.root_path()) -async def test_strappend(decoded_r: redis.Redis): +async def test_strappend(decoded_r: valkey.Valkey): await decoded_r.json().set("jsonkey", Path.root_path(), "foo") assert 6 == await decoded_r.json().strappend("jsonkey", "bar") res = await decoded_r.json().get("jsonkey", Path.root_path()) assert_resp_response(decoded_r, res, "foobar", [["foobar"]]) -async def test_strlen(decoded_r: redis.Redis): +async def test_strlen(decoded_r: valkey.Valkey): await decoded_r.json().set("str", Path.root_path(), "foo") assert 3 == await decoded_r.json().strlen("str", Path.root_path()) await decoded_r.json().strappend("str", "bar", Path.root_path()) @@ -183,14 +185,14 @@ async def test_strlen(decoded_r: redis.Redis): assert 6 == await decoded_r.json().strlen("str") -async def test_arrappend(decoded_r: redis.Redis): +async def test_arrappend(decoded_r: valkey.Valkey): await decoded_r.json().set("arr", Path.root_path(), [1]) assert 2 == await decoded_r.json().arrappend("arr", Path.root_path(), 2) assert 4 == await decoded_r.json().arrappend("arr", Path.root_path(), 3, 4) assert 7 == await 
decoded_r.json().arrappend("arr", Path.root_path(), *[5, 6, 7]) -async def test_arrindex(decoded_r: redis.Redis): +async def test_arrindex(decoded_r: valkey.Valkey): r_path = Path.root_path() await decoded_r.json().set("arr", r_path, [0, 1, 2, 3, 4]) assert 1 == await decoded_r.json().arrindex("arr", r_path, 1) @@ -202,7 +204,7 @@ async def test_arrindex(decoded_r: redis.Redis): assert -1 == await decoded_r.json().arrindex("arr", r_path, 4, start=1, stop=3) -async def test_arrinsert(decoded_r: redis.Redis): +async def test_arrinsert(decoded_r: valkey.Valkey): await decoded_r.json().set("arr", Path.root_path(), [0, 4]) assert 5 == await decoded_r.json().arrinsert("arr", Path.root_path(), 1, *[1, 2, 3]) res = [0, 1, 2, 3, 4] @@ -215,14 +217,14 @@ async def test_arrinsert(decoded_r: redis.Redis): assert_resp_response(decoded_r, await decoded_r.json().get("val2"), res, [[res]]) -async def test_arrlen(decoded_r: redis.Redis): +async def test_arrlen(decoded_r: valkey.Valkey): await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4]) assert 5 == await decoded_r.json().arrlen("arr", Path.root_path()) assert 5 == await decoded_r.json().arrlen("arr") assert await decoded_r.json().arrlen("fakekey") is None -async def test_arrpop(decoded_r: redis.Redis): +async def test_arrpop(decoded_r: valkey.Valkey): await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4]) assert 4 == await decoded_r.json().arrpop("arr", Path.root_path(), 4) assert 3 == await decoded_r.json().arrpop("arr", Path.root_path(), -1) @@ -239,7 +241,7 @@ async def test_arrpop(decoded_r: redis.Redis): assert await decoded_r.json().arrpop("arr") is None -async def test_arrtrim(decoded_r: redis.Redis): +async def test_arrtrim(decoded_r: valkey.Valkey): await decoded_r.json().set("arr", Path.root_path(), [0, 1, 2, 3, 4]) assert 3 == await decoded_r.json().arrtrim("arr", Path.root_path(), 1, 3) res = await decoded_r.json().get("arr") @@ -262,7 +264,7 @@ async def test_arrtrim(decoded_r: 
redis.Redis): assert 0 == await decoded_r.json().arrtrim("arr", Path.root_path(), 9, 11) -async def test_resp(decoded_r: redis.Redis): +async def test_resp(decoded_r: valkey.Valkey): obj = {"foo": "bar", "baz": 1, "qaz": True} await decoded_r.json().set("obj", Path.root_path(), obj) assert "bar" == await decoded_r.json().resp("obj", Path("foo")) @@ -271,7 +273,7 @@ async def test_resp(decoded_r: redis.Redis): assert isinstance(await decoded_r.json().resp("obj"), list) -async def test_objkeys(decoded_r: redis.Redis): +async def test_objkeys(decoded_r: valkey.Valkey): obj = {"foo": "bar", "baz": "qaz"} await decoded_r.json().set("obj", Path.root_path(), obj) keys = await decoded_r.json().objkeys("obj", Path.root_path()) @@ -287,7 +289,7 @@ async def test_objkeys(decoded_r: redis.Redis): assert await decoded_r.json().objkeys("fakekey") is None -async def test_objlen(decoded_r: redis.Redis): +async def test_objlen(decoded_r: valkey.Valkey): obj = {"foo": "bar", "baz": "qaz"} await decoded_r.json().set("obj", Path.root_path(), obj) assert len(obj) == await decoded_r.json().objlen("obj", Path.root_path()) @@ -296,8 +298,8 @@ async def test_objlen(decoded_r: redis.Redis): assert len(obj) == await decoded_r.json().objlen("obj") -# @pytest.mark.redismod -# async def test_json_commands_in_pipeline(decoded_r: redis.Redis): +# @pytest.mark.valkeymod +# async def test_json_commands_in_pipeline(decoded_r: valkey.Valkey): # async with decoded_r.json().pipeline() as p: # p.set("foo", Path.root_path(), "bar") # p.get("foo") @@ -320,7 +322,7 @@ async def test_objlen(decoded_r: redis.Redis): # assert await decoded_r.get("foo") is None -async def test_json_delete_with_dollar(decoded_r: redis.Redis): +async def test_json_delete_with_dollar(decoded_r: valkey.Valkey): doc1 = {"a": 1, "nested": {"a": 2, "b": 3}} assert await decoded_r.json().set("doc1", "$", doc1) assert await decoded_r.json().delete("doc1", "$..a") == 2 @@ -373,7 +375,7 @@ async def 
test_json_delete_with_dollar(decoded_r: redis.Redis): await decoded_r.json().delete("not_a_document", "..a") -async def test_json_forget_with_dollar(decoded_r: redis.Redis): +async def test_json_forget_with_dollar(decoded_r: valkey.Valkey): doc1 = {"a": 1, "nested": {"a": 2, "b": 3}} assert await decoded_r.json().set("doc1", "$", doc1) assert await decoded_r.json().forget("doc1", "$..a") == 2 @@ -426,7 +428,7 @@ async def test_json_forget_with_dollar(decoded_r: redis.Redis): @pytest.mark.onlynoncluster -async def test_json_mget_dollar(decoded_r: redis.Redis): +async def test_json_mget_dollar(decoded_r: valkey.Valkey): # Test mget with multi paths await decoded_r.json().set( "doc1", @@ -461,7 +463,7 @@ async def test_json_mget_dollar(decoded_r: redis.Redis): assert res == [None, None] -async def test_numby_commands_dollar(decoded_r: redis.Redis): +async def test_numby_commands_dollar(decoded_r: valkey.Valkey): # Test NUMINCRBY await decoded_r.json().set( "doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]} @@ -515,7 +517,7 @@ async def test_numby_commands_dollar(decoded_r: redis.Redis): await decoded_r.json().nummultby("doc1", ".b[0].a", 3) == 6 -async def test_strappend_dollar(decoded_r: redis.Redis): +async def test_strappend_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}} ) @@ -545,7 +547,7 @@ async def test_strappend_dollar(decoded_r: redis.Redis): await decoded_r.json().strappend("doc1", "piu") -async def test_strlen_dollar(decoded_r: redis.Redis): +async def test_strlen_dollar(decoded_r: valkey.Valkey): # Test multi await decoded_r.json().set( "doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}} @@ -565,7 +567,7 @@ async def test_strlen_dollar(decoded_r: redis.Redis): await decoded_r.json().strlen("non_existing_doc", "$..a") -async def test_arrappend_dollar(decoded_r: redis.Redis): +async def test_arrappend_dollar(decoded_r: 
valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -638,7 +640,7 @@ async def test_arrappend_dollar(decoded_r: redis.Redis): await decoded_r.json().arrappend("non_existing_doc", "$..a") -async def test_arrinsert_dollar(decoded_r: redis.Redis): +async def test_arrinsert_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -676,7 +678,7 @@ async def test_arrinsert_dollar(decoded_r: redis.Redis): await decoded_r.json().arrappend("non_existing_doc", "$..a") -async def test_arrlen_dollar(decoded_r: redis.Redis): +async def test_arrlen_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -721,7 +723,7 @@ async def test_arrlen_dollar(decoded_r: redis.Redis): assert await decoded_r.json().arrlen("non_existing_doc", "..a") is None -async def test_arrpop_dollar(decoded_r: redis.Redis): +async def test_arrpop_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -762,7 +764,7 @@ async def test_arrpop_dollar(decoded_r: redis.Redis): await decoded_r.json().arrpop("non_existing_doc", "..a") -async def test_arrtrim_dollar(decoded_r: redis.Redis): +async def test_arrtrim_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -813,7 +815,7 @@ async def test_arrtrim_dollar(decoded_r: redis.Redis): await decoded_r.json().arrtrim("non_existing_doc", "..a", 1, 1) -async def test_objkeys_dollar(decoded_r: redis.Redis): +async def test_objkeys_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -842,7 +844,7 @@ async def test_objkeys_dollar(decoded_r: redis.Redis): assert await decoded_r.json().objkeys("doc1", "$..nowhere") == [] -async def test_objlen_dollar(decoded_r: redis.Redis): +async def test_objlen_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -896,7 +898,7 @@ def load_types_data(nested_key_name): return jdata, types -async def test_type_dollar(decoded_r: redis.Redis): +async def test_type_dollar(decoded_r: 
valkey.Valkey): jdata, jtypes = load_types_data("a") await decoded_r.json().set("doc1", "$", jdata) # Test multi @@ -914,7 +916,7 @@ async def test_type_dollar(decoded_r: redis.Redis): ) -async def test_clear_dollar(decoded_r: redis.Redis): +async def test_clear_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", @@ -967,7 +969,7 @@ async def test_clear_dollar(decoded_r: redis.Redis): await decoded_r.json().clear("non_existing_doc", "$..a") -async def test_toggle_dollar(decoded_r: redis.Redis): +async def test_toggle_dollar(decoded_r: valkey.Valkey): await decoded_r.json().set( "doc1", "$", diff --git a/tests/test_asyncio/test_lock.py b/tests/test_asyncio/test_lock.py index c052eae2..b2be86f6 100644 --- a/tests/test_asyncio/test_lock.py +++ b/tests/test_asyncio/test_lock.py @@ -2,20 +2,20 @@ import pytest import pytest_asyncio -from redis.asyncio.lock import Lock -from redis.exceptions import LockError, LockNotOwnedError +from valkey.asyncio.lock import Lock +from valkey.exceptions import LockError, LockNotOwnedError class TestLock: @pytest_asyncio.fixture() - async def r_decoded(self, create_redis): - redis = await create_redis(decode_responses=True) - yield redis - await redis.flushall() + async def r_decoded(self, create_valkey): + valkey = await create_valkey(decode_responses=True) + yield valkey + await valkey.flushall() - def get_lock(self, redis, *args, **kwargs): + def get_lock(self, valkey, *args, **kwargs): kwargs["lock_class"] = Lock - return redis.lock(*args, **kwargs) + return valkey.lock(*args, **kwargs) async def test_lock(self, r): lock = self.get_lock(r, "foo") diff --git a/tests/test_asyncio/test_monitor.py b/tests/test_asyncio/test_monitor.py index 73ee3cf8..30835192 100644 --- a/tests/test_asyncio/test_monitor.py +++ b/tests/test_asyncio/test_monitor.py @@ -1,5 +1,5 @@ import pytest -from tests.conftest import skip_if_redis_enterprise, skip_ifnot_redis_enterprise +from tests.conftest import skip_if_valkey_enterprise, 
skip_ifnot_valkey_enterprise from .conftest import wait_for_command @@ -44,7 +44,7 @@ async def test_command_with_escaped_data(self, r): response = await wait_for_command(r, m, "GET foo\\\\x92") assert response["command"] == "GET foo\\\\x92" - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() async def test_lua_script(self, r): async with r.monitor() as m: script = 'return redis.call("GET", "foo")' @@ -55,7 +55,7 @@ async def test_lua_script(self, r): assert response["client_address"] == "lua" assert response["client_port"] == "" - @skip_ifnot_redis_enterprise() + @skip_ifnot_valkey_enterprise() async def test_lua_script_in_enterprise(self, r): async with r.monitor() as m: script = 'return redis.call("GET", "foo")' diff --git a/tests/test_asyncio/test_pipeline.py b/tests/test_asyncio/test_pipeline.py index 4b29360d..5021f91c 100644 --- a/tests/test_asyncio/test_pipeline.py +++ b/tests/test_asyncio/test_pipeline.py @@ -1,5 +1,5 @@ import pytest -import redis +import valkey from tests.conftest import skip_if_server_version_lt from .compat import aclosing, mock @@ -80,7 +80,7 @@ async def test_pipeline_no_transaction_watch_failure(self, r): pipe.multi() pipe.set("a", int(a) + 1) - with pytest.raises(redis.WatchError): + with pytest.raises(valkey.WatchError): await pipe.execute() assert await r.get("a") == b"bad" @@ -102,7 +102,7 @@ async def test_exec_error_in_response(self, r): # we can't lpush to a key that's a string value, so this should # be a ResponseError exception - assert isinstance(result[2], redis.ResponseError) + assert isinstance(result[2], valkey.ResponseError) assert await r.get("c") == b"a" # since this isn't a transaction, the other commands after the @@ -118,7 +118,7 @@ async def test_exec_error_raised(self, r): await r.set("c", "a") async with r.pipeline() as pipe: pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: await pipe.execute() 
assert str(ex.value).startswith( "Command # 3 (LPUSH c 3) of pipeline caused error: " @@ -162,7 +162,7 @@ async def test_parse_error_raised(self, r): async with r.pipeline() as pipe: # the zrem is invalid because we don't pass any keys to it pipe.set("a", 1).zrem("b").set("b", 2) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: await pipe.execute() assert str(ex.value).startswith( @@ -179,7 +179,7 @@ async def test_parse_error_raised_transaction(self, r): pipe.multi() # the zrem is invalid because we don't pass any keys to it pipe.set("a", 1).zrem("b").set("b", 2) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: await pipe.execute() assert str(ex.value).startswith( @@ -218,7 +218,7 @@ async def test_watch_failure(self, r): await r.set("b", 3) pipe.multi() pipe.get("a") - with pytest.raises(redis.WatchError): + with pytest.raises(valkey.WatchError): await pipe.execute() assert not pipe.watching @@ -232,7 +232,7 @@ async def test_watch_failure_in_empty_transaction(self, r): await pipe.watch("a", "b") await r.set("b", 3) pipe.multi() - with pytest.raises(redis.WatchError): + with pytest.raises(valkey.WatchError): await pipe.execute() assert not pipe.watching @@ -344,7 +344,7 @@ async def test_exec_error_in_no_transaction_pipeline(self, r): pipe.llen("a") pipe.expire("a", 100) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: await pipe.execute() assert str(ex.value).startswith( @@ -360,7 +360,7 @@ async def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): pipe.llen(key) pipe.expire(key, 100) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: await pipe.execute() expected = f"Command # 1 (LLEN {key}) of pipeline caused error: " @@ -399,7 +399,7 @@ async def test_pipeline_discard(self, r): async with r.pipeline() as pipe: pipe.set("key", "someval") await 
pipe.discard() - with pytest.raises(redis.exceptions.ResponseError): + with pytest.raises(valkey.exceptions.ResponseError): await pipe.execute() # setting a pipeline and discarding should do the same @@ -410,7 +410,7 @@ async def test_pipeline_discard(self, r): pipe.set("key", "another value!") await pipe.discard() pipe.set("key", "another vae!") - with pytest.raises(redis.exceptions.ResponseError): + with pytest.raises(valkey.exceptions.ResponseError): await pipe.execute() pipe.set("foo", "bar") diff --git a/tests/test_asyncio/test_pubsub.py b/tests/test_asyncio/test_pubsub.py index 19d4b1c6..9b839de7 100644 --- a/tests/test_asyncio/test_pubsub.py +++ b/tests/test_asyncio/test_pubsub.py @@ -6,7 +6,7 @@ from unittest.mock import patch # the functionality is available in 3.11.x but has a major issue before -# 3.11.3. See https://github.com/redis/redis-py/issues/2633 +# 3.11.3. See https://github.com/valkey/valkey-py/issues/2633 if sys.version_info >= (3, 11, 3): from asyncio import timeout as async_timeout else: @@ -14,11 +14,11 @@ import pytest import pytest_asyncio -import redis.asyncio as redis -from redis.exceptions import ConnectionError -from redis.typing import EncodableT -from redis.utils import HIREDIS_AVAILABLE +import valkey.asyncio as valkey from tests.conftest import get_protocol_version, skip_if_server_version_lt +from valkey.exceptions import ConnectionError +from valkey.typing import EncodableT +from valkey.utils import HIREDIS_AVAILABLE from .compat import aclosing, create_task, mock @@ -83,7 +83,7 @@ def make_subscribe_test_data(pubsub, type): @pytest_asyncio.fixture() -async def pubsub(r: redis.Redis): +async def pubsub(r: valkey.Valkey): async with r.pubsub() as p: yield p @@ -214,7 +214,7 @@ async def test_subscribe_property_with_patterns(self, pubsub): kwargs = make_subscribe_test_data(pubsub, "pattern") await self._test_subscribed_property(**kwargs) - async def test_aclosing(self, r: redis.Redis): + async def test_aclosing(self, r: 
valkey.Valkey): p = r.pubsub() async with aclosing(p): assert p.subscribed is False @@ -222,7 +222,7 @@ async def test_aclosing(self, r: redis.Redis): assert p.subscribed is True assert p.subscribed is False - async def test_context_manager(self, r: redis.Redis): + async def test_context_manager(self, r: valkey.Valkey): p = r.pubsub() async with p: assert p.subscribed is False @@ -230,7 +230,7 @@ async def test_context_manager(self, r: redis.Redis): assert p.subscribed is True assert p.subscribed is False - async def test_close_is_aclose(self, r: redis.Redis): + async def test_close_is_aclose(self, r: valkey.Valkey): """ Test backwards compatible close method """ @@ -242,7 +242,7 @@ async def test_close_is_aclose(self, r: redis.Redis): await p.close() assert p.subscribed is False - async def test_reset_is_aclose(self, r: redis.Redis): + async def test_reset_is_aclose(self, r: valkey.Valkey): """ Test backwards compatible reset method """ @@ -254,7 +254,7 @@ async def test_reset_is_aclose(self, r: redis.Redis): await p.reset() assert p.subscribed is False - async def test_ignore_all_subscribe_messages(self, r: redis.Redis): + async def test_ignore_all_subscribe_messages(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) checks = ( @@ -302,7 +302,7 @@ async def test_sub_unsub_resub_patterns(self, pubsub): async def _test_sub_unsub_resub( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): - # https://github.com/andymccurdy/redis-py/issues/764 + # https://github.com/andymccurdy/valkey-py/issues/764 key = keys[0] await sub_func(key) await unsub_func(key) @@ -324,7 +324,7 @@ async def test_sub_unsub_all_resub_patterns(self, pubsub): async def _test_sub_unsub_all_resub( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): - # https://github.com/andymccurdy/redis-py/issues/764 + # https://github.com/andymccurdy/valkey-py/issues/764 key = keys[0] await sub_func(key) await unsub_func() @@ -347,7 +347,7 @@ def message_handler(self, 
message): async def async_message_handler(self, message): self.async_message = message - async def test_published_message_to_channel(self, r: redis.Redis, pubsub): + async def test_published_message_to_channel(self, r: valkey.Valkey, pubsub): p = pubsub await p.subscribe("foo") assert await wait_for_message(p) == make_message("subscribe", "foo", 1) @@ -357,7 +357,7 @@ async def test_published_message_to_channel(self, r: redis.Redis, pubsub): assert isinstance(message, dict) assert message == make_message("message", "foo", "test message") - async def test_published_message_to_pattern(self, r: redis.Redis, pubsub): + async def test_published_message_to_pattern(self, r: valkey.Valkey, pubsub): p = pubsub await p.subscribe("foo") await p.psubscribe("f*") @@ -380,7 +380,7 @@ async def test_published_message_to_pattern(self, r: redis.Redis, pubsub): assert message2 in expected assert message1 != message2 - async def test_channel_message_handler(self, r: redis.Redis): + async def test_channel_message_handler(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) await p.subscribe(foo=self.message_handler) assert await wait_for_message(p) is None @@ -411,7 +411,7 @@ async def test_channel_sync_async_message_handler(self, r): await p.aclose() @pytest.mark.onlynoncluster - async def test_pattern_message_handler(self, r: redis.Redis): + async def test_pattern_message_handler(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) await p.psubscribe(**{"f*": self.message_handler}) assert await wait_for_message(p) is None @@ -422,7 +422,7 @@ async def test_pattern_message_handler(self, r: redis.Redis): ) await p.aclose() - async def test_unicode_channel_message_handler(self, r: redis.Redis): + async def test_unicode_channel_message_handler(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) channel = "uni" + chr(4456) + "code" channels = {channel: self.message_handler} @@ -434,9 +434,9 @@ async def 
test_unicode_channel_message_handler(self, r: redis.Redis): await p.aclose() @pytest.mark.onlynoncluster - # see: https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html + # see: https://valkey-py-cluster.readthedocs.io/en/stable/pubsub.html # #known-limitations-with-pubsub - async def test_unicode_pattern_message_handler(self, r: redis.Redis): + async def test_unicode_pattern_message_handler(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) pattern = "uni" + chr(4456) + "*" channel = "uni" + chr(4456) + "code" @@ -449,7 +449,7 @@ async def test_unicode_pattern_message_handler(self, r: redis.Redis): ) await p.aclose() - async def test_get_message_without_subscribe(self, r: redis.Redis, pubsub): + async def test_get_message_without_subscribe(self, r: valkey.Valkey, pubsub): p = pubsub with pytest.raises(RuntimeError) as info: await p.get_message() @@ -495,8 +495,8 @@ def message_handler(self, message): self.message = message @pytest_asyncio.fixture() - async def r(self, create_redis): - return await create_redis(decode_responses=True) + async def r(self, create_valkey): + return await create_valkey(decode_responses=True) async def test_channel_subscribe_unsubscribe(self, pubsub): p = pubsub @@ -522,7 +522,7 @@ async def test_pattern_subscribe_unsubscribe(self, pubsub): "punsubscribe", self.pattern, 0 ) - async def test_channel_publish(self, r: redis.Redis, pubsub): + async def test_channel_publish(self, r: valkey.Valkey, pubsub): p = pubsub await p.subscribe(self.channel) assert await wait_for_message(p) == self.make_message( @@ -534,7 +534,7 @@ async def test_channel_publish(self, r: redis.Redis, pubsub): ) @pytest.mark.onlynoncluster - async def test_pattern_publish(self, r: redis.Redis, pubsub): + async def test_pattern_publish(self, r: valkey.Valkey, pubsub): p = pubsub await p.psubscribe(self.pattern) assert await wait_for_message(p) == self.make_message( @@ -545,7 +545,7 @@ async def test_pattern_publish(self, r: redis.Redis, pubsub): 
"pmessage", self.channel, self.data, pattern=self.pattern ) - async def test_channel_message_handler(self, r: redis.Redis): + async def test_channel_message_handler(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) await p.subscribe(**{self.channel: self.message_handler}) assert await wait_for_message(p) is None @@ -563,7 +563,7 @@ async def test_channel_message_handler(self, r: redis.Redis): assert self.message == self.make_message("message", self.channel, new_data) await p.aclose() - async def test_pattern_message_handler(self, r: redis.Redis): + async def test_pattern_message_handler(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) await p.psubscribe(**{self.pattern: self.message_handler}) assert await wait_for_message(p) is None @@ -585,7 +585,7 @@ async def test_pattern_message_handler(self, r: redis.Redis): ) await p.aclose() - async def test_context_manager(self, r: redis.Redis): + async def test_context_manager(self, r: valkey.Valkey): async with r.pubsub() as pubsub: await pubsub.subscribe("foo") assert pubsub.connection is not None @@ -597,9 +597,9 @@ async def test_context_manager(self, r: redis.Redis): @pytest.mark.onlynoncluster -class TestPubSubRedisDown: - async def test_channel_subscribe(self, r: redis.Redis): - r = redis.Redis(host="localhost", port=6390) +class TestPubSubValkeyDown: + async def test_channel_subscribe(self, r: valkey.Valkey): + r = valkey.Valkey(host="localhost", port=6390) p = r.pubsub() with pytest.raises(ConnectionError): await p.subscribe("foo") @@ -609,7 +609,7 @@ async def test_channel_subscribe(self, r: redis.Redis): class TestPubSubSubcommands: @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.0") - async def test_pubsub_channels(self, r: redis.Redis, pubsub): + async def test_pubsub_channels(self, r: valkey.Valkey, pubsub): p = pubsub await p.subscribe("foo", "bar", "baz", "quux") for i in range(4): @@ -619,7 +619,7 @@ async def test_pubsub_channels(self, r: redis.Redis, 
pubsub): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.0") - async def test_pubsub_numsub(self, r: redis.Redis): + async def test_pubsub_numsub(self, r: valkey.Valkey): p1 = r.pubsub() await p1.subscribe("foo", "bar", "baz") for i in range(3): @@ -639,7 +639,7 @@ async def test_pubsub_numsub(self, r: redis.Redis): await p3.aclose() @skip_if_server_version_lt("2.8.0") - async def test_pubsub_numpat(self, r: redis.Redis): + async def test_pubsub_numpat(self, r: valkey.Valkey): p = r.pubsub() await p.psubscribe("*oo", "*ar", "b*z") for i in range(3): @@ -651,7 +651,7 @@ async def test_pubsub_numpat(self, r: redis.Redis): @pytest.mark.onlynoncluster class TestPubSubPings: @skip_if_server_version_lt("3.0.0") - async def test_send_pubsub_ping(self, r: redis.Redis): + async def test_send_pubsub_ping(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) await p.subscribe("foo") await p.ping() @@ -661,7 +661,7 @@ async def test_send_pubsub_ping(self, r: redis.Redis): await p.aclose() @skip_if_server_version_lt("3.0.0") - async def test_send_pubsub_ping_message(self, r: redis.Redis): + async def test_send_pubsub_ping_message(self, r: valkey.Valkey): p = r.pubsub(ignore_subscribe_messages=True) await p.subscribe("foo") await p.ping(message="hello world") @@ -675,7 +675,7 @@ async def test_send_pubsub_ping_message(self, r: redis.Redis): class TestPubSubConnectionKilled: @skip_if_server_version_lt("3.0.0") async def test_connection_error_raised_when_connection_dies( - self, r: redis.Redis, pubsub + self, r: valkey.Valkey, pubsub ): p = pubsub await p.subscribe("foo") @@ -699,7 +699,7 @@ async def test_get_message_with_timeout_returns_none(self, pubsub): @pytest.mark.onlynoncluster class TestPubSubReconnect: @with_timeout(2) - async def test_reconnect_listen(self, r: redis.Redis, pubsub): + async def test_reconnect_listen(self, r: valkey.Valkey, pubsub): """ Test that a loop processing PubSub messages can survive a disconnect, by issuing a connect() 
call. @@ -717,7 +717,7 @@ async def loop(): try: await pubsub.connect() await loop_step() - except redis.ConnectionError: + except valkey.ConnectionError: await asyncio.sleep(0.1) except asyncio.CancelledError: # we use a cancel to interrupt the "listen" @@ -775,7 +775,7 @@ async def _subscribe(self, p, *args, **kwargs): ): return - async def test_callbacks(self, r: redis.Redis, pubsub): + async def test_callbacks(self, r: valkey.Valkey, pubsub): def callback(message): messages.put_nowait(message) @@ -797,7 +797,7 @@ def callback(message): "type": "message", } - async def test_exception_handler(self, r: redis.Redis, pubsub): + async def test_exception_handler(self, r: valkey.Valkey, pubsub): def exception_handler_callback(e, pubsub) -> None: assert pubsub == p exceptions.put_nowait(e) @@ -817,7 +817,7 @@ def exception_handler_callback(e, pubsub) -> None: pass assert str(e) == "error" - async def test_late_subscribe(self, r: redis.Redis, pubsub): + async def test_late_subscribe(self, r: valkey.Valkey, pubsub): def callback(message): messages.put_nowait(message) @@ -892,7 +892,7 @@ async def mykill(self): self.state = 4 # quit await self.task - async def test_reconnect_socket_error(self, r: redis.Redis, method): + async def test_reconnect_socket_error(self, r: valkey.Valkey, method): """ Test that a socket error will cause reconnect """ @@ -921,7 +921,7 @@ async def test_reconnect_socket_error(self, r: redis.Redis, method): finally: await self.mykill() - async def test_reconnect_disconnect(self, r: redis.Redis, method): + async def test_reconnect_disconnect(self, r: valkey.Valkey, method): """ Test that a manual disconnect() will cause reconnect """ @@ -958,7 +958,7 @@ async def loop(self): assert got_msg if self.state in (1, 2): self.state = 3 # successful reconnect - except redis.ConnectionError: + except valkey.ConnectionError: assert self.state in (1, 2) self.state = 2 # signal that we noticed the disconnect finally: @@ -992,7 +992,7 @@ class TestBaseException: 
@pytest.mark.skipif( sys.version_info < (3, 8), reason="requires python 3.8 or higher" ) - async def test_outer_timeout(self, r: redis.Redis): + async def test_outer_timeout(self, r: valkey.Valkey): """ Using asyncio_timeout manually outside the inner method timeouts works. This works on Python versions 3.8 and greater, at which time asyncio. @@ -1026,7 +1026,7 @@ async def get_msg_or_timeout(timeout=0.1): @pytest.mark.skipif( sys.version_info < (3, 8), reason="requires python 3.8 or higher" ) - async def test_base_exception(self, r: redis.Redis): + async def test_base_exception(self, r: valkey.Valkey): """ Manually trigger a BaseException inside the parser's .read_response method and verify that it isn't caught @@ -1050,9 +1050,9 @@ async def get_msg(): assert msg is not None # timeout waiting for another message which never arrives assert pubsub.connection.is_connected - with patch("redis._parsers._AsyncRESP2Parser.read_response") as mock1, patch( - "redis._parsers._AsyncHiredisParser.read_response" - ) as mock2, patch("redis._parsers._AsyncRESP3Parser.read_response") as mock3: + with patch("valkey._parsers._AsyncRESP2Parser.read_response") as mock1, patch( + "valkey._parsers._AsyncHiredisParser.read_response" + ) as mock2, patch("valkey._parsers._AsyncRESP3Parser.read_response") as mock3: mock1.side_effect = BaseException("boom") mock2.side_effect = BaseException("boom") mock3.side_effect = BaseException("boom") diff --git a/tests/test_asyncio/test_retry.py b/tests/test_asyncio/test_retry.py index 8bc71c14..dde62e1d 100644 --- a/tests/test_asyncio/test_retry.py +++ b/tests/test_asyncio/test_retry.py @@ -1,9 +1,9 @@ import pytest -from redis.asyncio import Redis -from redis.asyncio.connection import Connection, UnixDomainSocketConnection -from redis.asyncio.retry import Retry -from redis.backoff import AbstractBackoff, ExponentialBackoff, NoBackoff -from redis.exceptions import ConnectionError, TimeoutError +from valkey.asyncio import Valkey +from 
valkey.asyncio.connection import Connection, UnixDomainSocketConnection +from valkey.asyncio.retry import Retry +from valkey.backoff import AbstractBackoff, ExponentialBackoff, NoBackoff +from valkey.exceptions import ConnectionError, TimeoutError class BackoffMock(AbstractBackoff): @@ -116,13 +116,13 @@ async def test_infinite_retry(self): assert self.actual_failures == 5 -class TestRedisClientRetry: - "Test the Redis client behavior with retries" +class TestValkeyClientRetry: + "Test the Valkey client behavior with retries" async def test_get_set_retry_object(self, request): retry = Retry(NoBackoff(), 2) - url = request.config.getoption("--redis-url") - r = await Redis.from_url(url, retry_on_timeout=True, retry=retry) + url = request.config.getoption("--valkey-url") + r = await Valkey.from_url(url, retry_on_timeout=True, retry=retry) assert r.get_retry()._retries == retry._retries assert isinstance(r.get_retry()._backoff, NoBackoff) new_retry_policy = Retry(ExponentialBackoff(), 3) diff --git a/tests/test_asyncio/test_scripting.py b/tests/test_asyncio/test_scripting.py index 8375ecd7..42269ad0 100644 --- a/tests/test_asyncio/test_scripting.py +++ b/tests/test_asyncio/test_scripting.py @@ -1,7 +1,7 @@ import pytest import pytest_asyncio -from redis import exceptions from tests.conftest import skip_if_server_version_lt +from valkey import exceptions multiply_script = """ local value = redis.call('GET', KEYS[1]) @@ -23,10 +23,10 @@ @pytest.mark.onlynoncluster class TestScripting: @pytest_asyncio.fixture - async def r(self, create_redis): - redis = await create_redis() - yield redis - await redis.script_flush() + async def r(self, create_valkey): + valkey = await create_valkey() + yield valkey + await valkey.script_flush() @pytest.mark.asyncio(forbid_global_loop=True) async def test_eval(self, r): @@ -66,7 +66,7 @@ async def test_evalsha(self, r): async def test_evalsha_script_not_loaded(self, r): await r.set("a", 2) sha = await r.script_load(multiply_script) - # 
remove the script from Redis's cache + # remove the script from Valkey's cache await r.script_flush() with pytest.raises(exceptions.NoScriptError): await r.evalsha(sha, 1, "a", 3) @@ -92,7 +92,7 @@ async def test_script_object(self, r): assert await multiply(keys=["a"], args=[3]) == 6 # At this point, the script should be loaded assert await r.script_exists(multiply.sha) == [True] - # Test that the precalculated sha matches the one from redis + # Test that the precalculated sha matches the one from valkey assert multiply.sha == precalculated_sha # Test first evalsha block assert await multiply(keys=["a"], args=[3]) == 6 @@ -115,7 +115,7 @@ async def test_script_object_in_pipeline(self, r): # The precalculated sha should have been the correct one assert multiply.sha == precalculated_sha - # purge the script from redis's cache and re-run the pipeline + # purge the script from valkey's cache and re-run the pipeline # the multiply script should be reloaded by pipe.execute() await r.script_flush() pipe = r.pipeline() diff --git a/tests/test_asyncio/test_search.py b/tests/test_asyncio/test_search.py index 1f1931e2..d0e5ebb1 100644 --- a/tests/test_asyncio/test_search.py +++ b/tests/test_asyncio/test_search.py @@ -5,22 +5,24 @@ from io import TextIOWrapper import pytest -import redis.asyncio as redis -import redis.commands.search -import redis.commands.search.aggregation as aggregations -import redis.commands.search.reducers as reducers -from redis.commands.search import AsyncSearch -from redis.commands.search.field import GeoField, NumericField, TagField, TextField -from redis.commands.search.indexDefinition import IndexDefinition -from redis.commands.search.query import GeoFilter, NumericFilter, Query -from redis.commands.search.result import Result -from redis.commands.search.suggestion import Suggestion +import valkey.asyncio as valkey +import valkey.commands.search +import valkey.commands.search.aggregation as aggregations +import valkey.commands.search.reducers as 
reducers from tests.conftest import ( assert_resp_response, is_resp2_connection, - skip_if_redis_enterprise, + skip_if_valkey_enterprise, skip_ifmodversion_lt, ) +from valkey.commands.search import AsyncSearch +from valkey.commands.search.field import GeoField, NumericField, TagField, TextField +from valkey.commands.search.indexDefinition import IndexDefinition +from valkey.commands.search.query import GeoFilter, NumericFilter, Query +from valkey.commands.search.result import Result +from valkey.commands.search.suggestion import Suggestion + +pytestmark = pytest.mark.skip WILL_PLAY_TEXT = os.path.abspath( os.path.join(os.path.dirname(__file__), "testdata", "will_play_text.csv.bz2") @@ -54,7 +56,7 @@ async def waitForIndex(env, idx, timeout=None): break -def getClient(decoded_r: redis.Redis): +def getClient(decoded_r: valkey.Valkey): """ Gets a client client attached to an index name which is ready to be created @@ -68,7 +70,7 @@ async def createIndex(decoded_r, num_docs=100, definition=None): (TextField("play", weight=5.0), TextField("txt"), NumericField("chapter")), definition=definition, ) - except redis.ResponseError: + except valkey.ResponseError: await decoded_r.dropindex(delete_documents=True) return createIndex(decoded_r, num_docs=num_docs, definition=definition) @@ -96,8 +98,8 @@ async def createIndex(decoded_r, num_docs=100, definition=None): await indexer.commit() -@pytest.mark.redismod -async def test_client(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_client(decoded_r: valkey.Valkey): num_docs = 500 await createIndex(decoded_r.ft(), num_docs=num_docs) await waitForIndex(decoded_r, "idx") @@ -324,9 +326,9 @@ async def test_client(decoded_r: redis.Redis): await decoded_r.ft().delete_document("doc-5ghs2") -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster -async def test_scores(decoded_r: redis.Redis): +async def test_scores(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("txt"),)) await 
decoded_r.hset("doc1", mapping={"txt": "foo baz"}) @@ -346,8 +348,8 @@ async def test_scores(decoded_r: redis.Redis): assert "doc1" == res["results"][1]["id"] -@pytest.mark.redismod -async def test_stopwords(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_stopwords(decoded_r: valkey.Valkey): stopwords = ["foo", "bar", "baz"] await decoded_r.ft().create_index((TextField("txt"),), stopwords=stopwords) await decoded_r.hset("doc1", mapping={"txt": "foo bar"}) @@ -365,8 +367,8 @@ async def test_stopwords(decoded_r: redis.Redis): assert 1 == res2["total_results"] -@pytest.mark.redismod -async def test_filters(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_filters(decoded_r: valkey.Valkey): await decoded_r.ft().create_index( (TextField("txt"), NumericField("num"), GeoField("loc")) ) @@ -423,8 +425,8 @@ async def test_filters(decoded_r: redis.Redis): assert ["doc1", "doc2"] == res -@pytest.mark.redismod -async def test_sort_by(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_sort_by(decoded_r: valkey.Valkey): await decoded_r.ft().create_index( (TextField("txt"), NumericField("num", sortable=True)) ) @@ -457,9 +459,9 @@ async def test_sort_by(decoded_r: redis.Redis): assert "doc3" == res2["results"][0]["id"] -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.0.0", "search") -async def test_drop_index(decoded_r: redis.Redis): +async def test_drop_index(decoded_r: valkey.Valkey): """ Ensure the index gets dropped by data remains by default """ @@ -476,8 +478,8 @@ async def test_drop_index(decoded_r: redis.Redis): assert i == keep_docs[1] -@pytest.mark.redismod -async def test_example(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_example(decoded_r: valkey.Valkey): # Creating the index definition and schema await decoded_r.ft().create_index( (TextField("title", weight=5.0), TextField("body")) @@ -488,7 +490,7 @@ async def test_example(decoded_r: redis.Redis): "doc1", mapping={ "title": 
"RediSearch", - "body": "Redisearch impements a search engine on top of redis", + "body": "RediSearch implements a search engine on top of valkey", }, ) @@ -499,8 +501,8 @@ async def test_example(decoded_r: redis.Redis): assert res is not None -@pytest.mark.redismod -async def test_auto_complete(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_auto_complete(decoded_r: valkey.Valkey): n = 0 with open(TITLES_CSV) as f: cr = csv.reader(f) @@ -550,8 +552,8 @@ assert sug.payload.startswith("pl") -@pytest.mark.redismod -async def test_no_index(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_no_index(decoded_r: valkey.Valkey): await decoded_r.ft().create_index( ( TextField("field"), @@ -628,8 +630,8 @@ async def test_no_index(decoded_r: redis.Redis): TagField("name", no_index=True, sortable=False) -@pytest.mark.redismod -async def test_explain(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_explain(decoded_r: valkey.Valkey): await decoded_r.ft().create_index( (TextField("f1"), TextField("f2"), TextField("f3")) ) @@ -637,14 +639,14 @@ async def test_explain(decoded_r: redis.Redis): assert res -@pytest.mark.redismod -async def test_explaincli(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_explaincli(decoded_r: valkey.Valkey): with pytest.raises(NotImplementedError): await decoded_r.ft().explain_cli("foo") -@pytest.mark.redismod -async def test_summarize(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_summarize(decoded_r: valkey.Valkey): await createIndex(decoded_r.ft()) await waitForIndex(decoded_r, "idx") @@ -686,9 +688,9 @@ async def test_summarize(decoded_r: redis.Redis): ) -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.0.0", "search") -async def test_alias(decoded_r: redis.Redis): +async def test_alias(decoded_r: valkey.Valkey): index1 = getClient(decoded_r) index2 = getClient(decoded_r) @@ -749,9 +751,9 @@
async def test_alias(decoded_r: redis.Redis): (await alias_client2.search("*")).docs[0] -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.xfail(strict=False) -async def test_alias_basic(decoded_r: redis.Redis): +async def test_alias_basic(decoded_r: valkey.Valkey): # Creating a client with one index client = getClient(decoded_r) await client.flushdb() @@ -802,8 +804,8 @@ async def test_alias_basic(decoded_r: redis.Redis): _ = (await alias_client2.search("*")).docs[0] -@pytest.mark.redismod -async def test_tags(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_tags(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("txt"), TagField("tags"))) tags = "foo,foo bar,hello;world" tags2 = "soba,ramen" @@ -851,8 +853,8 @@ async def test_tags(decoded_r: redis.Redis): assert set(tags.split(",") + tags2.split(",")) == q2 -@pytest.mark.redismod -async def test_textfield_sortable_nostem(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_textfield_sortable_nostem(decoded_r: valkey.Valkey): # Creating the index definition with sortable and no_stem await decoded_r.ft().create_index((TextField("txt", sortable=True, no_stem=True),)) @@ -866,8 +868,8 @@ async def test_textfield_sortable_nostem(decoded_r: redis.Redis): assert "NOSTEM" in response["attributes"][0]["flags"] -@pytest.mark.redismod -async def test_alter_schema_add(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_alter_schema_add(decoded_r: valkey.Valkey): # Creating the index definition and schema await decoded_r.ft().create_index(TextField("title")) @@ -890,8 +892,8 @@ async def test_alter_schema_add(decoded_r: redis.Redis): assert 1 == res["total_results"] -@pytest.mark.redismod -async def test_spell_check(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_spell_check(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("f1"), TextField("f2"))) await decoded_r.hset( @@ -959,8 +961,8 @@ async def 
test_spell_check(decoded_r: redis.Redis): assert res == {"results": {}} -@pytest.mark.redismod -async def test_dict_operations(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_dict_operations(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("f1"), TextField("f2"))) # Add three items res = await decoded_r.ft().dict_add("custom_dict", "item1", "item2", "item3") @@ -978,8 +980,8 @@ async def test_dict_operations(decoded_r: redis.Redis): await decoded_r.ft().dict_del("custom_dict", *res) -@pytest.mark.redismod -async def test_phonetic_matcher(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_phonetic_matcher(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("name"),)) await decoded_r.hset("doc1", mapping={"name": "Jon"}) await decoded_r.hset("doc2", mapping={"name": "John"}) @@ -1010,9 +1012,9 @@ async def test_phonetic_matcher(decoded_r: redis.Redis): ) -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster -async def test_scorer(decoded_r: redis.Redis): +async def test_scorer(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("description"),)) await decoded_r.hset( @@ -1070,8 +1072,8 @@ async def test_scorer(decoded_r: redis.Redis): assert 0.0 == res["results"][0]["score"] -@pytest.mark.redismod -async def test_get(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_get(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("f1"), TextField("f2"))) assert [None] == await decoded_r.ft().get("doc1") @@ -1093,12 +1095,12 @@ async def test_get(decoded_r: redis.Redis): ] == await decoded_r.ft().get("doc1", "doc2") -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster @skip_ifmodversion_lt("2.2.0", "search") -async def test_config(decoded_r: redis.Redis): +async def test_config(decoded_r: valkey.Valkey): assert await decoded_r.ft().config_set("TIMEOUT", "100") - with pytest.raises(redis.ResponseError): + 
with pytest.raises(valkey.ResponseError): await decoded_r.ft().config_set("TIMEOUT", "null") res = await decoded_r.ft().config_get("*") assert "100" == res["TIMEOUT"] @@ -1106,9 +1108,9 @@ async def test_config(decoded_r: redis.Redis): assert "100" == res["TIMEOUT"] -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster -async def test_aggregations_groupby(decoded_r: redis.Redis): +async def test_aggregations_groupby(decoded_r: valkey.Valkey): # Creating the index definition and schema await decoded_r.ft().create_index( ( @@ -1124,8 +1126,8 @@ "search", mapping={ "title": "RediSearch", - "body": "Redisearch impements a search engine on top of redis", - "parent": "redis", + "body": "RediSearch implements a search engine on top of valkey", + "parent": "valkey", "random_num": 10, }, ) @@ -1134,7 +1136,7 @@ async def test_aggregations_groupby(decoded_r: redis.Redis): mapping={ "title": "RedisAI", "body": "RedisAI executes Deep Learning/Machine Learning models and managing their data.", # noqa - "parent": "redis", + "parent": "valkey", "random_num": 3, }, ) @@ -1143,7 +1145,7 @@ async def test_aggregations_groupby(decoded_r: redis.Redis): mapping={ "title": "RedisJson", "body": "RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.", # noqa - "parent": "redis", + "parent": "valkey", "random_num": 8, }, ) @@ -1151,116 +1153,116 @@ async def test_aggregations_groupby(decoded_r: redis.Redis): for dialect in [1, 2]: if is_resp2_connection(decoded_r): req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.count()) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent",
reducers.count_distinct("@title")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.count_distinctish("@title")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.sum("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "21" # 10+8+3 req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.min("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" # min(10,8,3) req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.max("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "10" # max(10,8,3) req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.avg("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "7" # (10+3+8)/3 req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.stddev("random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3.60555127546" req = ( 
- aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.quantile("@random_num", 0.5)) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "8" # median of 3,8,10 req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.tolist("@title")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert set(res[3]) == {"RediSearch", "RedisAI", "RedisJson"} req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.first_value("@title").alias("first")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res == ["parent", "redis", "first", "RediSearch"] + assert res == ["parent", "valkey", "first", "RediSearch"] req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by( "@parent", reducers.random_sample("@title", 2).alias("random") ) @@ -1268,120 +1270,120 @@ async def test_aggregations_groupby(decoded_r: redis.Redis): ) res = (await decoded_r.ft().aggregate(req)).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[2] == "random" assert len(res[3]) == 2 assert res[3][0] in ["RediSearch", "RedisAI", "RedisJson"] else: req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.count()) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliascount"] == "3" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.count_distinct("@title")) 
.dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert ( res["extra_attributes"]["__generated_aliascount_distincttitle"] == "3" ) req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.count_distinctish("@title")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert ( res["extra_attributes"]["__generated_aliascount_distinctishtitle"] == "3" ) req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.sum("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliassumrandom_num"] == "21" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.min("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliasminrandom_num"] == "3" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.max("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliasmaxrandom_num"] == "10" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") 
.group_by("@parent", reducers.avg("@random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliasavgrandom_num"] == "7" req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.stddev("random_num")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert ( res["extra_attributes"]["__generated_aliasstddevrandom_num"] == "3.60555127546" ) req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.quantile("@random_num", 0.5)) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert ( res["extra_attributes"]["__generated_aliasquantilerandom_num,0.5"] == "8" ) req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.tolist("@title")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert set(res["extra_attributes"]["__generated_aliastolisttitle"]) == { "RediSearch", "RedisAI", @@ -1389,16 +1391,19 @@ async def test_aggregations_groupby(decoded_r: redis.Redis): } req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by("@parent", reducers.first_value("@title").alias("first")) .dialect(dialect) ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"] == {"parent": "redis", "first": "RediSearch"} + assert 
res["extra_attributes"] == { + "parent": "valkey", + "first": "RediSearch", + } req = ( - aggregations.AggregateRequest("redis") + aggregations.AggregateRequest("valkey") .group_by( "@parent", reducers.random_sample("@title", 2).alias("random") ) @@ -1406,7 +1411,7 @@ async def test_aggregations_groupby(decoded_r: redis.Redis): ) res = (await decoded_r.ft().aggregate(req))["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert "random" in res["extra_attributes"].keys() assert len(res["extra_attributes"]["random"]) == 2 assert res["extra_attributes"]["random"][0] in [ @@ -1416,8 +1421,8 @@ async def test_aggregations_groupby(decoded_r: redis.Redis): ] -@pytest.mark.redismod -async def test_aggregations_sort_by_and_limit(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_aggregations_sort_by_and_limit(decoded_r: valkey.Valkey): await decoded_r.ft().create_index((TextField("t1"), TextField("t2"))) await decoded_r.ft().client.hset("doc1", mapping={"t1": "a", "t2": "b"}) @@ -1475,9 +1480,9 @@ async def test_aggregations_sort_by_and_limit(decoded_r: redis.Redis): assert res["results"][0]["extra_attributes"] == {"t1": "b"} -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.experimental -async def test_withsuffixtrie(decoded_r: redis.Redis): +async def test_withsuffixtrie(decoded_r: valkey.Valkey): # create index assert await decoded_r.ft().create_index((TextField("txt"),)) await waitForIndex(decoded_r, getattr(decoded_r.ft(), "index_name", "idx")) @@ -1517,9 +1522,9 @@ async def test_withsuffixtrie(decoded_r: redis.Redis): assert "WITHSUFFIXTRIE" in info["attributes"][0]["flags"] -@pytest.mark.redismod -@skip_if_redis_enterprise() -async def test_search_commands_in_pipeline(decoded_r: redis.Redis): +@pytest.mark.valkeymod +@skip_if_valkey_enterprise() +async def test_search_commands_in_pipeline(decoded_r: valkey.Valkey): p = await decoded_r.ft().pipeline() 
p.create_index((TextField("txt"),)) p.hset("doc1", mapping={"txt": "foo bar"}) @@ -1547,10 +1552,10 @@ async def test_search_commands_in_pipeline(decoded_r: redis.Redis): ) -@pytest.mark.redismod -async def test_query_timeout(decoded_r: redis.Redis): +@pytest.mark.valkeymod +async def test_query_timeout(decoded_r: valkey.Valkey): q1 = Query("foo").timeout(5000) assert q1.get_args() == ["foo", "TIMEOUT", 5000, "LIMIT", 0, 10] q2 = Query("foo").timeout("not_a_number") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): await decoded_r.ft().search(q2) diff --git a/tests/test_asyncio/test_sentinel.py b/tests/test_asyncio/test_sentinel.py index 51e59d69..17f19794 100644 --- a/tests/test_asyncio/test_sentinel.py +++ b/tests/test_asyncio/test_sentinel.py @@ -3,9 +3,9 @@ import pytest import pytest_asyncio -import redis.asyncio.sentinel -from redis import exceptions -from redis.asyncio.sentinel import ( +import valkey.asyncio.sentinel +from valkey import exceptions +from valkey.asyncio.sentinel import ( MasterNotFoundError, Sentinel, SentinelConnectionPool, @@ -37,7 +37,7 @@ async def sentinel_slaves(self, master_name): async def execute_command(self, *args, **kwargs): # wrapper purely to validate the calls don't explode - from redis.asyncio.client import bool_ok + from valkey.asyncio.client import bool_ok return bool_ok @@ -73,10 +73,10 @@ def client(self, host, port, **kwargs): @pytest_asyncio.fixture() async def cluster(master_ip): cluster = SentinelTestCluster(ip=master_ip) - saved_Redis = redis.asyncio.sentinel.Redis - redis.asyncio.sentinel.Redis = cluster.client + saved_Valkey = valkey.asyncio.sentinel.Valkey + valkey.asyncio.sentinel.Valkey = cluster.client yield cluster - redis.asyncio.sentinel.Redis = saved_Redis + valkey.asyncio.sentinel.Valkey = saved_Valkey @pytest_asyncio.fixture() diff --git a/tests/test_asyncio/test_sentinel_managed_connection.py b/tests/test_asyncio/test_sentinel_managed_connection.py index 
cae4b958..641b1bea 100644 --- a/tests/test_asyncio/test_sentinel_managed_connection.py +++ b/tests/test_asyncio/test_sentinel_managed_connection.py @@ -1,9 +1,9 @@ import socket import pytest -from redis.asyncio.retry import Retry -from redis.asyncio.sentinel import SentinelManagedConnection -from redis.backoff import NoBackoff +from valkey.asyncio.retry import Retry +from valkey.asyncio.sentinel import SentinelManagedConnection +from valkey.backoff import NoBackoff from .compat import mock diff --git a/tests/test_asyncio/test_timeseries.py b/tests/test_asyncio/test_timeseries.py index b4421970..53de9527 100644 --- a/tests/test_asyncio/test_timeseries.py +++ b/tests/test_asyncio/test_timeseries.py @@ -2,18 +2,20 @@ from time import sleep import pytest -import redis.asyncio as redis +import valkey.asyncio as valkey from tests.conftest import ( assert_resp_response, is_resp2_connection, skip_ifmodversion_lt, ) +pytestmark = pytest.mark.skip -async def test_create(decoded_r: redis.Redis): + +async def test_create(decoded_r: valkey.Valkey): assert await decoded_r.ts().create(1) assert await decoded_r.ts().create(2, retention_msecs=5) - assert await decoded_r.ts().create(3, labels={"Redis": "Labs"}) + assert await decoded_r.ts().create(3, labels={"Valkey": "Labs"}) assert await decoded_r.ts().create(4, retention_msecs=20, labels={"Time": "Series"}) info = await decoded_r.ts().info(4) assert_resp_response( @@ -28,7 +30,7 @@ async def test_create(decoded_r: redis.Redis): @skip_ifmodversion_lt("1.4.0", "timeseries") -async def test_create_duplicate_policy(decoded_r: redis.Redis): +async def test_create_duplicate_policy(decoded_r: valkey.Valkey): # Test for duplicate policy for duplicate_policy in ["block", "last", "first", "min", "max"]: ts_name = f"time-serie-ooo-{duplicate_policy}" @@ -42,7 +44,7 @@ async def test_create_duplicate_policy(decoded_r: redis.Redis): ) -async def test_alter(decoded_r: redis.Redis): +async def test_alter(decoded_r: valkey.Valkey): assert await 
decoded_r.ts().create(1) res = await decoded_r.ts().info(1) assert_resp_response( @@ -65,7 +67,7 @@ async def test_alter(decoded_r: redis.Redis): @skip_ifmodversion_lt("1.4.0", "timeseries") -async def test_alter_diplicate_policy(decoded_r: redis.Redis): +async def test_alter_diplicate_policy(decoded_r: valkey.Valkey): assert await decoded_r.ts().create(1) info = await decoded_r.ts().info(1) assert_resp_response( @@ -78,12 +80,12 @@ async def test_alter_diplicate_policy(decoded_r: redis.Redis): ) -async def test_add(decoded_r: redis.Redis): +async def test_add(decoded_r: valkey.Valkey): assert 1 == await decoded_r.ts().add(1, 1, 1) assert 2 == await decoded_r.ts().add(2, 2, 3, retention_msecs=10) - assert 3 == await decoded_r.ts().add(3, 3, 2, labels={"Redis": "Labs"}) + assert 3 == await decoded_r.ts().add(3, 3, 2, labels={"Valkey": "Labs"}) assert 4 == await decoded_r.ts().add( - 4, 4, 2, retention_msecs=10, labels={"Redis": "Labs", "Time": "Series"} + 4, 4, 2, retention_msecs=10, labels={"Valkey": "Labs", "Time": "Series"} ) res = await decoded_r.ts().add(5, "*", 1) assert abs(time.time() - round(float(res) / 1000)) < 1.0 @@ -92,7 +94,7 @@ async def test_add(decoded_r: redis.Redis): assert_resp_response( decoded_r, 10, info.get("retention_msecs"), info.get("retentionTime") ) - assert "Labs" == info["labels"]["Redis"] + assert "Labs" == info["labels"]["Valkey"] # Test for a chunk size of 128 Bytes on TS.ADD assert await decoded_r.ts().add("time-serie-1", 1, 10.0, chunk_size=128) @@ -101,7 +103,7 @@ async def test_add(decoded_r: redis.Redis): @skip_ifmodversion_lt("1.4.0", "timeseries") -async def test_add_duplicate_policy(r: redis.Redis): +async def test_add_duplicate_policy(r: valkey.Valkey): # Test for duplicate policy BLOCK assert 1 == await r.ts().add("time-serie-add-ooo-block", 1, 5.0) with pytest.raises(Exception): @@ -140,14 +142,14 @@ async def test_add_duplicate_policy(r: redis.Redis): assert 5.0 == res[1] -async def test_madd(decoded_r: redis.Redis): 
+async def test_madd(decoded_r: valkey.Valkey): await decoded_r.ts().create("a") assert [1, 2, 3] == await decoded_r.ts().madd( [("a", 1, 5), ("a", 2, 10), ("a", 3, 15)] ) -async def test_incrby_decrby(decoded_r: redis.Redis): +async def test_incrby_decrby(decoded_r: valkey.Valkey): for _ in range(100): assert await decoded_r.ts().incrby(1, 1) sleep(0.001) @@ -175,7 +177,7 @@ async def test_incrby_decrby(decoded_r: redis.Redis): assert_resp_response(decoded_r, 128, info.get("chunk_size"), info.get("chunkSize")) -async def test_create_and_delete_rule(decoded_r: redis.Redis): +async def test_create_and_delete_rule(decoded_r: valkey.Valkey): # test rule creation time = 100 await decoded_r.ts().create(1) @@ -199,7 +201,7 @@ async def test_create_and_delete_rule(decoded_r: redis.Redis): @skip_ifmodversion_lt("99.99.99", "timeseries") -async def test_del_range(decoded_r: redis.Redis): +async def test_del_range(decoded_r: valkey.Valkey): try: await decoded_r.ts().delete("test", 0, 100) except Exception as e: @@ -214,7 +216,7 @@ async def test_del_range(decoded_r: redis.Redis): ) -async def test_range(r: redis.Redis): +async def test_range(r: valkey.Valkey): for i in range(100): await r.ts().add(1, i, i % 7) assert 100 == len(await r.ts().range(1, 0, 200)) @@ -229,7 +231,7 @@ async def test_range(r: redis.Redis): @skip_ifmodversion_lt("99.99.99", "timeseries") -async def test_range_advanced(decoded_r: redis.Redis): +async def test_range_advanced(decoded_r: valkey.Valkey): for i in range(100): await decoded_r.ts().add(1, i, i % 7) await decoded_r.ts().add(1, i + 200, i % 7) @@ -259,7 +261,7 @@ async def test_range_advanced(decoded_r: redis.Redis): @skip_ifmodversion_lt("99.99.99", "timeseries") -async def test_rev_range(decoded_r: redis.Redis): +async def test_rev_range(decoded_r: valkey.Valkey): for i in range(100): await decoded_r.ts().add(1, i, i % 7) assert 100 == len(await decoded_r.ts().range(1, 0, 200)) @@ -302,7 +304,7 @@ async def test_rev_range(decoded_r: 
redis.Redis): @pytest.mark.onlynoncluster -async def test_multi_range(decoded_r: redis.Redis): +async def test_multi_range(decoded_r: valkey.Valkey): await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"}) await decoded_r.ts().create( 2, labels={"Test": "This", "Taste": "That", "team": "sf"} @@ -357,7 +359,7 @@ async def test_multi_range(decoded_r: redis.Redis): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("99.99.99", "timeseries") -async def test_multi_range_advanced(decoded_r: redis.Redis): +async def test_multi_range_advanced(decoded_r: valkey.Valkey): await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"}) await decoded_r.ts().create( 2, labels={"Test": "This", "Taste": "That", "team": "sf"} @@ -474,7 +476,7 @@ async def test_multi_range_advanced(decoded_r: redis.Redis): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("99.99.99", "timeseries") -async def test_multi_reverse_range(decoded_r: redis.Redis): +async def test_multi_reverse_range(decoded_r: valkey.Valkey): await decoded_r.ts().create(1, labels={"Test": "This", "team": "ny"}) await decoded_r.ts().create( 2, labels={"Test": "This", "Taste": "That", "team": "sf"} @@ -635,7 +637,7 @@ async def test_multi_reverse_range(decoded_r: redis.Redis): assert [[1, 10.0], [0, 1.0]] == res["1"][2] -async def test_get(decoded_r: redis.Redis): +async def test_get(decoded_r: valkey.Valkey): name = "test" await decoded_r.ts().create(name) assert not await decoded_r.ts().get(name) @@ -646,7 +648,7 @@ async def test_get(decoded_r: redis.Redis): @pytest.mark.onlynoncluster -async def test_mget(decoded_r: redis.Redis): +async def test_mget(decoded_r: valkey.Valkey): await decoded_r.ts().create(1, labels={"Test": "This"}) await decoded_r.ts().create(2, labels={"Test": "This", "Taste": "That"}) act_res = await decoded_r.ts().mget(["Test=This"]) @@ -680,7 +682,7 @@ async def test_mget(decoded_r: redis.Redis): assert {"Taste": "That", "Test": "This"} == res["2"][0] -async def 
test_info(decoded_r: redis.Redis): +async def test_info(decoded_r: valkey.Valkey): await decoded_r.ts().create( 1, retention_msecs=5, labels={"currentLabel": "currentData"} ) @@ -692,7 +694,7 @@ async def test_info(decoded_r: redis.Redis): @skip_ifmodversion_lt("1.4.0", "timeseries") -async def testInfoDuplicatePolicy(decoded_r: redis.Redis): +async def testInfoDuplicatePolicy(decoded_r: valkey.Valkey): await decoded_r.ts().create( 1, retention_msecs=5, labels={"currentLabel": "currentData"} ) @@ -709,7 +711,7 @@ async def testInfoDuplicatePolicy(decoded_r: redis.Redis): @pytest.mark.onlynoncluster -async def test_query_index(decoded_r: redis.Redis): +async def test_query_index(decoded_r: valkey.Valkey): await decoded_r.ts().create(1, labels={"Test": "This"}) await decoded_r.ts().create(2, labels={"Test": "This", "Taste": "That"}) assert 2 == len(await decoded_r.ts().queryindex(["Test=This"])) @@ -719,7 +721,7 @@ async def test_query_index(decoded_r: redis.Redis): ) -# # async def test_pipeline(r: redis.Redis): +# # async def test_pipeline(r: valkey.Valkey): # pipeline = await r.ts().pipeline() # pipeline.create("with_pipeline") # for i in range(100): @@ -732,7 +734,7 @@ async def test_query_index(decoded_r: redis.Redis): # assert await r.ts().get("with_pipeline")[1] == 99 * 1.1 -async def test_uncompressed(decoded_r: redis.Redis): +async def test_uncompressed(decoded_r: valkey.Valkey): await decoded_r.ts().create("compressed") await decoded_r.ts().create("uncompressed", uncompressed=True) compressed_info = await decoded_r.ts().info("compressed") diff --git a/tests/test_asyncio/testdata/titles.csv b/tests/test_asyncio/testdata/titles.csv index 6428dd2a..cbc2158a 100644 --- a/tests/test_asyncio/testdata/titles.csv +++ b/tests/test_asyncio/testdata/titles.csv @@ -4321,7 +4321,7 @@ gustav krupp von bohlen und halbach,1 yasmany tomás,4 notre temps,1 cats %,1 -intramolecular vibrational energy redistribution,1 +intramolecular vibrational energy redistribution,1
graduate management admission test,49 robin fleming,1 daniel gadzhev,1 diff --git a/tests/test_bloom.py b/tests/test_bloom.py index 464a946f..d09f409f 100644 --- a/tests/test_bloom.py +++ b/tests/test_bloom.py @@ -1,12 +1,14 @@ from math import inf import pytest -import redis.commands.bf -from redis.exceptions import ModuleError, RedisError -from redis.utils import HIREDIS_AVAILABLE +import valkey.commands.bf +from valkey.exceptions import ModuleError, ValkeyError +from valkey.utils import HIREDIS_AVAILABLE from .conftest import assert_resp_response, is_resp2_connection, skip_ifmodversion_lt +pytestmark = pytest.mark.skip + def intlist(obj): return [int(v) for v in obj] @@ -14,11 +16,11 @@ def intlist(obj): @pytest.fixture def client(decoded_r): - assert isinstance(decoded_r.bf(), redis.commands.bf.BFBloom) - assert isinstance(decoded_r.cf(), redis.commands.bf.CFBloom) - assert isinstance(decoded_r.cms(), redis.commands.bf.CMSBloom) - assert isinstance(decoded_r.tdigest(), redis.commands.bf.TDigestBloom) - assert isinstance(decoded_r.topk(), redis.commands.bf.TOPKBloom) + assert isinstance(decoded_r.bf(), valkey.commands.bf.BFBloom) + assert isinstance(decoded_r.cf(), valkey.commands.bf.CFBloom) + assert isinstance(decoded_r.cms(), valkey.commands.bf.CMSBloom) + assert isinstance(decoded_r.tdigest(), valkey.commands.bf.TDigestBloom) + assert isinstance(decoded_r.topk(), valkey.commands.bf.TOPKBloom) decoded_r.flushdb() return decoded_r @@ -177,7 +179,7 @@ def test_bf_info(client): "myBloom", "0.0001", "1000", expansion=expansion, noScale=True ) assert False - except RedisError: + except ValkeyError: assert True @@ -190,7 +192,7 @@ def test_bf_card(client): assert client.bf().card("bf1") == 1 # Error when key is of a type other than Bloom filter. 
- with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): client.set("setKey", "value") client.bf().card("setKey") @@ -477,7 +479,7 @@ def test_tdigest_byrank(client): assert 1 == client.tdigest().byrank("t-digest", 0)[0] assert 10 == client.tdigest().byrank("t-digest", 9)[0] assert client.tdigest().byrank("t-digest", 100)[0] == inf - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): client.tdigest().byrank("t-digest", -1)[0] @@ -488,7 +490,7 @@ def test_tdigest_byrevrank(client): assert 10 == client.tdigest().byrevrank("t-digest", 0)[0] assert 1 == client.tdigest().byrevrank("t-digest", 9)[0] assert client.tdigest().byrevrank("t-digest", 100)[0] == -inf - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): client.tdigest().byrevrank("t-digest", -1)[0] diff --git a/tests/test_cache.py b/tests/test_cache.py index 022364e8..e15fad6e 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -4,12 +4,12 @@ import cachetools import pytest -import redis -from redis import RedisError -from redis._cache import AbstractCache, EvictionPolicy, _LocalCache -from redis.typing import KeyT, ResponseT -from redis.utils import HIREDIS_AVAILABLE +import valkey from tests.conftest import _get_client +from valkey import ValkeyError +from valkey._cache import AbstractCache, EvictionPolicy, _LocalCache +from valkey.typing import KeyT, ResponseT +from valkey.utils import HIREDIS_AVAILABLE @pytest.fixture() @@ -19,7 +19,7 @@ def r(request): protocol = request.param.get("protocol", 3) single_connection_client = request.param.get("single_connection_client", False) with _get_client( - redis.Redis, + valkey.Valkey, request, single_connection_client=single_connection_client, protocol=protocol, @@ -40,19 +40,19 @@ class TestLocalCache: @pytest.mark.onlynoncluster def test_get_from_cache(self, r, r2): r, cache = r - # add key to redis + # add key to valkey r.set("foo", "bar") - # get key from 
redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) r2.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) r.ping() # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert r.get("foo") == b"barbar" @pytest.mark.parametrize( @@ -62,11 +62,11 @@ def test_get_from_cache(self, r, r2): ) def test_cache_lru_eviction(self, r): r, cache = r - # add 3 keys to redis + # add 3 keys to valkey r.set("foo", "bar") r.set("foo2", "bar2") r.set("foo3", "bar3") - # get 3 keys from redis and save in local cache + # get 3 keys from valkey and save in local cache assert r.get("foo") == b"bar" assert r.get("foo2") == b"bar2" assert r.get("foo3") == b"bar3" @@ -74,7 +74,7 @@ def test_cache_lru_eviction(self, r): assert cache.get(("GET", "foo")) == b"bar" assert cache.get(("GET", "foo2")) == b"bar2" assert cache.get(("GET", "foo3")) == b"bar3" - # add 1 more key to redis (exceed the max size) + # add 1 more key to valkey (exceed the max size) r.set("foo4", "bar4") assert r.get("foo4") == b"bar4" # the first key is not in the local cache anymore @@ -83,9 +83,9 @@ def test_cache_lru_eviction(self, r): @pytest.mark.parametrize("r", [{"cache": _LocalCache(ttl=1)}], indirect=True) def test_cache_ttl(self, r): r, cache = r - # add key to redis + # add key to valkey r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" @@ -101,11 +101,11 @@ def test_cache_ttl(self, r): ) def test_cache_lfu_eviction(self, r): r, cache = r - # add 3 keys 
to redis + # add 3 keys to valkey r.set("foo", "bar") r.set("foo2", "bar2") r.set("foo3", "bar3") - # get 3 keys from redis and save in local cache + # get 3 keys from valkey and save in local cache assert r.get("foo") == b"bar" assert r.get("foo2") == b"bar2" assert r.get("foo3") == b"bar3" @@ -113,7 +113,7 @@ def test_cache_lfu_eviction(self, r): assert cache.get(("GET", "foo")) == b"bar" assert cache.get(("GET", "foo")) == b"bar" assert cache.get(("GET", "foo3")) == b"bar3" - # add 1 more key to redis (exceed the max size) + # add 1 more key to valkey (exceed the max size) r.set("foo4", "bar4") assert r.get("foo4") == b"bar4" # test the eviction policy @@ -130,17 +130,17 @@ def test_cache_lfu_eviction(self, r): def test_cache_decode_response(self, r): r, cache = r r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == "bar" # get key from local cache assert cache.get(("GET", "foo")) == "bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) r.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) r.ping() # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert r.get("foo") == "barbar" @pytest.mark.parametrize( @@ -150,7 +150,7 @@ def test_cache_decode_response(self, r): ) def test_cache_deny_list(self, r): r, cache = r - # add list to redis + # add list to valkey r.lpush("mylist", "foo", "bar", "baz") assert r.llen("mylist") == 3 assert r.lindex("mylist", 1) == b"bar" @@ -207,7 +207,7 @@ def test_csc_not_cause_disconnects(self, r): r.mset({"a": 2, "b": 2, "c": 2, "d": 2, "e": 2, "f": 2}) id3 = r.client_id() - # client should get value from redis server post invalidate messages + # client should get value from valkey server post invalidate messages assert 
r.mget("a", "b", "c", "d", "e", "f") == ["2", "2", "2", "2", "2", "2"] r.mset({"a": 3, "b": 3, "c": 3, "d": 3, "e": 3, "f": 3}) @@ -238,11 +238,11 @@ def test_multiple_commands_same_key(self, r): assert cache.get(("MGET", "a", "b")) == ["1", "1"] # set only one key r.set("a", 2) - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) r.ping() # the command is not in the local cache anymore assert cache.get(("MGET", "a", "b")) is None - # get from redis + # get from valkey assert r.mget("a", "b") == ["2", "1"] @pytest.mark.parametrize( @@ -264,7 +264,7 @@ def test_delete_one_command(self, r): # the other command is still in the local cache anymore assert cache.get(("MGET", "a{a}", "b{a}")) is None assert cache.get(("GET", "c")) == "1" - # get from redis + # get from valkey assert r.mget("a{a}", "b{a}") == ["1", "1"] assert r.get("c") == "1" @@ -287,7 +287,7 @@ def test_delete_several_commands(self, r): # the commands are not in the local cache anymore assert cache.get(("MGET", "a{a}", "b{a}")) is None assert cache.get(("GET", "c")) is None - # get from redis + # get from valkey assert r.mget("a{a}", "b{a}") == ["1", "1"] assert r.get("c") == "1" @@ -310,7 +310,7 @@ def test_invalidate_key(self, r): # one other command is still in the local cache anymore assert cache.get(("MGET", "a{a}", "b{a}")) is None assert cache.get(("GET", "c")) == "1" - # get from redis + # get from valkey assert r.mget("a{a}", "b{a}") == ["1", "1"] assert r.get("c") == "1" @@ -333,14 +333,14 @@ def test_flush_entire_cache(self, r): # the commands are not in the local cache anymore assert cache.get(("MGET", "a{a}", "b{a}")) is None assert cache.get(("GET", "c")) is None - # get from redis + # get from valkey assert r.mget("a{a}", "b{a}") == ["1", "1"] assert r.get("c") == "1" @pytest.mark.onlynoncluster def test_cache_not_available_with_resp2(self, request): - with pytest.raises(RedisError) as e: - 
_get_client(redis.Redis, request, protocol=2, client_cache=_LocalCache()) + with pytest.raises(ValkeyError) as e: + _get_client(valkey.Valkey, request, protocol=2, client_cache=_LocalCache()) assert "protocol version 3 or higher" in str(e.value) @pytest.mark.parametrize( @@ -386,33 +386,33 @@ def test_execute_command_keys_not_provided(self, r): @pytest.mark.onlynoncluster def test_single_connection(self, r): r, cache = r - # add key to redis + # add key to valkey r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) r.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) r.ping() # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert r.get("foo") == b"barbar" @pytest.mark.parametrize("r", [{"cache": _LocalCache()}], indirect=True) def test_get_from_cache_invalidate_via_get(self, r, r2): r, cache = r - # add key to redis + # add key to valkey r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) r2.set("foo", "barbar") - # don't send any command to redis, just run another get + # don't send any command to valkey, just run another get # it should process the invalidation in background assert r.get("foo") == b"barbar" @@ -423,20 +423,20 @@ class TestClusterLocalCache: @pytest.mark.parametrize("r", [{"cache": _LocalCache()}], indirect=True) def test_get_from_cache(self, r, r2): r, cache = 
r - # add key to redis + # add key to valkey r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) r2.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) node = r.get_node_from_key("foo") r.ping(target_nodes=node) # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert r.get("foo") == b"barbar" @pytest.mark.parametrize( @@ -447,18 +447,18 @@ def test_get_from_cache(self, r, r2): def test_cache_decode_response(self, r): r, cache = r r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == "bar" # get key from local cache assert cache.get(("GET", "foo")) == "bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) r.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) node = r.get_node_from_key("foo") r.ping(target_nodes=node) # the command is not in the local cache anymore assert cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert r.get("foo") == "barbar" @pytest.mark.parametrize( @@ -490,17 +490,17 @@ class TestSentinelLocalCache: def test_get_from_cache(self, local_cache, master): master.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert master.get("foo") == b"bar" # get key from local cache assert local_cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey 
(cause invalidation) master.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) master.ping() # the command is not in the local cache anymore assert local_cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert master.get("foo") == b"barbar" @pytest.mark.parametrize( @@ -510,17 +510,17 @@ def test_get_from_cache(self, local_cache, master): ) def test_cache_decode_response(self, local_cache, sentinel_setup, master): master.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert master.get("foo") == "bar" # get key from local cache assert local_cache.get(("GET", "foo")) == "bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) master.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) master.ping() # the command is not in the local cache anymore assert local_cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert master.get("foo") == "barbar" @@ -571,17 +571,17 @@ def invalidate_key(self, key: KeyT): @pytest.mark.parametrize("r", [{"cache": _CustomCache()}], indirect=True) def test_get_from_cache(self, r, r2): r, cache = r - # add key to redis + # add key to valkey r.set("foo", "bar") - # get key from redis and save in local cache + # get key from valkey and save in local cache assert r.get("foo") == b"bar" # get key from local cache assert cache.get(("GET", "foo")) == b"bar" - # change key in redis (cause invalidation) + # change key in valkey (cause invalidation) r2.set("foo", "barbar") - # send any command to redis (process invalidation in background) + # send any command to valkey (process invalidation in background) r.ping() # the command is not in the local cache anymore assert 
cache.get(("GET", "foo")) is None - # get key from redis + # get key from valkey assert r.get("foo") == b"barbar" diff --git a/tests/test_cluster.py b/tests/test_cluster.py index 1f505b81..0e068eb0 100644 --- a/tests/test_cluster.py +++ b/tests/test_cluster.py @@ -10,43 +10,43 @@ from unittest.mock import DEFAULT, Mock, call, patch import pytest -import redis -from redis import Redis -from redis._parsers import CommandsParser -from redis.backoff import ExponentialBackoff, NoBackoff, default_backoff -from redis.cluster import ( +import valkey +from tests.test_pubsub import wait_for_message +from valkey import Valkey +from valkey._parsers import CommandsParser +from valkey.backoff import ExponentialBackoff, NoBackoff, default_backoff +from valkey.cluster import ( PRIMARY, - REDIS_CLUSTER_HASH_SLOTS, REPLICA, + VALKEY_CLUSTER_HASH_SLOTS, ClusterNode, NodesManager, - RedisCluster, + ValkeyCluster, get_node_name, ) -from redis.connection import BlockingConnectionPool, Connection, ConnectionPool -from redis.crc import key_slot -from redis.exceptions import ( +from valkey.connection import BlockingConnectionPool, Connection, ConnectionPool +from valkey.crc import key_slot +from valkey.exceptions import ( AskError, ClusterDownError, ConnectionError, DataError, MovedError, NoPermissionError, - RedisClusterException, - RedisError, ResponseError, TimeoutError, + ValkeyClusterException, + ValkeyError, ) -from redis.retry import Retry -from redis.utils import str_if_bytes -from tests.test_pubsub import wait_for_message +from valkey.retry import Retry +from valkey.utils import str_if_bytes from .conftest import ( _get_client, assert_resp_response, is_resp2_connection, - skip_if_redis_enterprise, skip_if_server_version_lt, + skip_if_valkey_enterprise, skip_unless_arch_bits, wait_for_command, ) @@ -69,7 +69,7 @@ def recv(self, sock): def handle(self): self.server.proxy.n_connections += 1 - conn = socket.create_connection(self.server.proxy.redis_addr) + conn = 
socket.create_connection(self.server.proxy.valkey_addr) stop = False def from_server(): @@ -102,9 +102,9 @@ def from_server(): class NodeProxy: """A class to proxy a node connection to a different port""" - def __init__(self, addr, redis_addr): + def __init__(self, addr, valkey_addr): self.addr = addr - self.redis_addr = redis_addr + self.valkey_addr = valkey_addr self.server = socketserver.ThreadingTCPServer(self.addr, ProxyRequestHandler) self.server.proxy = self self.server.socket_reuse_address = True @@ -112,8 +112,8 @@ def __init__(self, addr, redis_addr): self.n_connections = 0 def start(self): - # test that we can connect to redis - s = socket.create_connection(self.redis_addr, timeout=2) + # test that we can connect to valkey + s = socket.create_connection(self.valkey_addr, timeout=2) s.close() # Start a thread with the server -- that thread will then start one # more thread for each request @@ -151,18 +151,18 @@ def cleanup(): r.config_set("slowlog-max-len", 128) -def get_mocked_redis_client( +def get_mocked_valkey_client( func=None, cluster_slots_raise_error=False, *args, **kwargs ): """ - Return a stable RedisCluster object that have deterministic + Return a stable ValkeyCluster object that have deterministic nodes and slots setup to remove the problem of different IP addresses on different installations and machines. 
""" cluster_slots = kwargs.pop("cluster_slots", default_cluster_slots) coverage_res = kwargs.pop("coverage_result", "yes") cluster_enabled = kwargs.pop("cluster_enabled", True) - with patch.object(Redis, "execute_command") as execute_command_mock: + with patch.object(Valkey, "execute_command") as execute_command_mock: def execute_command(*_args, **_kwargs): if _args[0] == "CLUSTER SLOTS": @@ -202,14 +202,14 @@ def cmd_init_mock(self, r): cmd_parser_initialize.side_effect = cmd_init_mock - return RedisCluster(*args, **kwargs) + return ValkeyCluster(*args, **kwargs) def mock_node_resp(node, response): connection = Mock() connection.read_response.return_value = response connection._get_from_local_cache.return_value = None - node.redis_connection.connection = connection + node.valkey_connection.connection = connection return node @@ -217,7 +217,7 @@ def mock_node_resp_func(node, func): connection = Mock() connection.read_response.side_effect = func connection._get_from_local_cache.return_value = None - node.redis_connection.connection = connection + node.valkey_connection.connection = connection return node @@ -250,7 +250,7 @@ def moved_redirection_helper(request, failover=False): 3. the redirected node's server type updated to 'primary' 4. 
the server type of the previous slot owner updated to 'replica' """ - rc = _get_client(RedisCluster, request, flushdb=False) + rc = _get_client(ValkeyCluster, request, flushdb=False) slot = 12182 redirect_node = None # Get the current primary that holds this slot @@ -265,7 +265,7 @@ def moved_redirection_helper(request, failover=False): redirect_node = rc.get_primaries()[0] r_host = redirect_node.host r_port = redirect_node.port - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: def moved_redirect_effect(connection, *args, **options): def ok_response(connection, *args, **options): @@ -287,9 +287,9 @@ def ok_response(connection, *args, **options): @pytest.mark.onlycluster -class TestRedisClusterObj: +class TestValkeyClusterObj: """ - Tests for the RedisCluster class + Tests for the ValkeyCluster class """ def test_host_port_startup_node(self): @@ -297,7 +297,7 @@ def test_host_port_startup_node(self): Test that it is possible to use host & port arguments as startup node args """ - cluster = get_mocked_redis_client(host=default_host, port=default_port) + cluster = get_mocked_valkey_client(host=default_host, port=default_port) assert cluster.get_node(host=default_host, port=default_port) is not None def test_startup_nodes(self): @@ -311,7 +311,7 @@ def test_startup_nodes(self): ClusterNode(default_host, port_1), ClusterNode(default_host, port_2), ] - cluster = get_mocked_redis_client(startup_nodes=startup_nodes) + cluster = get_mocked_valkey_client(startup_nodes=startup_nodes) assert ( cluster.get_node(host=default_host, port=port_1) is not None and cluster.get_node(host=default_host, port=port_2) is not None @@ -321,32 +321,32 @@ def test_empty_startup_nodes(self): """ Test that exception is raised when empty providing empty startup_nodes """ - with pytest.raises(RedisClusterException) as ex: - RedisCluster(startup_nodes=[]) + with pytest.raises(ValkeyClusterException) as ex: + 
ValkeyCluster(startup_nodes=[]) assert str(ex.value).startswith( - "RedisCluster requires at least one node to discover the cluster" + "ValkeyCluster requires at least one node to discover the cluster" ), str_if_bytes(ex.value) def test_from_url(self, r): - redis_url = f"redis://{default_host}:{default_port}/0" - with patch.object(RedisCluster, "from_url") as from_url: + valkey_url = f"valkey://{default_host}:{default_port}/0" + with patch.object(ValkeyCluster, "from_url") as from_url: def from_url_mocked(_url, **_kwargs): - return get_mocked_redis_client(url=_url, **_kwargs) + return get_mocked_valkey_client(url=_url, **_kwargs) from_url.side_effect = from_url_mocked - cluster = RedisCluster.from_url(redis_url) + cluster = ValkeyCluster.from_url(valkey_url) assert cluster.get_node(host=default_host, port=default_port) is not None def test_execute_command_errors(self, r): """ Test that if no key is provided then exception should be raised. """ - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: r.execute_command("GET") assert str(ex.value).startswith( - "No way to dispatch this command to Redis Cluster. Missing key." + "No way to dispatch this command to Valkey Cluster. Missing key." 
) def test_execute_command_node_flag_primaries(self, r): @@ -356,12 +356,12 @@ def test_execute_command_node_flag_primaries(self, r): primaries = r.get_primaries() replicas = r.get_replicas() mock_all_nodes_resp(r, "PONG") - assert r.ping(target_nodes=RedisCluster.PRIMARIES) is True + assert r.ping(target_nodes=ValkeyCluster.PRIMARIES) is True for primary in primaries: - conn = primary.redis_connection.connection + conn = primary.valkey_connection.connection assert conn.read_response.called is True for replica in replicas: - conn = replica.redis_connection.connection + conn = replica.valkey_connection.connection assert conn.read_response.called is not True def test_execute_command_node_flag_replicas(self, r): @@ -370,15 +370,15 @@ def test_execute_command_node_flag_replicas(self, r): """ replicas = r.get_replicas() if not replicas: - r = get_mocked_redis_client(default_host, default_port) + r = get_mocked_valkey_client(default_host, default_port) primaries = r.get_primaries() mock_all_nodes_resp(r, "PONG") - assert r.ping(target_nodes=RedisCluster.REPLICAS) is True + assert r.ping(target_nodes=ValkeyCluster.REPLICAS) is True for replica in replicas: - conn = replica.redis_connection.connection + conn = replica.valkey_connection.connection assert conn.read_response.called is True for primary in primaries: - conn = primary.redis_connection.connection + conn = primary.valkey_connection.connection assert conn.read_response.called is not True def test_execute_command_node_flag_all_nodes(self, r): @@ -386,9 +386,9 @@ def test_execute_command_node_flag_all_nodes(self, r): Test command execution with nodes flag ALL_NODES """ mock_all_nodes_resp(r, "PONG") - assert r.ping(target_nodes=RedisCluster.ALL_NODES) is True + assert r.ping(target_nodes=ValkeyCluster.ALL_NODES) is True for node in r.get_nodes(): - conn = node.redis_connection.connection + conn = node.valkey_connection.connection assert conn.read_response.called is True def test_execute_command_node_flag_random(self, 
r): @@ -396,10 +396,10 @@ def test_execute_command_node_flag_random(self, r): Test command execution with nodes flag RANDOM """ mock_all_nodes_resp(r, "PONG") - assert r.ping(target_nodes=RedisCluster.RANDOM) is True + assert r.ping(target_nodes=ValkeyCluster.RANDOM) is True called_count = 0 for node in r.get_nodes(): - conn = node.redis_connection.connection + conn = node.valkey_connection.connection if conn.read_response.called is True: called_count += 1 assert called_count == 1 @@ -412,7 +412,7 @@ def test_execute_command_default_node(self, r): def_node = r.get_default_node() mock_node_resp(def_node, "PONG") assert r.ping() is True - conn = def_node.redis_connection.connection + conn = def_node.valkey_connection.connection assert conn.read_response.called def test_ask_redirection(self, r): @@ -425,7 +425,7 @@ def test_ask_redirection(self, r): Important thing to verify is that it tries to talk to the second node. """ redirect_node = r.get_nodes()[0] - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: def ask_redirect_effect(connection, *args, **options): def ok_response(connection, *args, **options): @@ -448,7 +448,7 @@ def test_handling_cluster_failover_to_a_replica(self, r): primary = r.get_node_from_key(key, replica=False) assert str_if_bytes(r.get("key")) == "value" # Get the current output of cluster slots - cluster_slots = primary.redis_connection.execute_command("CLUSTER SLOTS") + cluster_slots = primary.valkey_connection.execute_command("CLUSTER SLOTS") replica_host = "" replica_port = 0 # Replace one of the replicas to be the new primary based on the @@ -478,17 +478,17 @@ def mock_execute_command(*_args, **_kwargs): # Mock connection error for the current primary mock_node_resp_func(primary, raise_connection_error) - primary.redis_connection.set_retry(Retry(NoBackoff(), 1)) + primary.valkey_connection.set_retry(Retry(NoBackoff(), 1)) # Mock the cluster slots response for all 
other nodes - redis_mock_node = Mock() - redis_mock_node.execute_command.side_effect = mock_execute_command + valkey_mock_node = Mock() + valkey_mock_node.execute_command.side_effect = mock_execute_command # Mock response value for all other commands - redis_mock_node.parse_response.return_value = "MOCK_OK" - redis_mock_node.connection._get_from_local_cache.return_value = None + valkey_mock_node.parse_response.return_value = "MOCK_OK" + valkey_mock_node.connection._get_from_local_cache.return_value = None for node in r.get_nodes(): if node.port != primary.port: - node.redis_connection = redis_mock_node + node.valkey_connection = valkey_mock_node assert r.get(key) == "MOCK_OK" new_primary = r.get_node_from_key(key, replica=False) @@ -515,7 +515,7 @@ def test_refresh_using_specific_nodes(self, request): """ node_7006 = ClusterNode(host=default_host, port=7006, server_type=PRIMARY) node_7007 = ClusterNode(host=default_host, port=7007, server_type=PRIMARY) - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: with patch.object(NodesManager, "initialize", autospec=True) as initialize: with patch.multiple( Connection, send_command=DEFAULT, connect=DEFAULT, can_read=DEFAULT @@ -579,7 +579,7 @@ def cmd_init_mock(self, r): cmd_parser_initialize.side_effect = cmd_init_mock - rc = _get_client(RedisCluster, request, flushdb=False) + rc = _get_client(ValkeyCluster, request, flushdb=False) assert len(rc.get_nodes()) == 1 assert rc.get_node(node_name=node_7006.name) is not None @@ -602,7 +602,7 @@ def test_reading_from_replicas_in_round_robin(self): can_read=DEFAULT, on_connect=DEFAULT, ) as mocks: - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: def parse_response_mock_first(connection, *args, **options): # Primary @@ -622,7 +622,7 @@ def parse_response_mock_third(connection, *args, **options): return "MOCK_OK" # We don't need 
to create a real cluster connection but we - # do want RedisCluster.on_connect function to get called, + # do want ValkeyCluster.on_connect function to get called, # so we'll mock some of the Connection's functions to allow it parse_response.side_effect = parse_response_mock_first mocks["send_command"].return_value = True @@ -632,7 +632,7 @@ def parse_response_mock_third(connection, *args, **options): mocks["on_connect"].return_value = True # Create a cluster with reading from replications - read_cluster = get_mocked_redis_client( + read_cluster = get_mocked_valkey_client( host=default_host, port=default_port, read_from_replicas=True ) assert read_cluster.read_from_replicas is True @@ -699,14 +699,14 @@ def test_all_nodes_masters(self, r): for node in r.get_primaries(): assert node in nodes - @pytest.mark.parametrize("error", RedisCluster.ERRORS_ALLOW_RETRY) + @pytest.mark.parametrize("error", ValkeyCluster.ERRORS_ALLOW_RETRY) def test_cluster_down_overreaches_retry_attempts(self, error): """ When error that allows retry is thrown, test that we retry executing the command as many times as configured in cluster_error_retry_attempts and then raise the exception """ - with patch.object(RedisCluster, "_execute_command") as execute_command: + with patch.object(ValkeyCluster, "_execute_command") as execute_command: def raise_error(target_node, *args, **kwargs): execute_command.failed_calls += 1 @@ -714,7 +714,7 @@ def raise_error(target_node, *args, **kwargs): execute_command.side_effect = raise_error - rc = get_mocked_redis_client(host=default_host, port=default_port) + rc = get_mocked_valkey_client(host=default_host, port=default_port) with pytest.raises(error): rc.get("bar") @@ -730,7 +730,7 @@ def on_connect(connection): mock = Mock(side_effect=on_connect) - _get_client(RedisCluster, request, redis_connect_func=mock) + _get_client(ValkeyCluster, request, valkey_connect_func=mock) assert mock.called is True def test_set_default_node_success(self, r): @@ -771,7 +771,7 
@@ def test_get_node_from_key(self, r): assert replica.server_type == REPLICA assert replica in slot_nodes - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_not_require_full_coverage_cluster_down_error(self, r): """ When require_full_coverage is set to False (default client config) and not @@ -809,15 +809,15 @@ def test_timeout_error_topology_refresh_reuse_connections(self, r): r.set("key", "value") node_conn_origin = {} for n in r.get_nodes(): - node_conn_origin[n.name] = n.redis_connection - real_func = r.get_redis_connection(node).parse_response + node_conn_origin[n.name] = n.valkey_connection + real_func = r.get_valkey_connection(node).parse_response class counter: def __init__(self, val=0): self.val = int(val) count = counter(0) - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: def moved_redirect_effect(connection, *args, **options): # raise a timeout for 5 times so we'll need to reinitialize the topology @@ -830,37 +830,37 @@ def moved_redirect_effect(connection, *args, **options): assert r.get("key") == b"value" for node_name, conn in node_conn_origin.items(): if node_name == node.name: - # The old redis connection of the timed out node should have been + # The old valkey connection of the timed out node should have been # deleted and replaced - assert conn != r.get_redis_connection(node) + assert conn != r.get_valkey_connection(node) else: - # other nodes' redis connection should have been reused during the + # other nodes' valkey connection should have been reused during the # topology refresh cur_node = r.get_node(node_name=node_name) - assert conn == r.get_redis_connection(cur_node) + assert conn == r.get_valkey_connection(cur_node) def test_cluster_get_set_retry_object(self, request): retry = Retry(NoBackoff(), 2) - r = _get_client(RedisCluster, request, retry=retry) + r = _get_client(ValkeyCluster, request, retry=retry) assert r.get_retry()._retries == 
retry._retries assert isinstance(r.get_retry()._backoff, NoBackoff) for node in r.get_nodes(): - assert node.redis_connection.get_retry()._retries == retry._retries - assert isinstance(node.redis_connection.get_retry()._backoff, NoBackoff) + assert node.valkey_connection.get_retry()._retries == retry._retries + assert isinstance(node.valkey_connection.get_retry()._backoff, NoBackoff) rand_node = r.get_random_node() - existing_conn = rand_node.redis_connection.connection_pool.get_connection("_") + existing_conn = rand_node.valkey_connection.connection_pool.get_connection("_") # Change retry policy new_retry = Retry(ExponentialBackoff(), 3) r.set_retry(new_retry) assert r.get_retry()._retries == new_retry._retries assert isinstance(r.get_retry()._backoff, ExponentialBackoff) for node in r.get_nodes(): - assert node.redis_connection.get_retry()._retries == new_retry._retries + assert node.valkey_connection.get_retry()._retries == new_retry._retries assert isinstance( - node.redis_connection.get_retry()._backoff, ExponentialBackoff + node.valkey_connection.get_retry()._backoff, ExponentialBackoff ) assert existing_conn.retry._retries == new_retry._retries - new_conn = rand_node.redis_connection.connection_pool.get_connection("_") + new_conn = rand_node.valkey_connection.connection_pool.get_connection("_") assert new_conn.retry._retries == new_retry._retries def test_cluster_retry_object(self, r) -> None: @@ -869,16 +869,16 @@ def test_cluster_retry_object(self, r) -> None: assert isinstance(retry, Retry) assert retry._retries == 0 assert isinstance(retry._backoff, type(default_backoff())) - node1 = r.get_node("127.0.0.1", 16379).redis_connection - node2 = r.get_node("127.0.0.1", 16380).redis_connection + node1 = r.get_node("127.0.0.1", 16379).valkey_connection + node2 = r.get_node("127.0.0.1", 16380).valkey_connection assert node1.get_retry()._retries == node2.get_retry()._retries # Test custom retry retry = Retry(ExponentialBackoff(10, 5), 5) - rc_custom_retry = 
RedisCluster("127.0.0.1", 16379, retry=retry) + rc_custom_retry = ValkeyCluster("127.0.0.1", 16379, retry=retry) assert ( rc_custom_retry.get_node("127.0.0.1", 16379) - .redis_connection.get_retry() + .valkey_connection.get_retry() ._retries == retry._retries ) @@ -907,7 +907,7 @@ def raise_connection_error(): assert r.get_default_node() != curr_default_node def test_address_remap(self, request, master_host): - """Test that we can create a rediscluster object with + """Test that we can create a valkeycluster object with a host-port remapper and map connections through proxy objects """ @@ -935,7 +935,7 @@ def address_remap(address): try: # create cluster: r = _get_client( - RedisCluster, request, flushdb=False, address_remap=address_remap + ValkeyCluster, request, flushdb=False, address_remap=address_remap ) try: assert r.ping() is True @@ -953,9 +953,9 @@ def address_remap(address): @pytest.mark.onlycluster -class TestClusterRedisCommands: +class TestClusterValkeyCommands: """ - Tests for RedisCluster unique commands + Tests for ValkeyCluster unique commands """ def test_case_insensitive_command_names(self, r): @@ -1010,9 +1010,9 @@ def test_cluster_config_resetstat(self, r): def test_client_setname(self, r): node = r.get_random_node() - r.client_setname("redis_py_test", target_nodes=node) + r.client_setname("valkey_py_test", target_nodes=node) client_name = r.client_getname(target_nodes=node) - assert_resp_response(r, client_name, "redis_py_test", b"redis_py_test") + assert_resp_response(r, client_name, "valkey_py_test", b"valkey_py_test") def test_exists(self, r): d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"} @@ -1054,11 +1054,11 @@ def test_pubsub_channels_merge_results(self, r): b_channel = channel.encode("utf-8") channels.append(b_channel) # Assert that each node returns only the channel it subscribed to - sub_channels = node.redis_connection.pubsub_channels() + sub_channels = node.valkey_connection.pubsub_channels() if not sub_channels: # Try again after 
a short sleep sleep(0.3) - sub_channels = node.redis_connection.pubsub_channels() + sub_channels = node.valkey_connection.pubsub_channels() assert sub_channels == [b_channel] i += 1 # Assert that the cluster's pubsub_channels function returns ALL of @@ -1079,10 +1079,10 @@ def test_pubsub_numsub_merge_results(self, r): pubsub_nodes.append(p) p.subscribe(channel) # Assert that each node returns that only one client is subscribed - sub_chann_num = node.redis_connection.pubsub_numsub(channel) + sub_chann_num = node.valkey_connection.pubsub_numsub(channel) if sub_chann_num == [(b_channel, 0)]: sleep(0.3) - sub_chann_num = node.redis_connection.pubsub_numsub(channel) + sub_chann_num = node.valkey_connection.pubsub_numsub(channel) assert sub_chann_num == [(b_channel, 1)] # Assert that the cluster's pubsub_numsub function returns ALL clients # subscribed to this channel in the entire cluster @@ -1099,10 +1099,10 @@ def test_pubsub_numpat_merge_results(self, r): pubsub_nodes.append(p) p.psubscribe(pattern) # Assert that each node returns that only one client is subscribed - sub_num_pat = node.redis_connection.pubsub_numpat() + sub_num_pat = node.valkey_connection.pubsub_numpat() if sub_num_pat == 0: sleep(0.3) - sub_num_pat = node.redis_connection.pubsub_numpat() + sub_num_pat = node.valkey_connection.pubsub_numpat() assert sub_num_pat == 1 # Assert that the cluster's pubsub_numsub function returns ALL clients # subscribed to this channel in the entire cluster @@ -1136,13 +1136,13 @@ def test_cluster_pubsub_numsub(self, r): channels = [(b"foo", 1), (b"bar", 2), (b"baz", 3)] assert r.pubsub_numsub("foo", "bar", "baz", target_nodes="all") == channels - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_myid(self, r): node = r.get_random_node() myid = r.cluster_myid(node) assert len(myid) == 40 - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_slots(self, r): mock_all_nodes_resp(r, default_cluster_slots) cluster_slots = 
r.cluster_slots() @@ -1152,7 +1152,7 @@ def test_cluster_slots(self, r): assert cluster_slots.get((0, 8191)).get("primary") == ("127.0.0.1", 7000) @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_shards(self, r): cluster_shards = r.cluster_shards() assert isinstance(cluster_shards, list) @@ -1182,26 +1182,26 @@ def test_cluster_shards(self, r): assert attribute in attributes @skip_if_server_version_lt("7.2.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_myshardid(self, r): myshardid = r.cluster_myshardid() assert isinstance(myshardid, str) assert len(myshardid) > 0 - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_addslots(self, r): node = r.get_random_node() mock_node_resp(node, "OK") assert r.cluster_addslots(node, 1, 2, 3) is True @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_addslotsrange(self, r): node = r.get_random_node() mock_node_resp(node, "OK") assert r.cluster_addslotsrange(node, 1, 5) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_countkeysinslot(self, r): node = r.nodes_manager.get_node_from_slot(1) mock_node_resp(node, 2) @@ -1211,24 +1211,24 @@ def test_cluster_count_failure_report(self, r): mock_all_nodes_resp(r, 0) assert r.cluster_count_failure_report("node_0") == 0 - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_delslots(self): cluster_slots = [ [0, 8191, ["127.0.0.1", 7000, "node_0"]], [8192, 16383, ["127.0.0.1", 7001, "node_1"]], ] - r = get_mocked_redis_client( + r = get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=cluster_slots ) mock_all_nodes_resp(r, "OK") node0 = r.get_node(default_host, 7000) node1 = r.get_node(default_host, 7001) assert r.cluster_delslots(0, 8192) == [True, True] - assert node0.redis_connection.connection.read_response.called - assert 
node1.redis_connection.connection.read_response.called + assert node0.valkey_connection.connection.read_response.called + assert node1.valkey_connection.connection.read_response.called @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_delslotsrange(self): cluster_slots = [ [ @@ -1242,7 +1242,7 @@ def test_cluster_delslotsrange(self): ["127.0.0.1", 7001, "node_1"], ], ] - r = get_mocked_redis_client( + r = get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=cluster_slots ) mock_all_nodes_resp(r, "OK") @@ -1250,34 +1250,34 @@ def test_cluster_delslotsrange(self): r.cluster_addslots(node, 1, 2, 3, 4, 5) assert r.cluster_delslotsrange(1, 5) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_failover(self, r): node = r.get_random_node() mock_node_resp(node, "OK") assert r.cluster_failover(node) is True assert r.cluster_failover(node, "FORCE") is True assert r.cluster_failover(node, "TAKEOVER") is True - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): r.cluster_failover(node, "FORCT") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_info(self, r): info = r.cluster_info() assert isinstance(info, dict) assert info["cluster_state"] == "ok" - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_keyslot(self, r): mock_all_nodes_resp(r, 12182) assert r.cluster_keyslot("foo") == 12182 - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_meet(self, r): node = r.get_default_node() mock_node_resp(node, "OK") assert r.cluster_meet("127.0.0.1", 6379) is True - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_nodes(self, r): response = ( "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 " @@ -1306,7 +1306,7 @@ def test_cluster_nodes(self, r): == "c8253bae761cb1ecb2b61857d85dfe455a0fec8b" ) - @skip_if_redis_enterprise() + 
@skip_if_valkey_enterprise() def test_cluster_nodes_importing_migrating(self, r): response = ( "488ead2fcce24d8c0f158f9172cb1f4a9e040fe5 127.0.0.1:16381@26381 " @@ -1343,7 +1343,7 @@ def test_cluster_nodes_importing_migrating(self, r): assert node_16381.get("slots") == [["10923", "16383"]] assert node_16381.get("migrations") == [] - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_replicate(self, r): node = r.get_random_node() all_replicas = r.get_replicas() @@ -1356,7 +1356,7 @@ def test_cluster_replicate(self, r): else: assert results is True - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_reset(self, r): mock_all_nodes_resp(r, "OK") assert r.cluster_reset() is True @@ -1365,7 +1365,7 @@ def test_cluster_reset(self, r): for res in all_results.values(): assert res is True - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_save_config(self, r): node = r.get_random_node() all_nodes = r.get_nodes() @@ -1375,7 +1375,7 @@ def test_cluster_save_config(self, r): for res in all_results.values(): assert res is True - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_get_keys_in_slot(self, r): response = ["{foo}1", "{foo}2"] node = r.nodes_manager.get_node_from_slot(12182) @@ -1383,7 +1383,7 @@ def test_cluster_get_keys_in_slot(self, r): keys = r.cluster_get_keys_in_slot(12182, 4) assert keys == response - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_set_config_epoch(self, r): mock_all_nodes_resp(r, "OK") assert r.cluster_set_config_epoch(3) is True @@ -1391,25 +1391,25 @@ def test_cluster_set_config_epoch(self, r): for res in all_results.values(): assert res is True - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_setslot(self, r): node = r.get_random_node() mock_node_resp(node, "OK") assert r.cluster_setslot(node, "node_0", 1218, "IMPORTING") is True assert r.cluster_setslot(node, "node_0", 1218, "NODE") 
is True assert r.cluster_setslot(node, "node_0", 1218, "MIGRATING") is True - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): r.cluster_failover(node, "STABLE") - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): r.cluster_failover(node, "STATE") def test_cluster_setslot_stable(self, r): node = r.nodes_manager.get_node_from_slot(12182) mock_node_resp(node, "OK") assert r.cluster_setslot_stable(12182) is True - assert node.redis_connection.connection.read_response.called + assert node.valkey_connection.connection.read_response.called - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_replicas(self, r): response = [ b"01eca22229cf3c652b6fca0d09ff6941e0d2e3 " @@ -1455,29 +1455,29 @@ def test_cluster_bumpepoch_not_implemented(self, r): with pytest.raises(NotImplementedError): r.cluster_bumpepoch() - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_readonly(self): - r = get_mocked_redis_client(host=default_host, port=default_port) + r = get_mocked_valkey_client(host=default_host, port=default_port) mock_all_nodes_resp(r, "OK") assert r.readonly() is True all_replicas_results = r.readonly(target_nodes="replicas") for res in all_replicas_results.values(): assert res is True for replica in r.get_replicas(): - assert replica.redis_connection.connection.read_response.called + assert replica.valkey_connection.connection.read_response.called - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_readwrite(self): - r = get_mocked_redis_client(host=default_host, port=default_port) + r = get_mocked_valkey_client(host=default_host, port=default_port) mock_all_nodes_resp(r, "OK") assert r.readwrite() is True all_replicas_results = r.readwrite(target_nodes="replicas") for res in all_replicas_results.values(): assert res is True for replica in r.get_replicas(): - assert replica.redis_connection.connection.read_response.called + assert 
replica.valkey_connection.connection.read_response.called - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_bgsave(self, r): assert r.bgsave() sleep(0.3) @@ -1562,12 +1562,12 @@ def test_memory_usage(self, r): assert isinstance(r.memory_usage("foo"), int) @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_memory_malloc_stats(self, r): assert r.memory_malloc_stats() @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_memory_stats(self, r): # put a key into the current db to make sure that "db." # has data @@ -1589,7 +1589,7 @@ def test_memory_doctor(self, r): with pytest.raises(NotImplementedError): r.memory_doctor() - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_lastsave(self, r): node = r.get_primaries()[0] assert isinstance(r.lastsave(target_nodes=node), datetime.datetime) @@ -1628,11 +1628,11 @@ def test_client_pause(self, r): node = r.get_primaries()[0] assert r.client_pause(1, target_nodes=node) assert r.client_pause(timeout=1, target_nodes=node) - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): r.client_pause(timeout="not an integer", target_nodes=node) @skip_if_server_version_lt("6.2.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_unpause(self, r): assert r.client_unpause() @@ -1665,26 +1665,26 @@ def test_client_info(self, r): @skip_if_server_version_lt("2.6.9") def test_client_kill(self, r, r2): node = r.get_primaries()[0] - r.client_setname("redis-py-c1", target_nodes="all") - r2.client_setname("redis-py-c2", target_nodes="all") + r.client_setname("valkey-py-c1", target_nodes="all") + r2.client_setname("valkey-py-c2", target_nodes="all") clients = [ client for client in r.client_list(target_nodes=node) - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 
clients_by_name = {client.get("name"): client for client in clients} - client_addr = clients_by_name["redis-py-c2"].get("addr") + client_addr = clients_by_name["valkey-py-c2"].get("addr") assert r.client_kill(client_addr, target_nodes=node) is True clients = [ client for client in r.client_list(target_nodes=node) - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" @skip_if_server_version_lt("2.6.0") def test_cluster_bitop_not_empty_string(self, r): @@ -2275,7 +2275,7 @@ def test_geosearchstore_dist(self, r): longitude=2.191, latitude=41.433, radius=1000, - storedist=True, + storedist=True, ) # instead of save the geo score, the distance is saved. assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301 @@ -2396,11 +2396,11 @@ def test_cluster_randomkey(self, r): assert r.randomkey(target_nodes=node) in (b"{foo}a", b"{foo}b", b"{foo}c") @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_log(self, r, request): key = "{cache}:" node = r.get_node_from_key(key) - username = "redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username, target_nodes="primaries") @@ -2418,7 +2418,7 @@ def teardown(): r.acl_log_reset(target_nodes=node) user_client = _get_client( - RedisCluster, request, flushdb=False, username=username + ValkeyCluster, request, flushdb=False, username=username ) # Valid operation and key @@ -2441,7 +2441,7 @@ def teardown(): assert r.acl_log_reset(target_nodes=node) def generate_lib_code(self, lib_name): - return f"""#!js api_version=1.0 name={lib_name}\n redis.registerFunction('foo', ()=>{{return 'bar'}})""" # noqa + return f"""#!js api_version=1.0 name={lib_name}\n valkey.registerFunction('foo', ()=>{{return 'bar'}})""" # noqa def try_delete_libs(self, r, *lib_names):
for lib_name in lib_names: @@ -2451,6 +2451,7 @@ def try_delete_libs(self, r, *lib_names): pass @skip_if_server_version_lt("7.1.140") + @pytest.mark.skip def test_tfunction_load_delete(self, r): r.gears_refresh_cluster() self.try_delete_libs(r, "lib1") @@ -2459,6 +2460,7 @@ def test_tfunction_load_delete(self, r): assert r.tfunction_delete("lib1") @skip_if_server_version_lt("7.1.140") + @pytest.mark.skip def test_tfunction_list(self, r): r.gears_refresh_cluster() self.try_delete_libs(r, "lib1", "lib2", "lib3") @@ -2482,6 +2484,7 @@ def test_tfunction_list(self, r): assert r.tfunction_delete("lib3") @skip_if_server_version_lt("7.1.140") + @pytest.mark.skip def test_tfcall(self, r): r.gears_refresh_cluster() self.try_delete_libs(r, "lib1") @@ -2540,8 +2543,8 @@ def test_init_slots_cache_not_all_slots_covered(self): [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]], [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]], ] - with pytest.raises(RedisClusterException) as ex: - get_mocked_redis_client( + with pytest.raises(ValkeyClusterException) as ex: + get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=cluster_slots, @@ -2563,7 +2566,7 @@ def test_init_slots_cache_not_require_full_coverage_success(self): [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]], ] - rc = get_mocked_redis_client( + rc = get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=cluster_slots, @@ -2582,11 +2585,11 @@ def test_init_slots_cache(self): [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.2", 7005]], ] - rc = get_mocked_redis_client( + rc = get_mocked_valkey_client( host=default_host, port=default_port, cluster_slots=good_slots_resp ) n_manager = rc.nodes_manager - assert len(n_manager.slots_cache) == REDIS_CLUSTER_HASH_SLOTS + assert len(n_manager.slots_cache) == VALKEY_CLUSTER_HASH_SLOTS for slot_info in good_slots_resp: all_hosts = ["127.0.0.1", "127.0.0.2"] all_ports = [7000, 7001, 7002, 7003, 7004, 7005] @@ -2618,7 +2621,7 
@@ def test_init_promote_server_type_for_node_in_cache(self): cluster_slots_after_promotion, ] - with patch.object(Redis, "execute_command") as execute_command_mock: + with patch.object(Valkey, "execute_command") as execute_command_mock: def execute_command(*_args, **_kwargs): if _args[0] == "CLUSTER SLOTS": @@ -2654,11 +2657,11 @@ def execute_command(*_args, **_kwargs): def test_init_slots_cache_cluster_mode_disabled(self): """ - Test that creating a RedisCluster failes if one of the startup nodes + Test that creating a ValkeyCluster failes if one of the startup nodes has cluster mode disabled """ - with pytest.raises(RedisClusterException) as e: - get_mocked_redis_client( + with pytest.raises(ValkeyClusterException) as e: + get_mocked_valkey_client( cluster_slots_raise_error=True, host=default_host, port=default_port, @@ -2671,7 +2674,7 @@ def test_empty_startup_nodes(self): It should not be possible to create a node manager with no nodes specified """ - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): NodesManager([]) def test_wrong_startup_nodes_type(self): @@ -2679,7 +2682,7 @@ def test_wrong_startup_nodes_type(self): If something other then a list type itteratable is provided it should fail """ - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): NodesManager({}) def test_init_slots_cache_slots_collision(self, request): @@ -2688,12 +2691,12 @@ def test_init_slots_cache_slots_collision(self, request): raise an error. In this test both nodes will say that the first slots block should be bound to different servers. 
""" - with patch.object(NodesManager, "create_redis_node") as create_redis_node: + with patch.object(NodesManager, "create_valkey_node") as create_valkey_node: - def create_mocked_redis_node(host, port, **kwargs): + def create_mocked_valkey_node(host, port, **kwargs): """ Helper function to return custom slots cache data from - different redis nodes + different valkey nodes """ if port == 7000: result = [ @@ -2709,7 +2712,7 @@ def create_mocked_redis_node(host, port, **kwargs): else: result = [] - r_node = Redis(host=host, port=port) + r_node = Valkey(host=host, port=port) orig_execute_command = r_node.execute_command @@ -2726,12 +2729,12 @@ def execute_command(*args, **kwargs): r_node.execute_command = execute_command return r_node - create_redis_node.side_effect = create_mocked_redis_node + create_valkey_node.side_effect = create_mocked_valkey_node - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: node_1 = ClusterNode("127.0.0.1", 7000) node_2 = ClusterNode("127.0.0.1", 7001) - RedisCluster(startup_nodes=[node_1, node_2]) + ValkeyCluster(startup_nodes=[node_1, node_2]) assert str(ex.value).startswith( "startup_nodes could not agree on a valid slots cache" ), str(ex.value) @@ -2743,7 +2746,7 @@ def test_cluster_one_instance(self): """ node = ClusterNode(default_host, default_port) cluster_slots = [[0, 16383, ["", default_port]]] - rc = get_mocked_redis_client(startup_nodes=[node], cluster_slots=cluster_slots) + rc = get_mocked_valkey_client(startup_nodes=[node], cluster_slots=cluster_slots) n = rc.nodes_manager assert len(n.nodes_cache) == 1 @@ -2751,8 +2754,8 @@ def test_cluster_one_instance(self): assert n_node is not None assert n_node == node assert n_node.server_type == PRIMARY - assert len(n.slots_cache) == REDIS_CLUSTER_HASH_SLOTS - for i in range(0, REDIS_CLUSTER_HASH_SLOTS): + assert len(n.slots_cache) == VALKEY_CLUSTER_HASH_SLOTS + for i in range(0, VALKEY_CLUSTER_HASH_SLOTS): assert n.slots_cache[i] 
== [n_node] def test_init_with_down_node(self): @@ -2760,13 +2763,13 @@ def test_init_with_down_node(self): If I can't connect to one of the nodes, everything should still work. But if I can't connect to any of the nodes, exception should be thrown. """ - with patch.object(NodesManager, "create_redis_node") as create_redis_node: + with patch.object(NodesManager, "create_valkey_node") as create_valkey_node: - def create_mocked_redis_node(host, port, **kwargs): + def create_mocked_valkey_node(host, port, **kwargs): if port == 7000: raise ConnectionError("mock connection error for 7000") - r_node = Redis(host=host, port=port, decode_responses=True) + r_node = Valkey(host=host, port=port, decode_responses=True) def execute_command(*args, **kwargs): if args[0] == "CLUSTER SLOTS": @@ -2783,16 +2786,16 @@ def execute_command(*args, **kwargs): return r_node - create_redis_node.side_effect = create_mocked_redis_node + create_valkey_node.side_effect = create_mocked_valkey_node node_1 = ClusterNode("127.0.0.1", 7000) node_2 = ClusterNode("127.0.0.1", 7001) # If all startup nodes fail to connect, connection error should be # thrown - with pytest.raises(RedisClusterException) as e: - RedisCluster(startup_nodes=[node_1]) - assert "Redis Cluster cannot be connected" in str(e.value) + with pytest.raises(ValkeyClusterException) as e: + ValkeyCluster(startup_nodes=[node_1]) + assert "Valkey Cluster cannot be connected" in str(e.value) with patch.object( CommandsParser, "initialize", autospec=True @@ -2813,13 +2816,13 @@ def cmd_init_mock(self, r): cmd_parser_initialize.side_effect = cmd_init_mock # When at least one startup node is reachable, the cluster # initialization should succeeds - rc = RedisCluster(startup_nodes=[node_1, node_2]) + rc = ValkeyCluster(startup_nodes=[node_1, node_2]) assert rc.get_node(host=default_host, port=7001) is not None assert rc.get_node(host=default_host, port=7002) is not None @pytest.mark.parametrize("dynamic_startup_nodes", [True, False]) def 
test_init_slots_dynamic_startup_nodes(self, dynamic_startup_nodes): - rc = get_mocked_redis_client( + rc = get_mocked_valkey_client( host="my@DNS.com", port=7000, cluster_slots=default_cluster_slots, @@ -2842,28 +2845,28 @@ def test_init_slots_dynamic_startup_nodes(self, dynamic_startup_nodes): "connection_pool_class", [ConnectionPool, BlockingConnectionPool] ) def test_connection_pool_class(self, connection_pool_class): - rc = get_mocked_redis_client( - url="redis://my@DNS.com:7000", + rc = get_mocked_valkey_client( + url="valkey://my@DNS.com:7000", cluster_slots=default_cluster_slots, connection_pool_class=connection_pool_class, ) for node in rc.nodes_manager.nodes_cache.values(): assert isinstance( - node.redis_connection.connection_pool, connection_pool_class + node.valkey_connection.connection_pool, connection_pool_class ) @pytest.mark.parametrize("queue_class", [Queue, LifoQueue]) def test_allow_custom_queue_class(self, queue_class): - rc = get_mocked_redis_client( - url="redis://my@DNS.com:7000", + rc = get_mocked_valkey_client( + url="valkey://my@DNS.com:7000", cluster_slots=default_cluster_slots, connection_pool_class=BlockingConnectionPool, queue_class=queue_class, ) for node in rc.nodes_manager.nodes_cache.values(): - assert node.redis_connection.connection_pool.queue_class == queue_class + assert node.valkey_connection.connection_pool.queue_class == queue_class @pytest.mark.onlycluster @@ -2904,19 +2907,19 @@ def test_init_pubusub_without_specifying_node(self, r): def test_init_pubsub_with_a_non_existent_node(self, r): """ Test creation of pubsub instance with node that doesn't exists in the - cluster. RedisClusterException should be raised. + cluster. ValkeyClusterException should be raised. 
""" node = ClusterNode("1.1.1.1", 1111) - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): r.pubsub(node) def test_init_pubsub_with_a_non_existent_host_port(self, r): """ Test creation of pubsub instance with host and port that don't belong to a node in the cluster. - RedisClusterException should be raised. + ValkeyClusterException should be raised. """ - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): r.pubsub(host="1.1.1.1", port=1111) def test_init_pubsub_host_or_port(self, r): @@ -2930,14 +2933,14 @@ def test_init_pubsub_host_or_port(self, r): with pytest.raises(DataError): r.pubsub(port=16379) - def test_get_redis_connection(self, r): + def test_get_valkey_connection(self, r): """ - Test that get_redis_connection() returns the redis connection of the + Test that get_valkey_connection() returns the valkey connection of the set pubsub node """ node = r.get_default_node() p = r.pubsub(node=node) - assert p.get_redis_connection() == node.redis_connection + assert p.get_valkey_connection() == node.valkey_connection @pytest.mark.onlycluster @@ -2953,28 +2956,28 @@ def test_blocked_methods(self, r): They maybe implemented in the future. 
""" pipe = r.pipeline() - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.multi() - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.immediate_execute_command() - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe._execute_transaction(None, None, None) - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.load_scripts() - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.watch() - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.unwatch() - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.script_load_for_pipeline(None) - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.eval() def test_blocked_arguments(self, r): @@ -2982,7 +2985,7 @@ def test_blocked_arguments(self, r): Currently some arguments is blocked when using in cluster mode. They maybe implemented in the future. 
""" - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: r.pipeline(transaction=True) assert ( @@ -2990,16 +2993,16 @@ def test_blocked_arguments(self, r): is True ) - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(ValkeyClusterException) as ex: r.pipeline(shard_hint=True) assert ( str(ex.value).startswith("shard_hint is deprecated in cluster mode") is True ) - def test_redis_cluster_pipeline(self, r): + def test_valkey_cluster_pipeline(self, r): """ - Test that we can use a pipeline with the RedisCluster class + Test that we can use a pipeline with the ValkeyCluster class """ with r.pipeline() as pipe: pipe.set("foo", "bar") @@ -3011,7 +3014,7 @@ def test_mget_disabled(self, r): Test that mget is disabled for ClusterPipeline """ with r.pipeline() as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.mget(["a"]) def test_mset_disabled(self, r): @@ -3019,7 +3022,7 @@ def test_mset_disabled(self, r): Test that mset is disabled for ClusterPipeline """ with r.pipeline() as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.mset({"a": 1, "b": 2}) def test_rename_disabled(self, r): @@ -3027,7 +3030,7 @@ def test_rename_disabled(self, r): Test that rename is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.rename("a", "b") def test_renamenx_disabled(self, r): @@ -3035,7 +3038,7 @@ def test_renamenx_disabled(self, r): Test that renamenx is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.renamenx("a", "b") def test_delete_single(self, r): @@ -3054,7 +3057,7 @@ def test_multi_delete_unsupported(self, r): with r.pipeline(transaction=False) as pipe: r["a"] = 1 
r["b"] = 2 - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.delete("a", "b") def test_unlink_single(self, r): @@ -3073,7 +3076,7 @@ def test_multi_unlink_unsupported(self, r): with r.pipeline(transaction=False) as pipe: r["a"] = 1 r["b"] = 2 - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.unlink("a", "b") def test_brpoplpush_disabled(self, r): @@ -3081,7 +3084,7 @@ def test_brpoplpush_disabled(self, r): Test that brpoplpush is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.brpoplpush() def test_rpoplpush_disabled(self, r): @@ -3089,7 +3092,7 @@ def test_rpoplpush_disabled(self, r): Test that rpoplpush is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.rpoplpush() def test_sort_disabled(self, r): @@ -3097,7 +3100,7 @@ def test_sort_disabled(self, r): Test that sort is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.sort() def test_sdiff_disabled(self, r): @@ -3105,7 +3108,7 @@ def test_sdiff_disabled(self, r): Test that sdiff is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.sdiff() def test_sdiffstore_disabled(self, r): @@ -3113,7 +3116,7 @@ def test_sdiffstore_disabled(self, r): Test that sdiffstore is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.sdiffstore() def test_sinter_disabled(self, r): @@ -3121,7 +3124,7 @@ def test_sinter_disabled(self, r): 
Test that sinter is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.sinter() def test_sinterstore_disabled(self, r): @@ -3129,7 +3132,7 @@ def test_sinterstore_disabled(self, r): Test that sinterstore is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.sinterstore() def test_smove_disabled(self, r): @@ -3137,7 +3140,7 @@ def test_smove_disabled(self, r): Test that move is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.smove() def test_sunion_disabled(self, r): @@ -3145,7 +3148,7 @@ def test_sunion_disabled(self, r): Test that sunion is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.sunion() def test_sunionstore_disabled(self, r): @@ -3153,7 +3156,7 @@ def test_sunionstore_disabled(self, r): Test that sunionstore is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.sunionstore() def test_spfmerge_disabled(self, r): @@ -3161,7 +3164,7 @@ def test_spfmerge_disabled(self, r): Test that spfmerge is disabled for ClusterPipeline """ with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): + with pytest.raises(ValkeyClusterException): pipe.pfmerge() def test_multi_key_operation_with_a_single_slot(self, r): @@ -3212,7 +3215,7 @@ def raise_connection_error(): with r.pipeline() as pipe: mock_node_resp_func(node, raise_connection_error) res = pipe.get(key).get(key).execute(raise_on_error=False) - assert 
node.redis_connection.connection.read_response.called + assert node.valkey_connection.connection.read_response.called assert isinstance(res[0], ConnectionError) def test_connection_error_raised(self, r): @@ -3255,8 +3258,8 @@ def raise_ask_error(): mock_node_resp_func(first_node, raise_ask_error) mock_node_resp(ask_node, "MOCK_OK") res = pipe.get(key).execute() - assert first_node.redis_connection.connection.read_response.called - assert ask_node.redis_connection.connection.read_response.called + assert first_node.valkey_connection.connection.read_response.called + assert ask_node.valkey_connection.connection.read_response.called assert res == ["MOCK_OK"] def test_return_previously_acquired_connections(self, r): @@ -3264,8 +3267,8 @@ def test_return_previously_acquired_connections(self, r): # from different nodes assert r.keyslot("a") != r.keyslot("b") - orig_func = redis.cluster.get_connection - with patch("redis.cluster.get_connection") as get_connection: + orig_func = valkey.cluster.get_connection + with patch("valkey.cluster.get_connection") as get_connection: def raise_error(target_node, *args, **kwargs): if get_connection.call_count == 2: @@ -3280,7 +3283,7 @@ def raise_error(target_node, *args, **kwargs): # 4 = 2 get_connections per execution * 2 executions assert get_connection.call_count == 4 for cluster_node in r.nodes_manager.nodes_cache.values(): - connection_pool = cluster_node.redis_connection.connection_pool + connection_pool = cluster_node.valkey_connection.connection_pool num_of_conns = len(connection_pool._available_connections) assert num_of_conns == connection_pool._created_connections @@ -3346,7 +3349,7 @@ def raise_moved_error(): # occurred. If MovedError occurs, we should see the # reinitialize_counter increase. 
assert readwrite_pipe.reinitialize_counter == 1 - conn = replica.redis_connection.connection + conn = replica.valkey_connection.connection assert conn.read_response.called is True def test_readonly_pipeline_from_readonly_client(self, request): @@ -3355,7 +3358,7 @@ def test_readonly_pipeline_from_readonly_client(self, request): has it enabled """ # Create a cluster with reading from replications - ro = _get_client(RedisCluster, request, read_from_replicas=True) + ro = _get_client(ValkeyCluster, request, read_from_replicas=True) key = "bar" ro.set(key, "foo") import time @@ -3370,7 +3373,7 @@ def test_readonly_pipeline_from_readonly_client(self, request): executed_on_replica = False for node in slot_nodes: if node.server_type == REPLICA: - conn = node.redis_connection.connection + conn = node.valkey_connection.connection executed_on_replica = conn.read_response.called if executed_on_replica: break diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py index e3b44a14..dc125199 100644 --- a/tests/test_command_parser.py +++ b/tests/test_command_parser.py @@ -1,10 +1,10 @@ import pytest -from redis._parsers import CommandsParser +from valkey._parsers import CommandsParser from .conftest import ( assert_resp_response, - skip_if_redis_enterprise, skip_if_server_version_lt, + skip_if_valkey_enterprise, ) @@ -24,7 +24,7 @@ def test_get_keys_predetermined_key_location(self, r): assert commands_parser.get_keys(r, *args3) == ["foo", "bar", "foobar"] @pytest.mark.filterwarnings("ignore:ResponseError") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_get_moveable_keys(self, r): commands_parser = CommandsParser(r) args1 = [ diff --git a/tests/test_commands.py b/tests/test_commands.py index cd392552..c6a50634 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -9,24 +9,24 @@ from unittest.mock import patch import pytest -import redis -from redis import exceptions -from redis._parsers.helpers import ( - _RedisCallbacks, - 
_RedisCallbacksRESP2, - _RedisCallbacksRESP3, +import valkey +from valkey import exceptions +from valkey._parsers.helpers import ( + _ValkeyCallbacks, + _ValkeyCallbacksRESP2, + _ValkeyCallbacksRESP3, parse_info, ) -from redis.client import EMPTY_RESPONSE, NEVER_DECODE +from valkey.client import EMPTY_RESPONSE, NEVER_DECODE from .conftest import ( _get_client, assert_resp_response, assert_resp_response_in, is_resp2_connection, - skip_if_redis_enterprise, skip_if_server_version_gte, skip_if_server_version_lt, + skip_if_valkey_enterprise, skip_unless_arch_bits, ) @@ -47,7 +47,7 @@ def cleanup(): r.config_set("slowlog-max-len", 128) -def redis_server_time(client): +def valkey_server_time(client): seconds, milliseconds = client.time() timestamp = float(f"{seconds}.{milliseconds}") return datetime.datetime.fromtimestamp(timestamp) @@ -66,13 +66,13 @@ class TestResponseCallbacks: "Tests for the response callback system" def test_response_callbacks(self, r): - callbacks = _RedisCallbacks + callbacks = _ValkeyCallbacks if is_resp2_connection(r): - callbacks.update(_RedisCallbacksRESP2) + callbacks.update(_ValkeyCallbacksRESP2) else: - callbacks.update(_RedisCallbacksRESP3) + callbacks.update(_ValkeyCallbacksRESP3) assert r.response_callbacks == callbacks - assert id(r.response_callbacks) != id(_RedisCallbacks) + assert id(r.response_callbacks) != id(_ValkeyCallbacks) r.set_response_callback("GET", lambda x: "static") r["a"] = "foo" assert r["a"] == "static" @@ -81,9 +81,9 @@ def test_case_insensitive_command_names(self, r): assert r.response_callbacks["ping"] == r.response_callbacks["PING"] -class TestRedisCommands: +class TestValkeyCommands: @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_auth(self, r, request): # sending an AUTH command before setting a user/password on the # server should return an AuthenticationError @@ -102,7 +102,7 @@ def test_auth(self, r, request): assert r.auth(temp_pass) is True # test for other 
users - username = "redis-py-auth" + username = "valkey-py-auth" def teardown(): try: @@ -114,8 +114,8 @@ def teardown(): # authentication in the connection's `on_connect` method r.connection.password = temp_pass except AttributeError: - # connection field is not set in Redis Cluster, but that's ok - # because the problem discussed above does not apply to Redis Cluster + # connection field is not set in Valkey Cluster, but that's ok + # because the problem discussed above does not apply to Valkey Cluster pass r.auth(temp_pass) r.config_set("requirepass", "") @@ -134,7 +134,7 @@ def teardown(): def test_command_on_invalid_key_type(self, r): r.lpush("a", "1") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r["a"] # SERVER INFORMATION @@ -151,9 +151,9 @@ def test_acl_cat_with_category(self, r): assert "get" in commands or b"get" in commands @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_dryrun(self, r, request): - username = "redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username) @@ -166,9 +166,9 @@ def teardown(): assert no_permissions_message in r.acl_dryrun(username, "get", "key") @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_deluser(self, r, request): - username = "redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username) @@ -191,7 +191,7 @@ def teardown(): assert r.acl_getuser(users[4]) is None @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_genpass(self, r): password = r.acl_genpass() assert isinstance(password, (str, bytes)) @@ -206,10 +206,10 @@ def test_acl_genpass(self, r): assert len(password) == 139 @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_getuser_setuser(self, r, request): r.flushall() - username = 
"redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username) @@ -346,9 +346,9 @@ def test_acl_help(self, r): assert len(res) != 0 @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_list(self, r, request): - username = "redis-py-user" + username = "valkey-py-user" start = r.acl_list() def teardown(): @@ -361,10 +361,10 @@ def teardown(): assert len(users) == len(start) + 1 @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() @pytest.mark.onlynoncluster def test_acl_log(self, r, request): - username = "redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username) @@ -381,7 +381,7 @@ def teardown(): r.acl_log_reset() user_client = _get_client( - redis.Redis, request, flushdb=False, username=username + valkey.Valkey, request, flushdb=False, username=username ) # Valid operation and key @@ -409,9 +409,9 @@ def teardown(): ) @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_setuser_categories_without_prefix_fails(self, r, request): - username = "redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username) @@ -422,9 +422,9 @@ def teardown(): r.acl_setuser(username, categories=["list"]) @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_setuser_commands_without_prefix_fails(self, r, request): - username = "redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username) @@ -435,9 +435,9 @@ def teardown(): r.acl_setuser(username, commands=["get"]) @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_acl_setuser_add_passwords_and_nopass_fails(self, r, request): - username = "redis-py-user" + username = "valkey-py-user" def teardown(): r.acl_deluser(username) @@ -474,13 +474,13 @@ def test_client_info(self, r): 
@pytest.mark.onlynoncluster @skip_if_server_version_lt("5.0.0") def test_client_list_types_not_replica(self, r): - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): r.client_list(_type="not a client type") for client_type in ["normal", "master", "pubsub"]: clients = r.client_list(_type=client_type) assert isinstance(clients, list) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_list_replica(self, r): clients = r.client_list(_type="replica") assert isinstance(clients, list) @@ -494,9 +494,9 @@ def test_client_list_client_id(self, r, request): assert "addr" in clients[0] # testing multiple client ids - _get_client(redis.Redis, request, flushdb=False) - _get_client(redis.Redis, request, flushdb=False) - _get_client(redis.Redis, request, flushdb=False) + _get_client(valkey.Valkey, request, flushdb=False) + _get_client(valkey.Valkey, request, flushdb=False) + _get_client(valkey.Valkey, request, flushdb=False) clients_listed = r.client_list(client_id=clients[:-1]) assert len(clients_listed) > 1 @@ -507,7 +507,7 @@ def test_client_id(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("6.2.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_trackinginfo(self, r): res = r.client_trackinginfo() assert len(res) > 2 @@ -515,7 +515,7 @@ def test_client_trackinginfo(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_tracking(self, r, r2): # simple case assert r.client_tracking_on() @@ -551,25 +551,25 @@ def test_client_getname(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.6.9") def test_client_setname(self, r): - assert r.client_setname("redis_py_test") - assert_resp_response(r, r.client_getname(), "redis_py_test", b"redis_py_test") + assert r.client_setname("valkey_py_test") + assert_resp_response(r, r.client_getname(), "valkey_py_test", 
b"valkey_py_test") @skip_if_server_version_lt("7.2.0") - def test_client_setinfo(self, r: redis.Redis): + def test_client_setinfo(self, r: valkey.Valkey): r.ping() info = r.client_info() - assert info["lib-name"] == "redis-py" - assert info["lib-ver"] == redis.__version__ + assert info["lib-name"] == "valkey-py" + assert info["lib-ver"] == valkey.__version__ assert r.client_setinfo("lib-name", "test") assert r.client_setinfo("lib-ver", "123") info = r.client_info() assert info["lib-name"] == "test" assert info["lib-ver"] == "123" - r2 = redis.Redis(lib_name="test2", lib_version="1234") + r2 = valkey.Valkey(lib_name="test2", lib_version="1234") info = r2.client_info() assert info["lib-name"] == "test2" assert info["lib-ver"] == "1234" - r3 = redis.Redis(lib_name=None, lib_version=None) + r3 = valkey.Valkey(lib_name=None, lib_version=None) info = r3.client_info() assert info["lib-name"] == "" assert info["lib-ver"] == "" @@ -577,27 +577,27 @@ def test_client_setinfo(self, r: redis.Redis): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.6.9") def test_client_kill(self, r, r2): - r.client_setname("redis-py-c1") - r2.client_setname("redis-py-c2") + r.client_setname("valkey-py-c1") + r2.client_setname("valkey-py-c2") clients = [ client for client in r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_addr = clients_by_name["redis-py-c2"].get("addr") + client_addr = clients_by_name["valkey-py-c2"].get("addr") assert r.client_kill(client_addr) is True clients = [ client for client in r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" @skip_if_server_version_lt("2.8.12") def 
test_client_kill_filter_invalid_params(self, r): @@ -616,80 +616,80 @@ def test_client_kill_filter_invalid_params(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.12") def test_client_kill_filter_by_id(self, r, r2): - r.client_setname("redis-py-c1") - r2.client_setname("redis-py-c2") + r.client_setname("valkey-py-c1") + r2.client_setname("valkey-py-c2") clients = [ client for client in r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_2_id = clients_by_name["redis-py-c2"].get("id") + client_2_id = clients_by_name["valkey-py-c2"].get("id") resp = r.client_kill_filter(_id=client_2_id) assert resp == 1 clients = [ client for client in r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.12") def test_client_kill_filter_by_addr(self, r, r2): - r.client_setname("redis-py-c1") - r2.client_setname("redis-py-c2") + r.client_setname("valkey-py-c1") + r2.client_setname("valkey-py-c2") clients = [ client for client in r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_2_addr = clients_by_name["redis-py-c2"].get("addr") + client_2_addr = clients_by_name["valkey-py-c2"].get("addr") resp = r.client_kill_filter(addr=client_2_addr) assert resp == 1 clients = [ client for client in r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert 
len(clients) == 1 - assert clients[0].get("name") == "redis-py-c1" + assert clients[0].get("name") == "valkey-py-c1" @skip_if_server_version_lt("2.6.9") def test_client_list_after_client_setname(self, r): - r.client_setname("redis_py_test") + r.client_setname("valkey_py_test") clients = r.client_list() # we don't know which client ours will be - assert "redis_py_test" in [c["name"] for c in clients] + assert "valkey_py_test" in [c["name"] for c in clients] @skip_if_server_version_lt("6.2.0") def test_client_kill_filter_by_laddr(self, r, r2): - r.client_setname("redis-py-c1") - r2.client_setname("redis-py-c2") + r.client_setname("valkey-py-c1") + r2.client_setname("valkey-py-c2") clients = [ client for client in r.client_list() - if client.get("name") in ["redis-py-c1", "redis-py-c2"] + if client.get("name") in ["valkey-py-c1", "valkey-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} - client_2_addr = clients_by_name["redis-py-c2"].get("laddr") + client_2_addr = clients_by_name["valkey-py-c2"].get("laddr") assert r.client_kill_filter(laddr=client_2_addr) @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_kill_filter_by_user(self, r, request): killuser = "user_to_kill" r.acl_setuser( @@ -700,7 +700,7 @@ def test_client_kill_filter_by_user(self, r, request): keys=["cache:*"], nopass=True, ) - _get_client(redis.Redis, request, flushdb=False, username=killuser) + _get_client(valkey.Valkey, request, flushdb=False, username=killuser) r.client_kill_filter(user=killuser) clients = r.client_list() for c in clients: @@ -708,9 +708,9 @@ def test_client_kill_filter_by_user(self, r, request): r.acl_deluser(killuser) @skip_if_server_version_lt("7.4.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_kill_filter_by_maxage(self, r, request): - _get_client(redis.Redis, request, flushdb=False) + _get_client(valkey.Valkey, request, 
flushdb=False) time.sleep(4) assert len(r.client_list()) == 2 r.client_kill_filter(maxage=2) @@ -718,15 +718,15 @@ def test_client_kill_filter_by_maxage(self, r, request): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.9.50") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_pause(self, r): assert r.client_pause(1) assert r.client_pause(timeout=1) - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): r.client_pause(timeout="not an integer") @skip_if_server_version_lt("6.2.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_pause_all(self, r, r2): assert r.client_pause(1, all=False) assert r2.set("foo", "bar") @@ -735,7 +735,7 @@ def test_client_pause_all(self, r, r2): @pytest.mark.onlynoncluster @skip_if_server_version_lt("6.2.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_unpause(self, r): assert r.client_unpause() == b"OK" @@ -758,7 +758,7 @@ def test_client_no_touch(self, r): @skip_if_server_version_lt("3.2.0") def test_client_reply(self, r, r_timeout): assert r_timeout.client_reply("ON") == b"OK" - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): r_timeout.client_reply("OFF") r_timeout.client_reply("SKIP") @@ -770,7 +770,7 @@ def test_client_reply(self, r, r_timeout): @pytest.mark.onlynoncluster @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_client_getredir(self, r): assert isinstance(r.client_getredir(), int) assert r.client_getredir() == -1 @@ -787,13 +787,13 @@ def test_config_get(self, r): # assert data['maxmemory'].isdigit() @skip_if_server_version_lt("7.0.0") - def test_config_get_multi_params(self, r: redis.Redis): + def test_config_get_multi_params(self, r: valkey.Valkey): res = r.config_get("*max-*-entries*", "maxmemory") assert "maxmemory" in res assert "hash-max-listpack-entries" in res @pytest.mark.onlynoncluster - 
@skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_config_resetstat(self, r): r.ping() prior_commands_processed = int(r.info()["total_commands_processed"]) @@ -802,7 +802,7 @@ def test_config_resetstat(self, r): reset_commands_processed = int(r.info()["total_commands_processed"]) assert reset_commands_processed < prior_commands_processed - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_config_set(self, r): r.config_set("timeout", 70) assert r.config_get()["timeout"] == "70" @@ -810,8 +810,8 @@ def test_config_set(self, r): assert r.config_get()["timeout"] == "0" @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() - def test_config_set_multi_params(self, r: redis.Redis): + @skip_if_valkey_enterprise() + def test_config_set_multi_params(self, r: valkey.Valkey): r.config_set("timeout", 70, "maxmemory", 100) assert r.config_get()["timeout"] == "70" assert r.config_get()["maxmemory"] == "100" @@ -820,7 +820,7 @@ def test_config_set_multi_params(self, r: redis.Redis): assert r.config_get()["maxmemory"] == "0" @skip_if_server_version_lt("6.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_failover(self, r): with pytest.raises(NotImplementedError): r.failover() @@ -842,18 +842,18 @@ def test_info(self, r): info = r.info() assert isinstance(info, dict) assert "arch_bits" in info.keys() - assert "redis_version" in info.keys() + assert "valkey_version" in info.keys() @pytest.mark.onlynoncluster @skip_if_server_version_lt("7.0.0") def test_info_multi_sections(self, r): res = r.info("clients", "server") assert isinstance(res, dict) - assert "redis_version" in res + assert "valkey_version" in res assert "connected_clients" in res @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_lastsave(self, r): assert isinstance(r.lastsave(), datetime.datetime) @@ -868,7 +868,7 @@ def test_lolwut(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("6.2.0") 
- @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_reset(self, r): assert_resp_response(r, r.reset(), "RESET", b"RESET") @@ -887,7 +887,7 @@ def test_quit(self, r): assert r.quit() @skip_if_server_version_lt("2.8.12") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() @pytest.mark.onlynoncluster def test_role(self, r): assert r.role()[0] == b"master" @@ -895,7 +895,7 @@ def test_role(self, r): assert isinstance(r.role()[2], list) @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_select(self, r): assert r.select(5) assert r.select(2) @@ -974,18 +974,18 @@ def test_time(self, r): assert isinstance(t[0], int) assert isinstance(t[1], int) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_bgsave(self, r): assert r.bgsave() time.sleep(0.3) assert r.bgsave(True) - def test_never_decode_option(self, r: redis.Redis): + def test_never_decode_option(self, r: valkey.Valkey): opts = {NEVER_DECODE: []} r.delete("a") assert r.execute_command("EXISTS", "a", **opts) == 0 - def test_empty_response_option(self, r: redis.Redis): + def test_empty_response_option(self, r: valkey.Valkey): opts = {EMPTY_RESPONSE: []} r.delete("a") assert r.execute_command("EXISTS", "a", **opts) == 0 @@ -1022,7 +1022,7 @@ def test_bitcount_mode(self, r): assert r.bitcount("mykey") == 26 assert r.bitcount("mykey", 1, 1, "byte") == 6 assert r.bitcount("mykey", 5, 30, "bit") == 17 - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): assert r.bitcount("mykey", 5, 30, "but") @pytest.mark.onlynoncluster @@ -1092,9 +1092,9 @@ def test_bitpos(self, r): def test_bitpos_wrong_arguments(self, r): key = "key:bitpos:wrong:args" r.set(key, b"\xff\xf0\x00") - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): r.bitpos(key, 0, end=1) == 12 - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): r.bitpos(key, 7) == 12 
@skip_if_server_version_lt("7.0.0") @@ -1103,7 +1103,7 @@ def test_bitpos_mode(self, r): assert r.bitpos("mykey", 1, 0) == 8 assert r.bitpos("mykey", 1, 2, -1, "byte") == 16 assert r.bitpos("mykey", 0, 7, 15, "bit") == 7 - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r.bitpos("mykey", 1, 7, 15, "bite") @pytest.mark.onlynoncluster @@ -1126,8 +1126,8 @@ def test_copy_and_replace(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("6.2.0") def test_copy_to_another_database(self, request): - r0 = _get_client(redis.Redis, request, db=0) - r1 = _get_client(redis.Redis, request, db=1) + r0 = _get_client(valkey.Valkey, request, db=0) + r1 = _get_client(valkey.Valkey, request, db=1) r0.set("a", "foo") assert r0.copy("a", "b", destination_db=1) == 1 assert r1.get("b") == b"foo" @@ -1189,7 +1189,7 @@ def test_lcs(self, r): [b"matches", [[[4, 7], [5, 8]]], b"len", 6], {b"matches": [[[4, 7], [5, 8]]], b"len": 6}, ) - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): assert r.lcs("foo", "bar", len=True, idx=True) @skip_if_server_version_lt("2.6.0") @@ -1204,7 +1204,7 @@ def test_dump_and_restore(self, r): def test_dump_and_restore_and_replace(self, r): r["a"] = "bar" dumped = r.dump("a") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r.restore("a", 0, dumped) r.restore("a", 0, dumped, replace=True) @@ -1216,7 +1216,7 @@ def test_dump_and_restore_absttl(self, r): dumped = r.dump("a") del r["a"] ttl = int( - (redis_server_time(r) + datetime.timedelta(minutes=1)).timestamp() * 1000 + (valkey_server_time(r) + datetime.timedelta(minutes=1)).timestamp() * 1000 ) r.restore("a", ttl, dumped, absttl=True) assert r["a"] == b"foo" @@ -1268,17 +1268,17 @@ def test_expire_option_lt(self, r): assert r.expire("key", 150, lt=True) == 0 def test_expireat_datetime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = 
valkey_server_time(r) + datetime.timedelta(minutes=1) r["a"] = "foo" assert r.expireat("a", expire_at) is True assert 0 < r.ttl("a") <= 61 def test_expireat_no_key(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.expireat("a", expire_at) is False def test_expireat_unixtime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) r["a"] = "foo" expire_at_seconds = int(expire_at.timestamp()) assert r.expireat("a", expire_at_seconds) is True @@ -1293,38 +1293,38 @@ def test_expiretime(self, r): @skip_if_server_version_lt("7.0.0") def test_expireat_option_nx(self, r): assert r.set("key", "val") is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.expireat("key", expire_at, nx=True) is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=2) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=2) assert r.expireat("key", expire_at, nx=True) is False @skip_if_server_version_lt("7.0.0") def test_expireat_option_xx(self, r): assert r.set("key", "val") is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.expireat("key", expire_at, xx=True) is False assert r.expireat("key", expire_at) is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=2) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=2) assert r.expireat("key", expire_at, xx=True) is True @skip_if_server_version_lt("7.0.0") def test_expireat_option_gt(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=2) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=2) assert r.set("key", "val") is True assert r.expireat("key", expire_at) is True - 
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.expireat("key", expire_at, gt=True) is False - expire_at = redis_server_time(r) + datetime.timedelta(minutes=3) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=3) assert r.expireat("key", expire_at, gt=True) is True @skip_if_server_version_lt("7.0.0") def test_expireat_option_lt(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=2) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=2) assert r.set("key", "val") is True assert r.expireat("key", expire_at) is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=3) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=3) assert r.expireat("key", expire_at, lt=True) is False - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.expireat("key", expire_at, lt=True) is True def test_get_and_set(self, r): @@ -1356,7 +1356,7 @@ def test_getex(self, r): assert r.ttl("a") == 60 assert r.getex("a", px=6000) == b"1" assert r.ttl("a") == 6 - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.getex("a", pxat=expire_at) == b"1" assert r.ttl("a") <= 61 assert r.getex("a", persist=True) == b"1" @@ -1509,19 +1509,19 @@ def test_pexpire_option_lt(self, r): @skip_if_server_version_lt("2.6.0") def test_pexpireat_datetime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) r["a"] = "foo" assert r.pexpireat("a", expire_at) is True assert 0 < r.pttl("a") <= 61000 @skip_if_server_version_lt("2.6.0") def test_pexpireat_no_key(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + 
datetime.timedelta(minutes=1) assert r.pexpireat("a", expire_at) is False @skip_if_server_version_lt("2.6.0") def test_pexpireat_unixtime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) r["a"] = "foo" expire_at_milliseconds = int(expire_at.timestamp() * 1000) assert r.pexpireat("a", expire_at_milliseconds) is True @@ -1530,14 +1530,14 @@ def test_pexpireat_unixtime(self, r): @skip_if_server_version_lt("7.0.0") def test_pexpireat_option_nx(self, r): assert r.set("key", "val") is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.pexpireat("key", expire_at, nx=True) is True assert r.pexpireat("key", expire_at, nx=True) is False @skip_if_server_version_lt("7.0.0") def test_pexpireat_option_xx(self, r): assert r.set("key", "val") is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.pexpireat("key", expire_at, xx=True) is False assert r.pexpireat("key", expire_at) is True assert r.pexpireat("key", expire_at, xx=True) is True @@ -1545,21 +1545,21 @@ def test_pexpireat_option_xx(self, r): @skip_if_server_version_lt("7.0.0") def test_pexpireat_option_gt(self, r): assert r.set("key", "val") is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=2) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=2) assert r.pexpireat("key", expire_at) is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.pexpireat("key", expire_at, gt=True) is False - expire_at = redis_server_time(r) + datetime.timedelta(minutes=3) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=3) assert r.pexpireat("key", expire_at, gt=True) is True @skip_if_server_version_lt("7.0.0") def 
test_pexpireat_option_lt(self, r): assert r.set("key", "val") is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=2) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=2) assert r.pexpireat("key", expire_at) is True - expire_at = redis_server_time(r) + datetime.timedelta(minutes=3) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=3) assert r.pexpireat("key", expire_at, lt=True) is False - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + expire_at = valkey_server_time(r) + datetime.timedelta(minutes=1) assert r.pexpireat("key", expire_at, lt=True) is True @skip_if_server_version_lt("7.0.0") @@ -1682,13 +1682,13 @@ def test_set_ex_timedelta(self, r): @skip_if_server_version_lt("6.2.0") def test_set_exat_timedelta(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(seconds=10) + expire_at = valkey_server_time(r) + datetime.timedelta(seconds=10) assert r.set("a", "1", exat=expire_at) assert 0 < r.ttl("a") <= 10 @skip_if_server_version_lt("6.2.0") def test_set_pxat_timedelta(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(seconds=50) + expire_at = valkey_server_time(r) + datetime.timedelta(seconds=50) assert r.set("a", "1", pxat=expire_at) assert 0 < r.ttl("a") <= 100 @@ -1735,7 +1735,7 @@ def test_setrange(self, r): @skip_if_server_version_lt("6.0.0") @skip_if_server_version_gte("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_stralgo_lcs(self, r): key1 = "{foo}key1" key2 = "{foo}key2" @@ -1743,8 +1743,8 @@ def test_stralgo_lcs(self, r): value2 = "mynewtext" res = "mytext" - if skip_if_redis_enterprise().args[0] is True: - with pytest.raises(redis.exceptions.ResponseError): + if skip_if_valkey_enterprise().args[0] is True: + with pytest.raises(valkey.exceptions.ResponseError): assert r.stralgo("LCS", value1, value2) == res return @@ -1778,7 +1778,7 @@ def test_stralgo_lcs(self, r): @skip_if_server_version_lt("6.0.0") 
@skip_if_server_version_gte("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_stralgo_negative(self, r): with pytest.raises(exceptions.DataError): r.stralgo("ISSUB", "value1", "value2") @@ -1796,8 +1796,8 @@ def test_strlen(self, r): def test_substr(self, r): r["a"] = "0123456789" - if skip_if_redis_enterprise().args[0] is True: - with pytest.raises(redis.exceptions.ResponseError): + if skip_if_valkey_enterprise().args[0] is True: + with pytest.raises(valkey.exceptions.ResponseError): assert r.substr("a", 0) == b"0123456789" return @@ -1807,7 +1807,7 @@ def test_substr(self, r): assert r.substr("a", 3, -2) == b"345678" def generate_lib_code(self, lib_name): - return f"""#!js api_version=1.0 name={lib_name}\n redis.registerFunction('foo', ()=>{{return 'bar'}})""" # noqa + return f"""#!js api_version=1.0 name={lib_name}\n valkey.registerFunction('foo', ()=>{{return 'bar'}})""" # noqa def try_delete_libs(self, r, *lib_names): for lib_name in lib_names: @@ -1817,7 +1817,7 @@ def try_delete_libs(self, r, *lib_names): pass @pytest.mark.onlynoncluster - @skip_if_server_version_lt("7.1.140") + @pytest.mark.skip def test_tfunction_load_delete(self, r): self.try_delete_libs(r, "lib1") lib_code = self.generate_lib_code("lib1") @@ -1825,7 +1825,7 @@ def test_tfunction_load_delete(self, r): assert r.tfunction_delete("lib1") @pytest.mark.onlynoncluster - @skip_if_server_version_lt("7.1.140") + @pytest.mark.skip def test_tfunction_list(self, r): self.try_delete_libs(r, "lib1", "lib2", "lib3") assert r.tfunction_load(self.generate_lib_code("lib1")) @@ -1833,7 +1833,7 @@ def test_tfunction_list(self, r): assert r.tfunction_load(self.generate_lib_code("lib3")) # test error thrown when verbose > 4 - with pytest.raises(redis.exceptions.DataError): + with pytest.raises(valkey.exceptions.DataError): assert r.tfunction_list(verbose=8) functions = r.tfunction_list(verbose=1) @@ -1848,7 +1848,7 @@ def test_tfunction_list(self, r): assert 
r.tfunction_delete("lib3") @pytest.mark.onlynoncluster - @skip_if_server_version_lt("7.1.140") + @pytest.mark.skip def test_tfcall(self, r): self.try_delete_libs(r, "lib1") assert r.tfunction_load(self.generate_lib_code("lib1")) @@ -1957,7 +1957,7 @@ def test_lmpop(self, r): r.rpush("foo", "1", "2", "3", "4", "5") result = [b"foo", [b"1", b"2"]] assert r.lmpop("2", "bar", "foo", direction="LEFT", count=2) == result - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r.lmpop("2", "bar", "foo", direction="up", count=2) r.rpush("bar", "a", "b", "c", "d") assert r.lmpop("2", "bar", "foo", direction="LEFT") == [b"bar", [b"a"]] @@ -2355,14 +2355,14 @@ def test_sunionstore(self, r): assert r.smembers("c") == {b"1", b"2", b"3"} @skip_if_server_version_lt("1.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_debug_segfault(self, r): with pytest.raises(NotImplementedError): r.debug_segfault() @pytest.mark.onlynoncluster @skip_if_server_version_lt("3.2.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_script_debug(self, r): with pytest.raises(NotImplementedError): r.script_debug() @@ -2426,8 +2426,8 @@ def test_zadd_incr(self, r): def test_zadd_incr_with_xx(self, r): # this asks zadd to incr 'a1' only if it exists, but it clearly - # doesn't. Redis returns a null value in this case and so should - # redis-py + # doesn't. 
Valkey returns a null value in this case and so should + # valkey-py assert r.zadd("a", {"a1": 1}, xx=True, incr=True) is None @skip_if_server_version_lt("6.2.0") @@ -2697,7 +2697,7 @@ def test_zmpop(self, r): [b"a", [[b"a1", b"1"], [b"a2", b"2"]]], [b"a", [[b"a1", 1.0], [b"a2", 2.0]]], ) - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.zmpop("2", ["b", "a"], count=2) r.zadd("b", {"b1": 10, "ab": 9, "b3": 8}) assert_resp_response( @@ -2717,7 +2717,7 @@ def test_bzmpop(self, r): [b"a", [[b"a1", b"1"], [b"a2", b"2"]]], [b"a", [[b"a1", 1.0], [b"a2", 2.0]]], ) - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.bzmpop(1, "2", ["b", "a"], count=2) r.zadd("b", {"b1": 10, "ab": 9, "b3": 8}) assert_resp_response( @@ -2871,7 +2871,7 @@ def test_zrank(self, r): assert r.zrank("a", "a6") is None @skip_if_server_version_lt("7.2.0") - def test_zrank_withscore(self, r: redis.Redis): + def test_zrank_withscore(self, r: valkey.Valkey): r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert r.zrank("a", "a1") == 0 assert r.zrank("a", "a2") == 1 @@ -3120,11 +3120,11 @@ def test_hget_and_hset(self, r): assert r.hget("a", "2") == b"2" assert r.hget("a", "3") == b"3" - # field was updated, redis returns 0 + # field was updated, valkey returns 0 assert r.hset("a", "2", 5) == 0 assert r.hget("a", "2") == b"5" - # field is new, redis returns 1 + # field is new, valkey returns 1 assert r.hset("a", "4", 4) == 1 assert r.hget("a", "4") == b"4" @@ -3205,10 +3205,10 @@ def test_hmget(self, r): assert r.hmget("a", "a", "b", "c") == [b"1", b"2", b"3"] def test_hmset(self, r): - redis_class = type(r).__name__ + valkey_class = type(r).__name__ warning_message = ( r"^{0}\.hmset\(\) is deprecated\. 
" - r"Use {0}\.hset\(\) instead\.$".format(redis_class) + r"Use {0}\.hset\(\) instead\.$".format(valkey_class) ) h = {b"a": b"1", b"b": b"2", b"c": b"3"} with pytest.warns(DeprecationWarning, match=warning_message): @@ -3388,103 +3388,103 @@ def test_sort_ro(self, r): assert r.sort_ro("b", desc=True) == [b"3", b"2", b"1"] def test_sort_issue_924(self, r): - # Tests for issue https://github.com/andymccurdy/redis-py/issues/924 + # Tests for issue https://github.com/andymccurdy/valkey-py/issues/924 r.execute_command("SADD", "issue#924", 1) r.execute_command("SORT", "issue#924") @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_addslots(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("ADDSLOTS", 1) is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_count_failure_reports(self, mock_cluster_resp_int): assert isinstance( mock_cluster_resp_int.cluster("COUNT-FAILURE-REPORTS", "node"), int ) @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_countkeysinslot(self, mock_cluster_resp_int): assert isinstance(mock_cluster_resp_int.cluster("COUNTKEYSINSLOT", 2), int) @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_delslots(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("DELSLOTS", 1) is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_failover(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("FAILOVER", 1) is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_forget(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("FORGET", 1) is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_info(self, 
mock_cluster_resp_info): assert isinstance(mock_cluster_resp_info.cluster("info"), dict) @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_keyslot(self, mock_cluster_resp_int): assert isinstance(mock_cluster_resp_int.cluster("keyslot", "asdf"), int) @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_meet(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("meet", "ip", "port", 1) is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_nodes(self, mock_cluster_resp_nodes): assert isinstance(mock_cluster_resp_nodes.cluster("nodes"), dict) @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_replicate(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("replicate", "nodeid") is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_reset(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("reset", "hard") is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_saveconfig(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("saveconfig") is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_setslot(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.cluster("setslot", 1, "IMPORTING", "nodeid") is True @pytest.mark.onlynoncluster - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_cluster_slaves(self, mock_cluster_resp_slaves): assert isinstance(mock_cluster_resp_slaves.cluster("slaves", "nodeid"), dict) @pytest.mark.onlynoncluster @skip_if_server_version_lt("3.0.0") @skip_if_server_version_gte("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_readwrite(self, r): assert 
r.readwrite() @pytest.mark.onlynoncluster @skip_if_server_version_lt("3.0.0") def test_readonly_invalid_cluster_state(self, r): - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): r.readonly() @pytest.mark.onlynoncluster @skip_if_server_version_lt("3.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_readonly(self, mock_cluster_resp_ok): assert mock_cluster_resp_ok.readonly() is True @@ -3541,7 +3541,7 @@ def test_geoadd_ch(self, r): @skip_if_server_version_lt("3.2.0") def test_geoadd_invalid_params(self, r): - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): r.geoadd("barcelona", (1, 2)) @skip_if_server_version_lt("3.2.0") @@ -3572,7 +3572,7 @@ def test_geodist_missing_one_member(self, r): @skip_if_server_version_lt("3.2.0") def test_geodist_invalid_units(self, r): - with pytest.raises(exceptions.RedisError): + with pytest.raises(exceptions.ValkeyError): assert r.geodist("x", "y", "z", "inches") @skip_if_server_version_lt("3.2.0") @@ -3599,7 +3599,7 @@ def test_geopos(self, r): "place2", ) r.geoadd("barcelona", values) - # redis uses 52 bits precision, hereby small errors may be introduced. + # valkey uses 52 bits precision, hereby small errors may be introduced. assert_resp_response( r, r.geopos("barcelona", "place1", "place2"), @@ -3847,7 +3847,7 @@ def test_geosearchstore_dist(self, r): longitude=2.191, latitude=41.433, radius=1000, - storedist=True, + storedist=True, ) # instead of save the geo score, the distance is saved. 
assert r.zscore("places_barcelona", "place1") == 88.05060698409301 @@ -4108,11 +4108,11 @@ def test_xadd_minlen_and_limit(self, r): r.xadd(stream, {"foo": "bar"}) # Future self: No limits without approximate, according to the api - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): assert r.xadd(stream, {"foo": "bar"}, maxlen=3, approximate=False, limit=2) # limit can not be provided without maxlen or minid - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): assert r.xadd(stream, {"foo": "bar"}, limit=2) # maxlen with a limit @@ -4120,7 +4120,7 @@ def test_xadd_minlen_and_limit(self, r): r.delete(stream) # maxlen and minid can not be provided together - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): assert r.xadd(stream, {"foo": "bar"}, maxlen=3, minid="sometestvalue") # minid with a limit @@ -4145,7 +4145,7 @@ def test_xadd_minlen_and_limit(self, r): assert r.xadd(stream, {"foo": "bar"}, approximate=True, minid=m3) @skip_if_server_version_lt("7.0.0") - def test_xadd_explicit_ms(self, r: redis.Redis): + def test_xadd_explicit_ms(self, r: valkey.Valkey): stream = "stream" message_id = r.xadd(stream, {"foo": "bar"}, "9999999999999999999-*") ms = message_id[: message_id.index(b"-")] @@ -4189,11 +4189,11 @@ def test_xautoclaim_negative(self, r): stream = "stream" group = "group" consumer = "consumer" - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.xautoclaim(stream, group, consumer, min_idle_time=-1) with pytest.raises(ValueError): r.xautoclaim(stream, group, consumer, min_idle_time="wrong") - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.xautoclaim(stream, group, consumer, min_idle_time=0, count=-1) @skip_if_server_version_lt("5.0.0") @@ -4322,7 +4322,7 @@ def test_xgroup_create_mkstream(self, r): assert r.xinfo_groups(stream) == expected @skip_if_server_version_lt("7.0.0") - def 
test_xgroup_create_entriesread(self, r: redis.Redis): + def test_xgroup_create_entriesread(self, r: valkey.Valkey): stream = "stream" group = "group" r.xadd(stream, {"foo": "bar"}) @@ -4558,21 +4558,21 @@ def test_xpending_range_idle(self, r): def test_xpending_range_negative(self, r): stream = "stream" group = "group" - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.xpending_range(stream, group, min="-", max="+", count=None) with pytest.raises(ValueError): r.xpending_range(stream, group, min="-", max="+", count="one") - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.xpending_range(stream, group, min="-", max="+", count=-1) with pytest.raises(ValueError): r.xpending_range(stream, group, min="-", max="+", count=5, idle="one") - with pytest.raises(redis.exceptions.ResponseError): + with pytest.raises(valkey.exceptions.ResponseError): r.xpending_range(stream, group, min="-", max="+", count=5, idle=1.5) - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.xpending_range(stream, group, min="-", max="+", count=5, idle=-1) - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.xpending_range(stream, group, min=None, max=None, count=None, idle=0) - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.xpending_range( stream, group, min=None, max=None, count=None, consumername=0 ) @@ -4766,14 +4766,14 @@ def test_xtrim_minlen_and_length_args(self, r): r.xadd(stream, {"foo": "bar"}) # Future self: No limits without approximate, according to the api - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): assert r.xtrim(stream, 3, approximate=False, limit=2) # maxlen with a limit assert r.xtrim(stream, 3, approximate=True, limit=2) == 0 r.delete(stream) - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): assert r.xtrim(stream, maxlen=3, minid="sometestvalue") # minid with a limit 
@@ -4866,7 +4866,7 @@ def test_bitfield_operations(self, r): assert resp == [0, None, 255] @skip_if_server_version_lt("6.0.0") - def test_bitfield_ro(self, r: redis.Redis): + def test_bitfield_ro(self, r: valkey.Valkey): bf = r.bitfield("a") resp = bf.set("u8", 8, 255).execute() assert resp == [0] @@ -4889,24 +4889,24 @@ def test_memory_doctor(self, r): r.memory_doctor() @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_memory_malloc_stats(self, r): - if skip_if_redis_enterprise().args[0] is True: - with pytest.raises(redis.exceptions.ResponseError): + if skip_if_valkey_enterprise().args[0] is True: + with pytest.raises(valkey.exceptions.ResponseError): assert r.memory_malloc_stats() return assert r.memory_malloc_stats() @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_memory_stats(self, r): # put a key into the current db to make sure that "db." # has data r.set("foo", "bar") - if skip_if_redis_enterprise().args[0] is True: - with pytest.raises(redis.exceptions.ResponseError): + if skip_if_valkey_enterprise().args[0] is True: + with pytest.raises(valkey.exceptions.ResponseError): stats = r.memory_stats() return @@ -4922,36 +4922,36 @@ def test_memory_usage(self, r): assert isinstance(r.memory_usage("foo"), int) @skip_if_server_version_lt("7.0.0") - def test_latency_histogram_not_implemented(self, r: redis.Redis): + def test_latency_histogram_not_implemented(self, r: valkey.Valkey): with pytest.raises(NotImplementedError): r.latency_histogram() - def test_latency_graph_not_implemented(self, r: redis.Redis): + def test_latency_graph_not_implemented(self, r: valkey.Valkey): with pytest.raises(NotImplementedError): r.latency_graph() - def test_latency_doctor_not_implemented(self, r: redis.Redis): + def test_latency_doctor_not_implemented(self, r: valkey.Valkey): with pytest.raises(NotImplementedError): r.latency_doctor() - def test_latency_history(self, r: 
redis.Redis): + def test_latency_history(self, r: valkey.Valkey): assert r.latency_history("command") == [] - def test_latency_latest(self, r: redis.Redis): + def test_latency_latest(self, r: valkey.Valkey): assert r.latency_latest() == [] - def test_latency_reset(self, r: redis.Redis): + def test_latency_reset(self, r: valkey.Valkey): assert r.latency_reset() == 0 @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_module_list(self, r): assert isinstance(r.module_list(), list) for x in r.module_list(): assert isinstance(x, dict) @skip_if_server_version_lt("2.8.13") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_command_count(self, r): res = r.command_count() assert isinstance(res, int) @@ -4963,18 +4963,18 @@ def test_command_docs(self, r): r.command_docs("set") @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() - def test_command_list(self, r: redis.Redis): + @skip_if_valkey_enterprise() + def test_command_list(self, r: valkey.Valkey): assert len(r.command_list()) > 300 assert len(r.command_list(module="fakemod")) == 0 assert len(r.command_list(category="list")) > 15 assert b"lpop" in r.command_list(pattern="l*") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r.command_list(category="list", pattern="l*") @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.13") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_command_getkeys(self, r): res = r.command_getkeys("MSET", "a", "b", "c", "d", "e", "f") assert_resp_response(r, res, ["a", "c", "e"], [b"a", b"c", b"e"]) @@ -5004,8 +5004,8 @@ def test_command(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() - def test_command_getkeysandflags(self, r: redis.Redis): + @skip_if_valkey_enterprise() + def test_command_getkeysandflags(self, r: valkey.Valkey): assert_resp_response( r, r.command_getkeysandflags("LMOVE", 
"mylist1", "mylist2", "left", "left"), @@ -5021,25 +5021,25 @@ def test_command_getkeysandflags(self, r: redis.Redis): @pytest.mark.onlynoncluster @skip_if_server_version_lt("4.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_module(self, r): - with pytest.raises(redis.exceptions.ModuleError) as excinfo: + with pytest.raises(valkey.exceptions.ModuleError) as excinfo: r.module_load("/some/fake/path") assert "Error loading the extension." in str(excinfo.value) - with pytest.raises(redis.exceptions.ModuleError) as excinfo: + with pytest.raises(valkey.exceptions.ModuleError) as excinfo: r.module_load("/some/fake/path", "arg1", "arg2", "arg3", "arg4") assert "Error loading the extension." in str(excinfo.value) @pytest.mark.onlynoncluster @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() - def test_module_loadex(self, r: redis.Redis): - with pytest.raises(redis.exceptions.ModuleError) as excinfo: + @skip_if_valkey_enterprise() + def test_module_loadex(self, r: valkey.Valkey): + with pytest.raises(valkey.exceptions.ModuleError) as excinfo: r.module_loadex("/some/fake/path") assert "Error loading the extension." in str(excinfo.value) - with pytest.raises(redis.exceptions.ModuleError) as excinfo: + with pytest.raises(valkey.exceptions.ModuleError) as excinfo: r.module_loadex("/some/fake/path", ["name", "value"], ["arg1", "arg2"]) assert "Error loading the extension." 
in str(excinfo.value) @@ -5054,7 +5054,7 @@ def test_restore(self, r): assert r.get(key) == b"bar" # overwrite restore - with pytest.raises(redis.exceptions.ResponseError): + with pytest.raises(valkey.exceptions.ResponseError): assert r.restore(key, 0, dumpdata) r.set(key, "a new value!") assert r.restore(key, 0, dumpdata, replace=True) @@ -5088,19 +5088,19 @@ def test_restore_frequency(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("5.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_replicaof(self, r): - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): assert r.replicaof("NO ONE") assert r.replicaof("NO", "ONE") - def test_shutdown(self, r: redis.Redis): + def test_shutdown(self, r: valkey.Valkey): r.execute_command = mock.MagicMock() r.execute_command("SHUTDOWN", "NOSAVE") r.execute_command.assert_called_once_with("SHUTDOWN", "NOSAVE") @skip_if_server_version_lt("7.0.0") - def test_shutdown_with_params(self, r: redis.Redis): + def test_shutdown_with_params(self, r: valkey.Valkey): r.execute_command = mock.MagicMock() r.execute_command("SHUTDOWN", "SAVE", "NOW", "FORCE") r.execute_command.assert_called_once_with("SHUTDOWN", "SAVE", "NOW", "FORCE") @@ -5110,24 +5110,24 @@ def test_shutdown_with_params(self, r: redis.Redis): @pytest.mark.replica @pytest.mark.xfail(strict=False) @skip_if_server_version_lt("2.8.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_sync(self, r): r.flushdb() time.sleep(1) - r2 = redis.Redis(port=6380, decode_responses=False) + r2 = valkey.Valkey(port=6380, decode_responses=False) res = r2.sync() - assert b"REDIS" in res + assert b"REDIS" in res @pytest.mark.replica @skip_if_server_version_lt("2.8.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_psync(self, r): - r2 = redis.Redis(port=6380, decode_responses=False) + r2 = valkey.Valkey(port=6380, decode_responses=False) res = r2.psync(r2.client_id(), 1) assert 
b"FULLRESYNC" in res @pytest.mark.onlynoncluster - def test_interrupted_command(self, r: redis.Redis): + def test_interrupted_command(self, r: valkey.Valkey): """ Regression test for issue #1128: An Un-handled BaseException will leave the socket with un-read response to a previous @@ -5200,7 +5200,7 @@ def test_binary_lists(self, r): def test_22_info(self, r): """ - Older Redis versions contained 'allocation_stats' in INFO that + Older Valkey versions contained 'allocation_stats' in INFO that was the cause of a number of bugs when parsing. """ info = ( @@ -5234,7 +5234,7 @@ def test_22_info(self, r): assert "6" in parsed["allocation_stats"] assert ">=256" in parsed["allocation_stats"] - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_large_responses(self, r): "The PythonParser has some special cases for return values > 1MB" # load up 5MB of data into a key diff --git a/tests/test_connect.py b/tests/test_connect.py index 71986dd8..3ed672fc 100644 --- a/tests/test_connect.py +++ b/tests/test_connect.py @@ -6,8 +6,8 @@ import threading import pytest -from redis.connection import Connection, SSLConnection, UnixDomainSocketConnection -from redis.exceptions import ConnectionError +from valkey.connection import Connection, SSLConnection, UnixDomainSocketConnection +from valkey.exceptions import ConnectionError from .ssl_utils import get_ssl_filename @@ -124,11 +124,11 @@ def test_tcp_ssl_version_mismatch(tcp_address): def _assert_connect(conn, server_address, **tcp_kw): if isinstance(server_address, str): - if not _RedisUDSServer: + if not _ValkeyUDSServer: pytest.skip("Unix domain sockets are not supported on this platform") - server = _RedisUDSServer(server_address, _RedisRequestHandler) + server = _ValkeyUDSServer(server_address, _ValkeyRequestHandler) else: - server = _RedisTCPServer(server_address, _RedisRequestHandler, **tcp_kw) + server = _ValkeyTCPServer(server_address, _ValkeyRequestHandler, **tcp_kw) with server as aserver: t = 
threading.Thread(target=aserver.serve_forever) t.start() @@ -141,7 +141,7 @@ def _assert_connect(conn, server_address, **tcp_kw): t.join(timeout=5) -class _RedisTCPServer(socketserver.TCPServer): +class _ValkeyTCPServer(socketserver.TCPServer): def __init__( self, *args, @@ -186,7 +186,7 @@ def get_request(self): if hasattr(socketserver, "UnixStreamServer"): - class _RedisUDSServer(socketserver.UnixStreamServer): + class _ValkeyUDSServer(socketserver.UnixStreamServer): def __init__(self, *args, **kw) -> None: self._ready_event = threading.Event() self._stop_requested = False @@ -206,10 +206,10 @@ def is_serving(self): return not self._stop_requested else: - _RedisUDSServer = None + _ValkeyUDSServer = None -class _RedisRequestHandler(socketserver.StreamRequestHandler): +class _ValkeyRequestHandler(socketserver.StreamRequestHandler): def setup(self): _logger.info("%s connected", self.client_address) diff --git a/tests/test_connection.py b/tests/test_connection.py index bff24955..9c60aa8f 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -4,19 +4,19 @@ from unittest.mock import patch import pytest -import redis -from redis import ConnectionPool, Redis -from redis._parsers import _HiredisParser, _RESP2Parser, _RESP3Parser -from redis.backoff import NoBackoff -from redis.connection import ( +import valkey +from valkey import ConnectionPool, Valkey +from valkey._parsers import _HiredisParser, _RESP2Parser, _RESP3Parser +from valkey.backoff import NoBackoff +from valkey.connection import ( Connection, SSLConnection, UnixDomainSocketConnection, parse_url, ) -from redis.exceptions import ConnectionError, InvalidResponse, TimeoutError -from redis.retry import Retry -from redis.utils import HIREDIS_AVAILABLE +from valkey.exceptions import ConnectionError, InvalidResponse, TimeoutError +from valkey.retry import Retry +from valkey.utils import HIREDIS_AVAILABLE from .conftest import skip_if_server_version_lt from .mocks import MockSocket @@ -34,7 +34,7 @@ 
def test_invalid_response(r): @skip_if_server_version_lt("4.0.0") -@pytest.mark.redismod +@pytest.mark.valkeymod def test_loading_external_modules(r): def inner(): pass @@ -44,9 +44,9 @@ def inner(): assert isinstance(getattr(r, "myfuncname"), types.FunctionType) # and call it - from redis.commands import RedisModuleCommands + from valkey.commands import ValkeyModuleCommands - j = RedisModuleCommands.json + j = ValkeyModuleCommands.json r.load_external_module("sometestfuncname", j) # d = {'hello': 'world!'} @@ -138,7 +138,7 @@ def test_connect_timeout_error_without_retry(self): [_RESP2Parser, _RESP3Parser, _HiredisParser], ids=["RESP2Parser", "RESP3Parser", "HiredisParser"], ) -def test_connection_parse_response_resume(r: redis.Redis, parser_class): +def test_connection_parse_response_resume(r: valkey.Valkey, parser_class): """ This test verifies that the Connection parser, be that PythonParser or HiredisParser, @@ -211,46 +211,46 @@ def test_pack_command(Class): @pytest.mark.onlynoncluster def test_create_single_connection_client_from_url(): - client = redis.Redis.from_url( - "redis://localhost:6379/0?", single_connection_client=True + client = valkey.Valkey.from_url( + "valkey://localhost:6379/0?", single_connection_client=True ) assert client.connection is not None @pytest.mark.parametrize("from_url", (True, False), ids=("from_url", "from_args")) def test_pool_auto_close(request, from_url): - """Verify that basic Redis instances have auto_close_connection_pool set to True""" + """Verify that basic Valkey instances have auto_close_connection_pool set to True""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") url_args = parse_url(url) - def get_redis_connection(): + def get_valkey_connection(): if from_url: - return Redis.from_url(url) - return Redis(**url_args) + return Valkey.from_url(url) + return Valkey(**url_args) - r1 = get_redis_connection() + r1 = get_valkey_connection() assert 
r1.auto_close_connection_pool is True r1.close() @pytest.mark.parametrize("from_url", (True, False), ids=("from_url", "from_args")) -def test_redis_connection_pool(request, from_url): - """Verify that basic Redis instances using `connection_pool` +def test_valkey_connection_pool(request, from_url): + """Verify that basic Valkey instances using `connection_pool` have auto_close_connection_pool set to False""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") url_args = parse_url(url) pool = None - def get_redis_connection(): + def get_valkey_connection(): nonlocal pool if from_url: pool = ConnectionPool.from_url(url) else: pool = ConnectionPool(**url_args) - return Redis(connection_pool=pool) + return Valkey(connection_pool=pool) called = 0 @@ -259,7 +259,7 @@ def mock_disconnect(_): called += 1 with patch.object(ConnectionPool, "disconnect", mock_disconnect): - with get_redis_connection() as r1: + with get_valkey_connection() as r1: assert r1.auto_close_connection_pool is False assert called == 0 @@ -267,22 +267,22 @@ def mock_disconnect(_): @pytest.mark.parametrize("from_url", (True, False), ids=("from_url", "from_args")) -def test_redis_from_pool(request, from_url): - """Verify that basic Redis instances created using `from_pool()` +def test_valkey_from_pool(request, from_url): + """Verify that basic Valkey instances created using `from_pool()` have auto_close_connection_pool set to True""" - url: str = request.config.getoption("--redis-url") + url: str = request.config.getoption("--valkey-url") url_args = parse_url(url) pool = None - def get_redis_connection(): + def get_valkey_connection(): nonlocal pool if from_url: pool = ConnectionPool.from_url(url) else: pool = ConnectionPool(**url_args) - return Redis.from_pool(pool) + return Valkey.from_pool(pool) called = 0 @@ -291,7 +291,7 @@ def mock_disconnect(_): called += 1 with patch.object(ConnectionPool, "disconnect", mock_disconnect): - with 
get_redis_connection() as r1: + with get_valkey_connection() as r1: assert r1.auto_close_connection_pool is True assert called == 1 diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py index dee7c554..6db0e182 100644 --- a/tests/test_connection_pool.py +++ b/tests/test_connection_pool.py @@ -6,11 +6,11 @@ from unittest import mock import pytest -import redis -from redis.connection import to_bool -from redis.utils import SSL_AVAILABLE +import valkey +from valkey.connection import to_bool +from valkey.utils import SSL_AVAILABLE -from .conftest import _get_client, skip_if_redis_enterprise, skip_if_server_version_lt +from .conftest import _get_client, skip_if_server_version_lt, skip_if_valkey_enterprise from .test_pubsub import wait_for_message @@ -33,10 +33,10 @@ def get_pool( self, connection_kwargs=None, max_connections=None, - connection_class=redis.Connection, + connection_class=valkey.Connection, ): connection_kwargs = connection_kwargs or {} - pool = redis.ConnectionPool( + pool = valkey.ConnectionPool( connection_class=connection_class, max_connections=max_connections, **connection_kwargs, @@ -54,7 +54,7 @@ def test_connection_creation(self): def test_closing(self): connection_kwargs = {"foo": "bar", "biz": "baz"} - pool = redis.ConnectionPool( + pool = valkey.ConnectionPool( connection_class=DummyConnection, max_connections=None, **connection_kwargs, @@ -74,7 +74,7 @@ def test_max_connections(self, master_host): pool = self.get_pool(max_connections=2, connection_kwargs=connection_kwargs) pool.get_connection("_") pool.get_connection("_") - with pytest.raises(redis.ConnectionError): + with pytest.raises(valkey.ConnectionError): pool.get_connection("_") def test_reuse_previously_released_connection(self, master_host): @@ -93,7 +93,7 @@ def test_repr_contains_db_info_tcp(self): "client_name": "test-client", } pool = self.get_pool( - connection_kwargs=connection_kwargs, connection_class=redis.Connection + connection_kwargs=connection_kwargs, 
connection_class=valkey.Connection ) expected = "host=localhost,port=6379,db=1,client_name=test-client" assert expected in repr(pool) @@ -102,7 +102,7 @@ def test_repr_contains_db_info_unix(self): connection_kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"} pool = self.get_pool( connection_kwargs=connection_kwargs, - connection_class=redis.UnixDomainSocketConnection, + connection_class=valkey.UnixDomainSocketConnection, ) expected = "path=/abc,db=1,client_name=test-client" assert expected in repr(pool) @@ -111,7 +111,7 @@ def test_repr_contains_db_info_unix(self): class TestBlockingConnectionPool: def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): connection_kwargs = connection_kwargs or {} - pool = redis.BlockingConnectionPool( + pool = valkey.BlockingConnectionPool( connection_class=DummyConnection, max_connections=max_connections, timeout=timeout, @@ -147,7 +147,7 @@ def test_connection_pool_blocks_until_timeout(self, master_host): pool.get_connection("_") start = time.time() - with pytest.raises(redis.ConnectionError): + with pytest.raises(valkey.ConnectionError): pool.get_connection("_") # we should have waited at least 0.1 seconds assert time.time() - start >= 0.1 @@ -181,15 +181,15 @@ def test_reuse_previously_released_connection(self, master_host): assert c1 == c2 def test_repr_contains_db_info_tcp(self): - pool = redis.ConnectionPool( + pool = valkey.ConnectionPool( host="localhost", port=6379, client_name="test-client" ) expected = "host=localhost,port=6379,db=0,client_name=test-client" assert expected in repr(pool) def test_repr_contains_db_info_unix(self): - pool = redis.ConnectionPool( - connection_class=redis.UnixDomainSocketConnection, + pool = valkey.ConnectionPool( + connection_class=valkey.UnixDomainSocketConnection, path="abc", client_name="test-client", ) @@ -199,47 +199,47 @@ def test_repr_contains_db_info_unix(self): class TestConnectionPoolURLParsing: def test_hostname(self): - pool = 
redis.ConnectionPool.from_url("redis://my.host") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://my.host") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "my.host"} def test_quoted_hostname(self): - pool = redis.ConnectionPool.from_url("redis://my %2F host %2B%3D+") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://my %2F host %2B%3D+") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "my / host +=+"} def test_port(self): - pool = redis.ConnectionPool.from_url("redis://localhost:6380") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost:6380") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "port": 6380} @skip_if_server_version_lt("6.0.0") def test_username(self): - pool = redis.ConnectionPool.from_url("redis://myuser:@localhost") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://myuser:@localhost") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "username": "myuser"} @skip_if_server_version_lt("6.0.0") def test_quoted_username(self): - pool = redis.ConnectionPool.from_url( - "redis://%2Fmyuser%2F%2B name%3D%24+:@localhost" + pool = valkey.ConnectionPool.from_url( + "valkey://%2Fmyuser%2F%2B name%3D%24+:@localhost" ) - assert pool.connection_class == redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "username": "/myuser/+ name=$+", } def test_password(self): - pool = redis.ConnectionPool.from_url("redis://:mypassword@localhost") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://:mypassword@localhost") + assert 
pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "password": "mypassword"} def test_quoted_password(self): - pool = redis.ConnectionPool.from_url( - "redis://:%2Fmypass%2F%2B word%3D%24+@localhost" + pool = valkey.ConnectionPool.from_url( + "valkey://:%2Fmypass%2F%2B word%3D%24+@localhost" ) - assert pool.connection_class == redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "password": "/mypass/+ word=$+", @@ -247,8 +247,8 @@ def test_quoted_password(self): @skip_if_server_version_lt("6.0.0") def test_username_and_password(self): - pool = redis.ConnectionPool.from_url("redis://myuser:mypass@localhost") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://myuser:mypass@localhost") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "username": "myuser", @@ -256,27 +256,27 @@ def test_username_and_password(self): } def test_db_as_argument(self): - pool = redis.ConnectionPool.from_url("redis://localhost", db=1) - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost", db=1) + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "db": 1} def test_db_in_path(self): - pool = redis.ConnectionPool.from_url("redis://localhost/2", db=1) - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost/2", db=1) + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "db": 2} def test_db_in_querystring(self): - pool = redis.ConnectionPool.from_url("redis://localhost/2?db=3", db=1) - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost/2?db=3", db=1) + assert pool.connection_class == 
valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "db": 3} def test_extra_typed_querystring_options(self): - pool = redis.ConnectionPool.from_url( - "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10" + pool = valkey.ConnectionPool.from_url( + "valkey://localhost/2?socket_timeout=20&socket_connect_timeout=10" "&socket_keepalive=&retry_on_timeout=Yes&max_connections=10" ) - assert pool.connection_class == redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "db": 2, @@ -307,54 +307,56 @@ def test_boolean_parsing(self): assert expected is to_bool(value) def test_client_name_in_querystring(self): - pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client") + pool = valkey.ConnectionPool.from_url( + "valkey://location?client_name=test-client" + ) assert pool.connection_kwargs["client_name"] == "test-client" def test_invalid_extra_typed_querystring_options(self): with pytest.raises(ValueError): - redis.ConnectionPool.from_url( - "redis://localhost/2?socket_timeout=_&socket_connect_timeout=abc" + valkey.ConnectionPool.from_url( + "valkey://localhost/2?socket_timeout=_&socket_connect_timeout=abc" ) def test_extra_querystring_options(self): - pool = redis.ConnectionPool.from_url("redis://localhost?a=1&b=2") - assert pool.connection_class == redis.Connection + pool = valkey.ConnectionPool.from_url("valkey://localhost?a=1&b=2") + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == {"host": "localhost", "a": "1", "b": "2"} def test_calling_from_subclass_returns_correct_instance(self): - pool = redis.BlockingConnectionPool.from_url("redis://localhost") - assert isinstance(pool, redis.BlockingConnectionPool) + pool = valkey.BlockingConnectionPool.from_url("valkey://localhost") + assert isinstance(pool, valkey.BlockingConnectionPool) def test_client_creates_connection_pool(self): - r = 
redis.Redis.from_url("redis://myhost") - assert r.connection_pool.connection_class == redis.Connection + r = valkey.Valkey.from_url("valkey://myhost") + assert r.connection_pool.connection_class == valkey.Connection assert r.connection_pool.connection_kwargs == {"host": "myhost"} def test_invalid_scheme_raises_error(self): with pytest.raises(ValueError) as cm: - redis.ConnectionPool.from_url("localhost") + valkey.ConnectionPool.from_url("localhost") assert str(cm.value) == ( - "Redis URL must specify one of the following schemes " - "(redis://, rediss://, unix://)" + "Valkey URL must specify one of the following schemes " + "(valkey://, valkeys://, unix://)" ) def test_invalid_scheme_raises_error_when_double_slash_missing(self): with pytest.raises(ValueError) as cm: - redis.ConnectionPool.from_url("redis:foo.bar.com:12345") + valkey.ConnectionPool.from_url("valkey:foo.bar.com:12345") assert str(cm.value) == ( - "Redis URL must specify one of the following schemes " - "(redis://, rediss://, unix://)" + "Valkey URL must specify one of the following schemes " + "(valkey://, valkeys://, unix://)" ) class TestBlockingConnectionPoolURLParsing: def test_extra_typed_querystring_options(self): - pool = redis.BlockingConnectionPool.from_url( - "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10" + pool = valkey.BlockingConnectionPool.from_url( + "valkey://localhost/2?socket_timeout=20&socket_connect_timeout=10" "&socket_keepalive=&retry_on_timeout=Yes&max_connections=10&timeout=42" ) - assert pool.connection_class == redis.Connection + assert pool.connection_class == valkey.Connection assert pool.connection_kwargs == { "host": "localhost", "db": 2, @@ -367,83 +369,85 @@ def test_extra_typed_querystring_options(self): def test_invalid_extra_typed_querystring_options(self): with pytest.raises(ValueError): - redis.BlockingConnectionPool.from_url( - "redis://localhost/2?timeout=_not_a_float_" + valkey.BlockingConnectionPool.from_url( + 
"valkey://localhost/2?timeout=_not_a_float_" ) class TestConnectionPoolUnixSocketURLParsing: def test_defaults(self): - pool = redis.ConnectionPool.from_url("unix:///socket") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket"} @skip_if_server_version_lt("6.0.0") def test_username(self): - pool = redis.ConnectionPool.from_url("unix://myuser:@/socket") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix://myuser:@/socket") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "username": "myuser"} @skip_if_server_version_lt("6.0.0") def test_quoted_username(self): - pool = redis.ConnectionPool.from_url( + pool = valkey.ConnectionPool.from_url( "unix://%2Fmyuser%2F%2B name%3D%24+:@/socket" ) - assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == { "path": "/socket", "username": "/myuser/+ name=$+", } def test_password(self): - pool = redis.ConnectionPool.from_url("unix://:mypassword@/socket") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix://:mypassword@/socket") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "password": "mypassword"} def test_quoted_password(self): - pool = redis.ConnectionPool.from_url( + pool = valkey.ConnectionPool.from_url( "unix://:%2Fmypass%2F%2B word%3D%24+@/socket" ) - assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == { "path": "/socket", "password": "/mypass/+ 
word=$+", } def test_quoted_path(self): - pool = redis.ConnectionPool.from_url( + pool = valkey.ConnectionPool.from_url( "unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket" ) - assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == { "path": "/my/path/to/../+_+=$ocket", "password": "mypassword", } def test_db_as_argument(self): - pool = redis.ConnectionPool.from_url("unix:///socket", db=1) - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket", db=1) + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "db": 1} def test_db_in_querystring(self): - pool = redis.ConnectionPool.from_url("unix:///socket?db=2", db=1) - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket?db=2", db=1) + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "db": 2} def test_client_name_in_querystring(self): - pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client") + pool = valkey.ConnectionPool.from_url( + "valkey://location?client_name=test-client" + ) assert pool.connection_kwargs["client_name"] == "test-client" def test_extra_querystring_options(self): - pool = redis.ConnectionPool.from_url("unix:///socket?a=1&b=2") - assert pool.connection_class == redis.UnixDomainSocketConnection + pool = valkey.ConnectionPool.from_url("unix:///socket?a=1&b=2") + assert pool.connection_class == valkey.UnixDomainSocketConnection assert pool.connection_kwargs == {"path": "/socket", "a": "1", "b": "2"} def test_connection_class_override(self): - class MyConnection(redis.UnixDomainSocketConnection): + class MyConnection(valkey.UnixDomainSocketConnection): pass - pool = 
redis.ConnectionPool.from_url( + pool = valkey.ConnectionPool.from_url( "unix:///socket", connection_class=MyConnection ) assert pool.connection_class == MyConnection @@ -452,39 +456,39 @@ class MyConnection(redis.UnixDomainSocketConnection): @pytest.mark.skipif(not SSL_AVAILABLE, reason="SSL not installed") class TestSSLConnectionURLParsing: def test_host(self): - pool = redis.ConnectionPool.from_url("rediss://my.host") - assert pool.connection_class == redis.SSLConnection + pool = valkey.ConnectionPool.from_url("valkeys://my.host") + assert pool.connection_class == valkey.SSLConnection assert pool.connection_kwargs == {"host": "my.host"} def test_connection_class_override(self): - class MyConnection(redis.SSLConnection): + class MyConnection(valkey.SSLConnection): pass - pool = redis.ConnectionPool.from_url( - "rediss://my.host", connection_class=MyConnection + pool = valkey.ConnectionPool.from_url( + "valkeys://my.host", connection_class=MyConnection ) assert pool.connection_class == MyConnection def test_cert_reqs_options(self): import ssl - class DummyConnectionPool(redis.ConnectionPool): + class DummyConnectionPool(valkey.ConnectionPool): def get_connection(self, *args, **kwargs): return self.make_connection() - pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=none") + pool = DummyConnectionPool.from_url("valkeys://?ssl_cert_reqs=none") assert pool.get_connection("_").cert_reqs == ssl.CERT_NONE - pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=optional") + pool = DummyConnectionPool.from_url("valkeys://?ssl_cert_reqs=optional") assert pool.get_connection("_").cert_reqs == ssl.CERT_OPTIONAL - pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=required") + pool = DummyConnectionPool.from_url("valkeys://?ssl_cert_reqs=required") assert pool.get_connection("_").cert_reqs == ssl.CERT_REQUIRED - pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=False") + pool = 
DummyConnectionPool.from_url("valkeys://?ssl_check_hostname=False") assert pool.get_connection("_").check_hostname is False - pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=True") + pool = DummyConnectionPool.from_url("valkeys://?ssl_check_hostname=True") assert pool.get_connection("_").check_hostname is True @@ -492,13 +496,13 @@ class TestConnection: def test_on_connect_error(self): """ An error in Connection.on_connect should disconnect from the server - see for details: https://github.com/andymccurdy/redis-py/issues/368 + see for details: https://github.com/andymccurdy/valkey-py/issues/368 """ - # this assumes the Redis server being tested against doesn't have + # this assumes the Valkey server being tested against doesn't have # 9999 databases ;) - bad_connection = redis.Redis(db=9999) + bad_connection = valkey.Valkey(db=9999) # an error should be raised on connect - with pytest.raises(redis.RedisError): + with pytest.raises(valkey.ValkeyError): bad_connection.info() pool = bad_connection.connection_pool assert len(pool._available_connections) == 1 @@ -506,26 +510,26 @@ def test_on_connect_error(self): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_busy_loading_disconnects_socket(self, r): """ - If Redis raises a LOADING error, the connection should be + If Valkey raises a LOADING error, the connection should be disconnected and a BusyLoadingError raised """ - with pytest.raises(redis.BusyLoadingError): + with pytest.raises(valkey.BusyLoadingError): r.execute_command("DEBUG", "ERROR", "LOADING fake message") assert not r.connection._sock @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_busy_loading_from_pipeline_immediate_command(self, r): """ BusyLoadingErrors should raise from Pipelines that execute a command immediately, like WATCH does. 
""" pipe = r.pipeline() - with pytest.raises(redis.BusyLoadingError): + with pytest.raises(valkey.BusyLoadingError): pipe.immediate_execute_command("DEBUG", "ERROR", "LOADING fake message") pool = r.connection_pool assert not pipe.connection @@ -534,7 +538,7 @@ def test_busy_loading_from_pipeline_immediate_command(self, r): @pytest.mark.onlynoncluster @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_busy_loading_from_pipeline(self, r): """ BusyLoadingErrors should be raised from a pipeline execution @@ -542,7 +546,7 @@ def test_busy_loading_from_pipeline(self, r): """ pipe = r.pipeline() pipe.execute_command("DEBUG", "ERROR", "LOADING fake message") - with pytest.raises(redis.BusyLoadingError): + with pytest.raises(valkey.BusyLoadingError): pipe.execute() pool = r.connection_pool assert not pipe.connection @@ -550,21 +554,21 @@ def test_busy_loading_from_pipeline(self, r): assert not pool._available_connections[0]._sock @skip_if_server_version_lt("2.8.8") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_read_only_error(self, r): "READONLY errors get turned into ReadOnlyError exceptions" - with pytest.raises(redis.ReadOnlyError): + with pytest.raises(valkey.ReadOnlyError): r.execute_command("DEBUG", "ERROR", "READONLY blah blah") def test_oom_error(self, r): "OOM errors get turned into OutOfMemoryError exceptions" - with pytest.raises(redis.OutOfMemoryError): + with pytest.raises(valkey.OutOfMemoryError): # note: don't use the DEBUG OOM command since it's not the same # as the db being full r.execute_command("DEBUG", "ERROR", "OOM blah blah") def test_connect_from_url_tcp(self): - connection = redis.Redis.from_url("redis://localhost") + connection = valkey.Valkey.from_url("valkey://localhost") pool = connection.connection_pool assert re.match( @@ -576,7 +580,7 @@ def test_connect_from_url_tcp(self): ) def test_connect_from_url_unix(self): - connection = 
redis.Redis.from_url("unix:///path/to/socket") + connection = valkey.Valkey.from_url("unix:///path/to/socket") pool = connection.connection_pool assert re.match( @@ -587,20 +591,20 @@ def test_connect_from_url_unix(self): "path=/path/to/socket,db=0", ) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_connect_no_auth_configured(self, r): """ AuthenticationError should be raised when the server is not configured with auth but credentials are supplied by the user. """ - # Redis < 6 - with pytest.raises(redis.AuthenticationError): + # Valkey < 6 + with pytest.raises(valkey.AuthenticationError): r.execute_command( "DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set" ) - # Redis >= 6 - with pytest.raises(redis.AuthenticationError): + # Valkey >= 6 + with pytest.raises(valkey.AuthenticationError): r.execute_command( "DEBUG", "ERROR", @@ -609,17 +613,17 @@ def test_connect_no_auth_configured(self, r): "your configuration is correct?", ) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_connect_invalid_auth_credentials_supplied(self, r): """ AuthenticationError should be raised when sending invalid username/password """ - # Redis < 6 - with pytest.raises(redis.AuthenticationError): + # Valkey < 6 + with pytest.raises(valkey.AuthenticationError): r.execute_command("DEBUG", "ERROR", "ERR invalid password") - # Redis >= 6 - with pytest.raises(redis.AuthenticationError): + # Valkey >= 6 + with pytest.raises(valkey.AuthenticationError): r.execute_command("DEBUG", "ERROR", "WRONGPASS") @@ -627,7 +631,7 @@ def test_connect_invalid_auth_credentials_supplied(self, r): class TestMultiConnectionClient: @pytest.fixture() def r(self, request): - return _get_client(redis.Redis, request, single_connection_client=False) + return _get_client(valkey.Valkey, request, single_connection_client=False) def test_multi_connection_command(self, r): assert not r.connection @@ -641,7 +645,7 @@ class TestHealthCheck: @pytest.fixture() def r(self, 
request): - return _get_client(redis.Redis, request, health_check_interval=self.interval) + return _get_client(valkey.Valkey, request, health_check_interval=self.interval) def assert_interval_advanced(self, connection): diff = connection.next_health_check - time.time() diff --git a/tests/test_credentials.py b/tests/test_credentials.py index aade04e0..fd15428d 100644 --- a/tests/test_credentials.py +++ b/tests/test_credentials.py @@ -4,11 +4,11 @@ from typing import Optional, Tuple, Union import pytest -import redis -from redis import AuthenticationError, DataError, ResponseError -from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider -from redis.utils import str_if_bytes -from tests.conftest import _get_client, skip_if_redis_enterprise +import valkey +from tests.conftest import _get_client, skip_if_valkey_enterprise +from valkey import AuthenticationError, DataError, ResponseError +from valkey.credentials import CredentialProvider, UsernamePasswordCredentialProvider +from valkey.utils import str_if_bytes class NoPassCredProvider(CredentialProvider): @@ -98,18 +98,18 @@ def teardown(): class TestCredentialsProvider: - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_only_pass_without_creds_provider(self, r, request): # test for default user (`username` is supposed to be optional) password = "password" init_required_pass(r, request, password) assert r.auth(password) is True - r2 = _get_client(redis.Redis, request, flushdb=False, password=password) + r2 = _get_client(valkey.Valkey, request, flushdb=False, password=password) assert r2.ping() is True - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_user_and_pass_without_creds_provider(self, r, request): """ Test backward compatibility with username and password @@ -120,13 +120,13 @@ def test_user_and_pass_without_creds_provider(self, r, request): init_acl_user(r, request, username, password) r2 = _get_client( - redis.Redis, request, flushdb=False, 
username=username, password=password + valkey.Valkey, request, flushdb=False, username=username, password=password ) assert r2.ping() is True @pytest.mark.parametrize("username", ["username", None]) - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() @pytest.mark.onlynoncluster def test_credential_provider_with_supplier(self, r, request, username): creds_provider = RandomAuthCredProvider( @@ -142,7 +142,7 @@ def test_credential_provider_with_supplier(self, r, request, username): init_required_pass(r, request, password) r2 = _get_client( - redis.Redis, request, flushdb=False, credential_provider=creds_provider + valkey.Valkey, request, flushdb=False, credential_provider=creds_provider ) assert r2.ping() is True @@ -150,7 +150,7 @@ def test_credential_provider_with_supplier(self, r, request, username): def test_credential_provider_no_password_success(self, r, request): init_acl_user(r, request, "username", "") r2 = _get_client( - redis.Redis, + valkey.Valkey, request, flushdb=False, credential_provider=NoPassCredProvider(), @@ -162,7 +162,7 @@ def test_credential_provider_no_password_error(self, r, request): init_acl_user(r, request, "username", "password") with pytest.raises(AuthenticationError) as e: _get_client( - redis.Redis, + valkey.Valkey, request, flushdb=False, credential_provider=NoPassCredProvider(), @@ -179,7 +179,7 @@ def test_password_and_username_together_with_cred_provider_raise_error( ) with pytest.raises(DataError) as e: _get_client( - redis.Redis, + valkey.Valkey, request, flushdb=False, username="username", @@ -205,7 +205,7 @@ def teardown(): init_acl_user(r, request, username, password) r2 = _get_client( - redis.Redis, request, flushdb=False, username=username, password=password + valkey.Valkey, request, flushdb=False, username=username, password=password ) assert r2.ping() is True conn = r2.connection_pool.get_connection("_") @@ -230,7 +230,7 @@ def test_user_pass_credential_provider_acl_user_and_pass(self, r, request): assert 
provider.get_credentials() == (username, password) init_acl_user(r, request, provider.username, provider.password) r2 = _get_client( - redis.Redis, request, flushdb=False, credential_provider=provider + valkey.Valkey, request, flushdb=False, credential_provider=provider ) assert r2.ping() is True @@ -244,7 +244,7 @@ def test_user_pass_provider_only_password(self, r, request): init_required_pass(r, request, password) r2 = _get_client( - redis.Redis, request, flushdb=False, credential_provider=provider + valkey.Valkey, request, flushdb=False, credential_provider=provider ) assert r2.auth(provider.password) is True assert r2.ping() is True diff --git a/tests/test_encoding.py b/tests/test_encoding.py index 331cd510..3dda182c 100644 --- a/tests/test_encoding.py +++ b/tests/test_encoding.py @@ -1,7 +1,7 @@ import pytest -import redis -from redis.connection import Connection -from redis.utils import HIREDIS_PACK_AVAILABLE +import valkey +from valkey.connection import Connection +from valkey.utils import HIREDIS_PACK_AVAILABLE from .conftest import _get_client @@ -9,11 +9,11 @@ class TestEncoding: @pytest.fixture() def r(self, request): - return _get_client(redis.Redis, request=request, decode_responses=True) + return _get_client(valkey.Valkey, request=request, decode_responses=True) @pytest.fixture() def r_no_decode(self, request): - return _get_client(redis.Redis, request=request, decode_responses=False) + return _get_client(valkey.Valkey, request=request, decode_responses=False) def test_simple_encoding(self, r_no_decode): unicode_string = chr(3456) + "abcd" + chr(3421) @@ -34,7 +34,7 @@ def test_memoryview_encoding(self, r_no_decode): unicode_string_view = memoryview(unicode_string.encode("utf-8")) r_no_decode["unicode-string-memoryview"] = unicode_string_view cached_val = r_no_decode["unicode-string-memoryview"] - # The cached value won't be a memoryview because it's a copy from Redis + # The cached value won't be a memoryview because it's a copy from Valkey assert 
isinstance(cached_val, bytes) assert unicode_string == cached_val.decode("utf-8") @@ -56,7 +56,7 @@ def test_list_encoding(self, r): class TestEncodingErrors: def test_ignore(self, request): r = _get_client( - redis.Redis, + valkey.Valkey, request=request, decode_responses=True, encoding_errors="ignore", @@ -66,7 +66,7 @@ def test_ignore(self, request): def test_replace(self, request): r = _get_client( - redis.Redis, + valkey.Valkey, request=request, decode_responses=True, encoding_errors="replace", @@ -94,7 +94,7 @@ def test_memoryviews_are_not_packed(self): class TestCommandsAreNotEncoded: @pytest.fixture() def r(self, request): - return _get_client(redis.Redis, request=request, encoding="utf-8") + return _get_client(valkey.Valkey, request=request, encoding="utf-8") def test_basic_command(self, r): r.set("hello", "world") @@ -102,11 +102,11 @@ def test_basic_command(self, r): class TestInvalidUserInput: def test_boolean_fails(self, r): - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.set("a", True) def test_none_fails(self, r): - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.set("a", None) def test_user_type_fails(self, r): @@ -114,5 +114,5 @@ class Foo: def __str__(self): return "Foo" - with pytest.raises(redis.DataError): + with pytest.raises(valkey.DataError): r.set("a", Foo()) diff --git a/tests/test_function.py b/tests/test_function.py index 9d6712ec..a4637872 100644 --- a/tests/test_function.py +++ b/tests/test_function.py @@ -1,5 +1,5 @@ import pytest -from redis.exceptions import ResponseError +from valkey.exceptions import ResponseError from .conftest import assert_resp_response, skip_if_server_version_lt diff --git a/tests/test_graph.py b/tests/test_graph.py index c6d12890..3bd7a65c 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -1,10 +1,11 @@ from unittest.mock import patch import pytest -from redis import Redis -from redis.commands.graph import Edge, Node, Path -from 
redis.commands.graph.execution_plan import Operation -from redis.commands.graph.query_result import ( +from tests.conftest import _get_client, skip_if_valkey_enterprise +from valkey import Valkey +from valkey.commands.graph import Edge, Node, Path +from valkey.commands.graph.execution_plan import Operation +from valkey.commands.graph.query_result import ( CACHED_EXECUTION, INDICES_CREATED, INDICES_DELETED, @@ -19,13 +20,14 @@ RELATIONSHIPS_DELETED, QueryResult, ) -from redis.exceptions import ResponseError -from tests.conftest import _get_client, skip_if_redis_enterprise +from valkey.exceptions import ResponseError + +pytestmark = pytest.mark.skip @pytest.fixture def client(request): - r = _get_client(Redis, request, decode_responses=True) + r = _get_client(Valkey, request, decode_responses=True) r.flushdb() return r @@ -316,7 +318,7 @@ def test_profile(client): assert "Node By Label Scan | (p:Person) | Records produced: 3" in profile -@skip_if_redis_enterprise() +@skip_if_valkey_enterprise() def test_config(client): config_name = "RESULTSET_SIZE" config_value = 3 @@ -371,14 +373,14 @@ def test_list_keys(client): def test_multi_label(client): - redis_graph = client.graph("g") + valkey_graph = client.graph("g") node = Node(label=["l", "ll"]) - redis_graph.add_node(node) - redis_graph.commit() + valkey_graph.add_node(node) + valkey_graph.commit() query = "MATCH (n) RETURN n" - result = redis_graph.query(query) + result = valkey_graph.query(query) result_node = result.result_set[0][0] assert result_node == node @@ -468,33 +470,33 @@ def test_cache_sync(client): def test_execution_plan(client): - redis_graph = client.graph("execution_plan") + valkey_graph = client.graph("execution_plan") create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" - redis_graph.query(create_query) + 
valkey_graph.query(create_query) - result = redis_graph.execution_plan( + result = valkey_graph.execution_plan( "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name, $params", # noqa {"name": "Yehuda"}, ) expected = "Results\n Project\n Conditional Traverse | (t)->(r:Rider)\n Filter\n Node By Label Scan | (t:Team)" # noqa assert result == expected - redis_graph.delete() + valkey_graph.delete() def test_explain(client): - redis_graph = client.graph("execution_plan") + valkey_graph = client.graph("execution_plan") # graph creation / population create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" - redis_graph.query(create_query) + valkey_graph.query(create_query) - result = redis_graph.explain( + result = valkey_graph.explain( """MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name @@ -546,7 +548,7 @@ def test_explain(client): assert result.structured_plan == expected - result = redis_graph.explain( + result = valkey_graph.explain( """MATCH (r:Rider), (t:Team) RETURN r.name, t.name""" ) @@ -570,7 +572,7 @@ def test_explain(client): assert result.structured_plan == expected - redis_graph.delete() + valkey_graph.delete() def test_resultset_statistics(client): diff --git a/tests/test_graph_utils/test_edge.py b/tests/test_graph_utils/test_edge.py index 1918a6ff..c3432148 100644 --- a/tests/test_graph_utils/test_edge.py +++ b/tests/test_graph_utils/test_edge.py @@ -1,8 +1,8 @@ import pytest -from redis.commands.graph import edge, node +from valkey.commands.graph import edge, node -@pytest.mark.redismod +@pytest.mark.valkeymod def test_init(): with pytest.raises(AssertionError): edge.Edge(None, None, None) @@ -14,7 +14,7 @@ def test_init(): ) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_to_string(): props_result = edge.Edge( node.Node(), 
None, node.Node(), properties={"a": "a", "b": 10} @@ -27,7 +27,7 @@ def test_to_string(): assert no_props_result == "" -@pytest.mark.redismod +@pytest.mark.valkeymod def test_stringify(): john = node.Node( alias="a", @@ -60,7 +60,7 @@ def test_stringify(): ) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_comparison(): node1 = node.Node(node_id=1) node2 = node.Node(node_id=2) diff --git a/tests/test_graph_utils/test_node.py b/tests/test_graph_utils/test_node.py index 22e6d594..4c23a160 100644 --- a/tests/test_graph_utils/test_node.py +++ b/tests/test_graph_utils/test_node.py @@ -1,5 +1,5 @@ import pytest -from redis.commands.graph import node +from valkey.commands.graph import node @pytest.fixture @@ -12,7 +12,7 @@ def fixture(): return no_args, no_props, props_only, no_label, multi_label -@pytest.mark.redismod +@pytest.mark.valkeymod def test_to_string(fixture): no_args, no_props, props_only, no_label, multi_label = fixture assert no_args.to_string() == "" @@ -22,7 +22,7 @@ def test_to_string(fixture): assert multi_label.to_string() == "" -@pytest.mark.redismod +@pytest.mark.valkeymod def test_stringify(fixture): no_args, no_props, props_only, no_label, multi_label = fixture assert str(no_args) == "()" @@ -32,7 +32,7 @@ def test_stringify(fixture): assert str(multi_label) == "(alias:l:ll)" -@pytest.mark.redismod +@pytest.mark.valkeymod def test_comparison(fixture): no_args, no_props, props_only, no_label, multi_label = fixture diff --git a/tests/test_graph_utils/test_path.py b/tests/test_graph_utils/test_path.py index 1bd38efa..476fc0ea 100644 --- a/tests/test_graph_utils/test_path.py +++ b/tests/test_graph_utils/test_path.py @@ -1,8 +1,8 @@ import pytest -from redis.commands.graph import edge, node, path +from valkey.commands.graph import edge, node, path -@pytest.mark.redismod +@pytest.mark.valkeymod def test_init(): with pytest.raises(TypeError): path.Path(None, None) @@ -12,7 +12,7 @@ def test_init(): assert isinstance(path.Path([], []), path.Path) 
-@pytest.mark.redismod +@pytest.mark.valkeymod def test_new_empty_path(): new_empty_path = path.Path.new_empty_path() assert isinstance(new_empty_path, path.Path) @@ -20,7 +20,7 @@ def test_new_empty_path(): assert new_empty_path._edges == [] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_wrong_flows(): node_1 = node.Node(node_id=1) node_2 = node.Node(node_id=2) @@ -42,7 +42,7 @@ def test_wrong_flows(): p.add_edge(edge_2) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_nodes_and_edges(): node_1 = node.Node(node_id=1) node_2 = node.Node(node_id=2) @@ -69,7 +69,7 @@ def test_nodes_and_edges(): assert 2 == p.nodes_count() -@pytest.mark.redismod +@pytest.mark.valkeymod def test_compare(): node_1 = node.Node(node_id=1) node_2 = node.Node(node_id=2) diff --git a/tests/test_helpers.py b/tests/test_helpers.py index 66ee1c53..b906f573 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -1,6 +1,6 @@ import string -from redis.commands.helpers import ( +from valkey.commands.helpers import ( delist, list_or_args, nativestr, diff --git a/tests/test_json.py b/tests/test_json.py index 73d72b8c..15de1538 100644 --- a/tests/test_json.py +++ b/tests/test_json.py @@ -1,15 +1,17 @@ import pytest -import redis -from redis import Redis, exceptions -from redis.commands.json.decoders import decode_list, unstring -from redis.commands.json.path import Path +import valkey +from valkey import Valkey, exceptions +from valkey.commands.json.decoders import decode_list, unstring +from valkey.commands.json.path import Path from .conftest import _get_client, assert_resp_response, skip_ifmodversion_lt +pytestmark = pytest.mark.skip + @pytest.fixture def client(request): - r = _get_client(Redis, request, decode_responses=True) + r = _get_client(Valkey, request, decode_responses=True) r.flushdb() return r @@ -169,7 +171,7 @@ def test_toggle(client): assert client.json().toggle("bool", Path.root_path()) is False # check non-boolean value client.json().set("num", 
Path.root_path(), 1) - with pytest.raises(redis.exceptions.ResponseError): + with pytest.raises(valkey.exceptions.ResponseError): client.json().toggle("num", Path.root_path()) diff --git a/tests/test_lock.py b/tests/test_lock.py index 5c804b42..00c50f54 100644 --- a/tests/test_lock.py +++ b/tests/test_lock.py @@ -1,9 +1,9 @@ import time import pytest -from redis.client import Redis -from redis.exceptions import LockError, LockNotOwnedError -from redis.lock import Lock +from valkey.client import Valkey +from valkey.exceptions import LockError, LockNotOwnedError +from valkey.lock import Lock from .conftest import _get_client @@ -11,11 +11,11 @@ class TestLock: @pytest.fixture() def r_decoded(self, request): - return _get_client(Redis, request=request, decode_responses=True) + return _get_client(Valkey, request=request, decode_responses=True) - def get_lock(self, redis, *args, **kwargs): + def get_lock(self, valkey, *args, **kwargs): kwargs["lock_class"] = Lock - return redis.lock(*args, **kwargs) + return valkey.lock(*args, **kwargs) def test_lock(self, r): lock = self.get_lock(r, "foo") diff --git a/tests/test_monitor.py b/tests/test_monitor.py index 9b07c802..62d05568 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -1,8 +1,8 @@ import pytest from .conftest import ( - skip_if_redis_enterprise, - skip_ifnot_redis_enterprise, + skip_if_valkey_enterprise, + skip_ifnot_valkey_enterprise, wait_for_command, ) @@ -47,7 +47,7 @@ def test_command_with_escaped_data(self, r): response = wait_for_command(r, m, "GET foo\\\\x92") assert response["command"] == "GET foo\\\\x92" - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_lua_script(self, r): with r.monitor() as m: script = 'return redis.call("GET", "foo")' @@ -58,7 +58,7 @@ def test_lua_script(self, r): assert response["client_address"] == "lua" assert response["client_port"] == "" - @skip_ifnot_redis_enterprise() + @skip_ifnot_valkey_enterprise() def test_lua_script_in_enterprise(self, 
r): with r.monitor() as m: script = 'return redis.call("GET", "foo")' diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py index 5cda3190..a749246c 100644 --- a/tests/test_multiprocessing.py +++ b/tests/test_multiprocessing.py @@ -2,9 +2,9 @@ import multiprocessing import pytest -import redis -from redis.connection import Connection, ConnectionPool -from redis.exceptions import ConnectionError +import valkey +from valkey.connection import Connection, ConnectionPool +from valkey.exceptions import ConnectionError from .conftest import _get_client @@ -25,7 +25,9 @@ class TestMultiprocessing: # actually fork/process-safe @pytest.fixture() def r(self, request): - return _get_client(redis.Redis, request=request, single_connection_client=False) + return _get_client( + valkey.Valkey, request=request, single_connection_client=False + ) def test_close_connection_in_child(self, master_host): """ @@ -87,7 +89,7 @@ def test_pool(self, max_connections, master_host): by a parent. """ pool = ConnectionPool.from_url( - f"redis://{master_host[0]}:{master_host[1]}", + f"valkey://{master_host[0]}:{master_host[1]}", max_connections=max_connections, ) @@ -124,7 +126,7 @@ def test_close_pool_in_main(self, max_connections, master_host): when the parent disconnects all connections within the pool. 
""" pool = ConnectionPool.from_url( - f"redis://{master_host[0]}:{master_host[1]}", + f"valkey://{master_host[0]}:{master_host[1]}", max_connections=max_connections, ) @@ -151,8 +153,8 @@ def target(pool, disconnect_event): proc.join(3) assert proc.exitcode == 0 - def test_redis_client(self, r): - "A redis client created in a parent can also be used in a child" + def test_valkey_client(self, r): + "A valkey client created in a parent can also be used in a child" assert r.ping() is True def target(client): diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 7f10fcad..065f898c 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -2,7 +2,7 @@ from unittest import mock import pytest -import redis +import valkey from .conftest import skip_if_server_version_lt, wait_for_command @@ -81,7 +81,7 @@ def test_pipeline_no_transaction_watch_failure(self, r): pipe.multi() pipe.set("a", int(a) + 1) - with pytest.raises(redis.WatchError): + with pytest.raises(valkey.WatchError): pipe.execute() assert r["a"] == b"bad" @@ -103,7 +103,7 @@ def test_exec_error_in_response(self, r): # we can't lpush to a key that's a string value, so this should # be a ResponseError exception - assert isinstance(result[2], redis.ResponseError) + assert isinstance(result[2], valkey.ResponseError) assert r["c"] == b"a" # since this isn't a transaction, the other commands after the @@ -119,7 +119,7 @@ def test_exec_error_raised(self, r): r["c"] = "a" with r.pipeline() as pipe: pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: pipe.execute() assert str(ex.value).startswith( "Command # 3 (LPUSH c 3) of pipeline caused error: " @@ -163,7 +163,7 @@ def test_parse_error_raised(self, r): with r.pipeline() as pipe: # the zrem is invalid because we don't pass any keys to it pipe.set("a", 1).zrem("b").set("b", 2) - with pytest.raises(redis.ResponseError) as ex: + with 
pytest.raises(valkey.ResponseError) as ex: pipe.execute() assert str(ex.value).startswith( @@ -180,7 +180,7 @@ def test_parse_error_raised_transaction(self, r): pipe.multi() # the zrem is invalid because we don't pass any keys to it pipe.set("a", 1).zrem("b").set("b", 2) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: pipe.execute() assert str(ex.value).startswith( @@ -219,7 +219,7 @@ def test_watch_failure(self, r): r["b"] = 3 pipe.multi() pipe.get("a") - with pytest.raises(redis.WatchError): + with pytest.raises(valkey.WatchError): pipe.execute() assert not pipe.watching @@ -233,7 +233,7 @@ def test_watch_failure_in_empty_transaction(self, r): pipe.watch("a", "b") r["b"] = 3 pipe.multi() - with pytest.raises(redis.WatchError): + with pytest.raises(valkey.WatchError): pipe.execute() assert not pipe.watching @@ -345,7 +345,7 @@ def test_exec_error_in_no_transaction_pipeline(self, r): pipe.llen("a") pipe.expire("a", 100) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: pipe.execute() assert str(ex.value).startswith( @@ -361,7 +361,7 @@ def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): pipe.llen(key) pipe.expire(key, 100) - with pytest.raises(redis.ResponseError) as ex: + with pytest.raises(valkey.ResponseError) as ex: pipe.execute() expected = f"Command # 1 (LLEN {key}) of pipeline caused error: " @@ -394,7 +394,7 @@ def test_pipeline_discard(self, r): with r.pipeline() as pipe: pipe.set("key", "someval") pipe.discard() - with pytest.raises(redis.exceptions.ResponseError): + with pytest.raises(valkey.exceptions.ResponseError): pipe.execute() # setting a pipeline and discarding should do the same @@ -405,7 +405,7 @@ def test_pipeline_discard(self, r): pipe.set("key", "another value!") pipe.discard() pipe.set("key", "another vae!") - with pytest.raises(redis.exceptions.ResponseError): + with pytest.raises(valkey.exceptions.ResponseError): 
pipe.execute() pipe.set("foo", "bar") diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index fb46772a..84bcd2a5 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -8,15 +8,15 @@ from unittest.mock import patch import pytest -import redis -from redis.exceptions import ConnectionError -from redis.utils import HIREDIS_AVAILABLE +import valkey +from valkey.exceptions import ConnectionError +from valkey.utils import HIREDIS_AVAILABLE from .conftest import ( _get_client, is_resp2_connection, - skip_if_redis_enterprise, skip_if_server_version_lt, + skip_if_valkey_enterprise, ) @@ -378,7 +378,7 @@ def test_sub_unsub_resub_shard_channels(self, r): def _test_sub_unsub_resub( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): - # https://github.com/andymccurdy/redis-py/issues/764 + # https://github.com/andymccurdy/valkey-py/issues/764 key = keys[0] sub_func(key) unsub_func(key) @@ -426,7 +426,7 @@ def test_sub_unsub_all_resub_shard_channels(self, r): def _test_sub_unsub_all_resub( self, p, sub_type, unsub_type, sub_func, unsub_func, keys ): - # https://github.com/andymccurdy/redis-py/issues/764 + # https://github.com/andymccurdy/valkey-py/issues/764 key = keys[0] sub_func(key) unsub_func() @@ -574,7 +574,7 @@ def test_unicode_shard_channel_message_handler(self, r): assert self.message == make_message("smessage", channel, "test message") @pytest.mark.onlynoncluster - # see: https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html + # see: https://valkey-py-cluster.readthedocs.io/en/stable/pubsub.html # #known-limitations-with-pubsub def test_unicode_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) @@ -637,7 +637,7 @@ def message_handler(self, message): @pytest.fixture() def r(self, request): - return _get_client(redis.Redis, request=request, decode_responses=True) + return _get_client(valkey.Valkey, request=request, decode_responses=True) def test_channel_subscribe_unsubscribe(self, r): p = r.pubsub() @@ -771,9 
+771,9 @@ def test_context_manager(self, r): assert pubsub.patterns == {} -class TestPubSubRedisDown: +class TestPubSubValkeyDown: def test_channel_subscribe(self, r): - r = redis.Redis(host="localhost", port=6390) + r = valkey.Valkey(host="localhost", port=6390) p = r.pubsub() with pytest.raises(ConnectionError): p.subscribe("foo") @@ -898,7 +898,7 @@ def test_send_pubsub_ping_message(self, r): @pytest.mark.onlynoncluster class TestPubSubConnectionKilled: @skip_if_server_version_lt("3.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_connection_error_raised_when_connection_dies(self, r): p = r.pubsub() p.subscribe("foo") @@ -977,8 +977,8 @@ def exception_handler(ex, pubsub, thread): class TestPubSubDeadlock: @pytest.mark.timeout(30, method="thread") def test_pubsub_deadlock(self, master_host): - pool = redis.ConnectionPool(host=master_host[0], port=master_host[1]) - r = redis.Redis(connection_pool=pool) + pool = valkey.ConnectionPool(host=master_host[0], port=master_host[1]) + r = valkey.Valkey(connection_pool=pool) for i in range(60): p = r.pubsub() @@ -1032,7 +1032,7 @@ def mycleanup(self): self.cond.notify() self.thread.join() - def test_reconnect_socket_error(self, r: redis.Redis, method): + def test_reconnect_socket_error(self, r: valkey.Valkey, method): """ Test that a socket error will cause reconnect """ @@ -1054,7 +1054,7 @@ def test_reconnect_socket_error(self, r: redis.Redis, method): finally: self.mycleanup() - def test_reconnect_disconnect(self, r: redis.Redis, method): + def test_reconnect_disconnect(self, r: valkey.Valkey, method): """ Test that a manual disconnect() will cause reconnect """ @@ -1086,7 +1086,7 @@ def loop(self): assert got_msg if self.state in (1, 2): self.state = 3 # successful reconnect - except redis.ConnectionError: + except valkey.ConnectionError: assert self.state in (1, 2) self.state = 2 finally: @@ -1113,7 +1113,7 @@ def loop_step_listen(self): @pytest.mark.onlynoncluster class TestBaseException: - 
def test_base_exception(self, r: redis.Redis): + def test_base_exception(self, r: valkey.Valkey): """ Manually trigger a BaseException inside the parser's .read_response method and verify that it isn't caught @@ -1141,9 +1141,9 @@ def get_msg(): assert msg is not None # timeout waiting for another message which never arrives assert is_connected() - with patch("redis._parsers._RESP2Parser.read_response") as mock1, patch( - "redis._parsers._HiredisParser.read_response" - ) as mock2, patch("redis._parsers._RESP3Parser.read_response") as mock3: + with patch("valkey._parsers._RESP2Parser.read_response") as mock1, patch( + "valkey._parsers._HiredisParser.read_response" + ) as mock2, patch("valkey._parsers._RESP3Parser.read_response") as mock3: mock1.side_effect = BaseException("boom") mock2.side_effect = BaseException("boom") mock3.side_effect = BaseException("boom") diff --git a/tests/test_retry.py b/tests/test_retry.py index e9d30158..3b757822 100644 --- a/tests/test_retry.py +++ b/tests/test_retry.py @@ -1,16 +1,16 @@ from unittest.mock import patch import pytest -from redis.backoff import ExponentialBackoff, NoBackoff -from redis.client import Redis -from redis.connection import Connection, UnixDomainSocketConnection -from redis.exceptions import ( +from valkey.backoff import ExponentialBackoff, NoBackoff +from valkey.client import Valkey +from valkey.connection import Connection, UnixDomainSocketConnection +from valkey.exceptions import ( BusyLoadingError, ConnectionError, ReadOnlyError, TimeoutError, ) -from redis.retry import Retry +from valkey.retry import Retry from .conftest import _get_client @@ -123,11 +123,11 @@ def test_infinite_retry(self): @pytest.mark.onlynoncluster -class TestRedisClientRetry: - "Test the standalone Redis client behavior with retries" +class TestValkeyClientRetry: + "Test the standalone Valkey client behavior with retries" def test_client_retry_on_error_with_success(self, request): - with patch.object(Redis, "parse_response") as 
parse_response: + with patch.object(Valkey, "parse_response") as parse_response: def mock_parse_response(connection, *args, **options): def ok_response(connection, *args, **options): @@ -137,16 +137,16 @@ def ok_response(connection, *args, **options): raise ReadOnlyError() parse_response.side_effect = mock_parse_response - r = _get_client(Redis, request, retry_on_error=[ReadOnlyError]) + r = _get_client(Valkey, request, retry_on_error=[ReadOnlyError]) assert r.get("foo") == "MOCK_OK" assert parse_response.call_count == 2 def test_client_retry_on_error_raise(self, request): - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: parse_response.side_effect = BusyLoadingError() retries = 3 r = _get_client( - Redis, + Valkey, request, retry_on_error=[ReadOnlyError, BusyLoadingError], retry=Retry(NoBackoff(), retries), @@ -158,11 +158,11 @@ def test_client_retry_on_error_raise(self, request): assert parse_response.call_count == retries + 1 def test_client_retry_on_error_different_error_raised(self, request): - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: parse_response.side_effect = TimeoutError() retries = 3 r = _get_client( - Redis, + Valkey, request, retry_on_error=[ReadOnlyError], retry=Retry(NoBackoff(), retries), @@ -174,11 +174,11 @@ def test_client_retry_on_error_different_error_raised(self, request): assert parse_response.call_count == 1 def test_client_retry_on_error_and_timeout(self, request): - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: parse_response.side_effect = TimeoutError() retries = 3 r = _get_client( - Redis, + Valkey, request, retry_on_error=[ReadOnlyError], retry_on_timeout=True, @@ -191,11 +191,14 @@ def test_client_retry_on_error_and_timeout(self, request): assert parse_response.call_count == retries + 1 def 
test_client_retry_on_timeout(self, request): - with patch.object(Redis, "parse_response") as parse_response: + with patch.object(Valkey, "parse_response") as parse_response: parse_response.side_effect = TimeoutError() retries = 3 r = _get_client( - Redis, request, retry_on_timeout=True, retry=Retry(NoBackoff(), retries) + Valkey, + request, + retry_on_timeout=True, + retry=Retry(NoBackoff(), retries), ) with pytest.raises(TimeoutError): try: @@ -205,7 +208,7 @@ def test_client_retry_on_timeout(self, request): def test_get_set_retry_object(self, request): retry = Retry(NoBackoff(), 2) - r = _get_client(Redis, request, retry_on_timeout=True, retry=retry) + r = _get_client(Valkey, request, retry_on_timeout=True, retry=retry) exist_conn = r.connection_pool.get_connection("_") assert r.get_retry()._retries == retry._retries assert isinstance(r.get_retry()._backoff, NoBackoff) diff --git a/tests/test_scripting.py b/tests/test_scripting.py index 899dc694..74108c61 100644 --- a/tests/test_scripting.py +++ b/tests/test_scripting.py @@ -1,8 +1,8 @@ import pytest -import redis -from redis import exceptions -from redis.commands.core import Script -from tests.conftest import skip_if_redis_enterprise, skip_if_server_version_lt +import valkey +from tests.conftest import skip_if_server_version_lt, skip_if_valkey_enterprise +from valkey import exceptions +from valkey.commands.core import Script multiply_script = """ local value = redis.call('GET', KEYS[1]) @@ -65,11 +65,11 @@ def test_eval_multiply(self, r): assert r.eval(multiply_script, 1, "a", 3) == 6 @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_eval_ro(self, r): r.set("a", "b") assert r.eval_ro("return redis.call('GET', KEYS[1])", 1, "a") == b"b" - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r.eval_ro("return redis.call('DEL', KEYS[1])", 1, "a") def test_eval_msgpack(self, r): @@ -79,9 +79,9 @@ def test_eval_msgpack(self, r): 
def test_eval_same_slot(self, r): """ - In a clustered redis, the script keys must be in the same slot. + In a clustered valkey, the script keys must be in the same slot. - This test isn't very interesting for standalone redis, but it doesn't + This test isn't very interesting for standalone valkey, but it doesn't hurt anything. """ r.set("A{foo}", 2) @@ -99,7 +99,7 @@ def test_eval_same_slot(self, r): @pytest.mark.onlycluster def test_eval_crossslot(self, r): """ - In a clustered redis, the script keys must be in the same slot. + In a clustered valkey, the script keys must be in the same slot. This test should fail, because the two keys we send are in different slots. This test assumes that {foo} and {bar} will not go to the same @@ -115,7 +115,7 @@ def test_eval_crossslot(self, r): local value2 = redis.call('GET', KEYS[2]) return value * value2 """ - with pytest.raises(exceptions.RedisClusterException): + with pytest.raises(exceptions.ValkeyClusterException): r.eval(script, 2, "A{foo}", "B{bar}") @skip_if_server_version_lt("6.2.0") @@ -154,19 +154,19 @@ def test_evalsha(self, r): assert r.evalsha(sha, 1, "a", 3) == 6 @skip_if_server_version_lt("7.0.0") - @skip_if_redis_enterprise() + @skip_if_valkey_enterprise() def test_evalsha_ro(self, r): r.set("a", "b") get_sha = r.script_load("return redis.call('GET', KEYS[1])") del_sha = r.script_load("return redis.call('DEL', KEYS[1])") assert r.evalsha_ro(get_sha, 1, "a") == b"b" - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r.evalsha_ro(del_sha, 1, "a") def test_evalsha_script_not_loaded(self, r): r.set("a", 2) sha = r.script_load(multiply_script) - # remove the script from Redis's cache + # remove the script from Valkey's cache r.script_flush() with pytest.raises(exceptions.NoScriptError): r.evalsha(sha, 1, "a", 3) @@ -194,7 +194,7 @@ def test_script_object(self, r): assert multiply(keys=["a"], args=[3]) == 6 # At this point, the script should be loaded assert 
r.script_exists(multiply.sha) == [True] - # Test that the precalculated sha matches the one from redis + # Test that the precalculated sha matches the one from valkey assert multiply.sha == precalculated_sha # Test first evalsha block assert multiply(keys=["a"], args=[3]) == 6 @@ -217,7 +217,7 @@ def test_script_object_in_pipeline(self, r): # The precalculated sha should have been the correct one assert multiply.sha == precalculated_sha - # purge the script from redis's cache and re-run the pipeline + # purge the script from valkey's cache and re-run the pipeline # the multiply script should be reloaded by pipe.execute() r.script_flush() pipe = r.pipeline() diff --git a/tests/test_search.py b/tests/test_search.py index bfe20425..f14eb4a6 100644 --- a/tests/test_search.py +++ b/tests/test_search.py @@ -5,13 +5,13 @@ from io import TextIOWrapper import pytest -import redis -import redis.commands.search -import redis.commands.search.aggregation as aggregations -import redis.commands.search.reducers as reducers -from redis.commands.json.path import Path -from redis.commands.search import Search -from redis.commands.search.field import ( +import valkey +import valkey.commands.search +import valkey.commands.search.aggregation as aggregations +import valkey.commands.search.reducers as reducers +from valkey.commands.json.path import Path +from valkey.commands.search import Search +from valkey.commands.search.field import ( GeoField, GeoShapeField, NumericField, @@ -19,19 +19,21 @@ TextField, VectorField, ) -from redis.commands.search.indexDefinition import IndexDefinition, IndexType -from redis.commands.search.query import GeoFilter, NumericFilter, Query -from redis.commands.search.result import Result -from redis.commands.search.suggestion import Suggestion +from valkey.commands.search.indexDefinition import IndexDefinition, IndexType +from valkey.commands.search.query import GeoFilter, NumericFilter, Query +from valkey.commands.search.result import Result +from 
valkey.commands.search.suggestion import Suggestion from .conftest import ( _get_client, assert_resp_response, is_resp2_connection, - skip_if_redis_enterprise, + skip_if_valkey_enterprise, skip_ifmodversion_lt, ) +pytestmark = pytest.mark.skip + WILL_PLAY_TEXT = os.path.abspath( os.path.join(os.path.dirname(__file__), "testdata", "will_play_text.csv.bz2") ) @@ -78,7 +80,7 @@ def createIndex(client, num_docs=100, definition=None): (TextField("play", weight=5.0), TextField("txt"), NumericField("chapter")), definition=definition, ) - except redis.ResponseError: + except valkey.ResponseError: client.dropindex(delete_documents=True) return createIndex(client, num_docs=num_docs, definition=definition) @@ -108,12 +110,12 @@ def createIndex(client, num_docs=100, definition=None): @pytest.fixture def client(request): - r = _get_client(redis.Redis, request, decode_responses=True) + r = _get_client(valkey.Valkey, request, decode_responses=True) r.flushdb() return r -@pytest.mark.redismod +@pytest.mark.valkeymod def test_client(client): num_docs = 500 createIndex(client.ft(), num_docs=num_docs) @@ -309,7 +311,7 @@ def test_client(client): client.ft().delete_document("doc-5ghs2") -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster def test_scores(client): client.ft().create_index((TextField("txt"),)) @@ -331,7 +333,7 @@ def test_scores(client): assert "doc1" == res["results"][1]["id"] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_stopwords(client): client.ft().create_index((TextField("txt"),), stopwords=["foo", "bar", "baz"]) client.hset("doc1", mapping={"txt": "foo bar"}) @@ -349,7 +351,7 @@ def test_stopwords(client): assert 1 == res2["total_results"] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_filters(client): client.ft().create_index((TextField("txt"), NumericField("num"), GeoField("loc"))) client.hset( @@ -402,7 +404,7 @@ def test_filters(client): assert ["doc1", "doc2"] == res -@pytest.mark.redismod +@pytest.mark.valkeymod def 
test_sort_by(client): client.ft().create_index((TextField("txt"), NumericField("num", sortable=True))) client.hset("doc1", mapping={"txt": "foo bar", "num": 1}) @@ -434,7 +436,7 @@ def test_sort_by(client): assert "doc3" == res2["results"][0]["id"] -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.0.0", "search") def test_drop_index(client): """ @@ -453,7 +455,7 @@ def test_drop_index(client): assert i == keep_docs[1] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_example(client): # Creating the index definition and schema client.ft().create_index((TextField("title", weight=5.0), TextField("body"))) @@ -463,7 +465,7 @@ def test_example(client): "doc1", mapping={ "title": "RediSearch", - "body": "Redisearch impements a search engine on top of redis", + "body": "RediSearch impements a search engine on top of valkey", }, ) @@ -474,8 +476,8 @@ def test_example(client): assert res is not None -@pytest.mark.redismod -@skip_if_redis_enterprise() +@pytest.mark.valkeymod +@skip_if_valkey_enterprise() def test_auto_complete(client): n = 0 with open(TITLES_CSV) as f: @@ -524,7 +526,7 @@ def test_auto_complete(client): assert sug.payload.startswith("pl") -@pytest.mark.redismod +@pytest.mark.valkeymod def test_no_index(client): client.ft().create_index( ( @@ -602,20 +604,20 @@ def test_no_index(client): TagField("name", no_index=True, sortable=False) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_explain(client): client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3"))) res = client.ft().explain("@f3:f3_val @f2:f2_val @f1:f1_val") assert res -@pytest.mark.redismod +@pytest.mark.valkeymod def test_explaincli(client): with pytest.raises(NotImplementedError): client.ft().explain_cli("foo") -@pytest.mark.redismod +@pytest.mark.valkeymod def test_summarize(client): createIndex(client.ft()) waitForIndex(client, getattr(client.ft(), "index_name", "idx")) @@ -658,7 +660,7 @@ def test_summarize(client): ) -@pytest.mark.redismod 
+@pytest.mark.valkeymod @skip_ifmodversion_lt("2.0.0", "search") def test_alias(client): index1 = getClient(client) @@ -721,7 +723,7 @@ def test_alias(client): alias_client2.search("*").docs[0] -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.xfail(strict=False) def test_alias_basic(client): # Creating a client with one index @@ -770,7 +772,7 @@ def test_alias_basic(client): _ = alias_client2.search("*").docs[0] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_textfield_sortable_nostem(client): # Creating the index definition with sortable and no_stem client.ft().create_index((TextField("txt", sortable=True, no_stem=True),)) @@ -785,7 +787,7 @@ def test_textfield_sortable_nostem(client): assert "NOSTEM" in response["attributes"][0]["flags"] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_alter_schema_add(client): # Creating the index definition and schema client.ft().create_index(TextField("title")) @@ -809,7 +811,7 @@ def test_alter_schema_add(client): assert 1 == res["total_results"] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_spell_check(client): client.ft().create_index((TextField("f1"), TextField("f2"))) @@ -878,7 +880,7 @@ def test_spell_check(client): assert res == {"results": {}} -@pytest.mark.redismod +@pytest.mark.valkeymod def test_dict_operations(client): client.ft().create_index((TextField("f1"), TextField("f2"))) # Add three items @@ -897,7 +899,7 @@ def test_dict_operations(client): client.ft().dict_del("custom_dict", *res) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_phonetic_matcher(client): client.ft().create_index((TextField("name"),)) client.hset("doc1", mapping={"name": "Jon"}) @@ -929,7 +931,7 @@ def test_phonetic_matcher(client): ) -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster def test_scorer(client): client.ft().create_index((TextField("description"),)) @@ -977,7 +979,7 @@ def test_scorer(client): assert 0.0 == res["results"][0]["score"] -@pytest.mark.redismod 
+@pytest.mark.valkeymod def test_get(client): client.ft().create_index((TextField("f1"), TextField("f2"))) @@ -1000,12 +1002,12 @@ def test_get(client): ] == client.ft().get("doc1", "doc2") -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster @skip_ifmodversion_lt("2.2.0", "search") def test_config(client): assert client.ft().config_set("TIMEOUT", "100") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): client.ft().config_set("TIMEOUT", "null") res = client.ft().config_get("*") assert "100" == res["TIMEOUT"] @@ -1013,7 +1015,7 @@ def test_config(client): assert "100" == res["TIMEOUT"] -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster def test_aggregations_groupby(client): # Creating the index definition and schema @@ -1031,8 +1033,8 @@ def test_aggregations_groupby(client): "search", mapping={ "title": "RediSearch", - "body": "Redisearch impements a search engine on top of redis", - "parent": "redis", + "body": "RediSearch impements a search engine on top of valkey", + "parent": "valkey", "random_num": 10, }, ) @@ -1041,7 +1043,7 @@ def test_aggregations_groupby(client): mapping={ "title": "RedisAI", "body": "RedisAI executes Deep Learning/Machine Learning models and managing their data.", # noqa - "parent": "redis", + "parent": "valkey", "random_num": 3, }, ) @@ -1050,210 +1052,210 @@ def test_aggregations_groupby(client): mapping={ "title": "RedisJson", "body": "RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.", # noqa - "parent": "redis", + "parent": "valkey", "random_num": 8, }, ) if is_resp2_connection(client): - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.count() ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" - req = aggregations.AggregateRequest("redis").group_by( + req = 
aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.count_distinct("@title") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.count_distinctish("@title") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.sum("@random_num") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "21" # 10+8+3 - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.min("@random_num") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3" # min(10,8,3) - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.max("@random_num") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "10" # max(10,8,3) - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.avg("@random_num") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" index = res.index("__generated_aliasavgrandom_num") assert res[index + 1] == "7" # (10+3+8)/3 - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.stddev("random_num") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "3.60555127546" - req = 
aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.quantile("@random_num", 0.5) ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[3] == "8" # median of 3,8,10 - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.tolist("@title") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert set(res[3]) == {"RediSearch", "RedisAI", "RedisJson"} - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.first_value("@title").alias("first") ) res = client.ft().aggregate(req).rows[0] - assert res == ["parent", "redis", "first", "RediSearch"] + assert res == ["parent", "valkey", "first", "RediSearch"] - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.random_sample("@title", 2).alias("random") ) res = client.ft().aggregate(req).rows[0] - assert res[1] == "redis" + assert res[1] == "valkey" assert res[2] == "random" assert len(res[3]) == 2 assert res[3][0] in ["RediSearch", "RedisAI", "RedisJson"] else: - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.count() ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliascount"] == "3" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.count_distinct("@title") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert 
res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliascount_distincttitle"] == "3" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.count_distinctish("@title") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliascount_distinctishtitle"] == "3" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.sum("@random_num") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliassumrandom_num"] == "21" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.min("@random_num") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliasminrandom_num"] == "3" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.max("@random_num") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliasmaxrandom_num"] == "10" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.avg("@random_num") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert 
res["extra_attributes"]["__generated_aliasavgrandom_num"] == "7" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.stddev("random_num") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert ( res["extra_attributes"]["__generated_aliasstddevrandom_num"] == "3.60555127546" ) - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.quantile("@random_num", 0.5) ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert res["extra_attributes"]["__generated_aliasquantilerandom_num,0.5"] == "8" - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.tolist("@title") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] == "valkey" assert set(res["extra_attributes"]["__generated_aliastolisttitle"]) == { "RediSearch", "RedisAI", "RedisJson", } - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.first_value("@title").alias("first") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"] == {"parent": "redis", "first": "RediSearch"} + assert res["extra_attributes"] == {"parent": "valkey", "first": "RediSearch"} - req = aggregations.AggregateRequest("redis").group_by( + req = aggregations.AggregateRequest("valkey").group_by( "@parent", reducers.random_sample("@title", 2).alias("random") ) res = client.ft().aggregate(req)["results"][0] - assert res["extra_attributes"]["parent"] == "redis" + assert res["extra_attributes"]["parent"] 
== "valkey" assert "random" in res["extra_attributes"].keys() assert len(res["extra_attributes"]["random"]) == 2 assert res["extra_attributes"]["random"][0] in [ @@ -1263,7 +1265,7 @@ def test_aggregations_groupby(client): ] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_aggregations_sort_by_and_limit(client): client.ft().create_index((TextField("t1"), TextField("t2"))) @@ -1322,7 +1324,7 @@ def test_aggregations_sort_by_and_limit(client): assert res["results"][0]["extra_attributes"] == {"t1": "b"} -@pytest.mark.redismod +@pytest.mark.valkeymod def test_aggregations_load(client): client.ft().create_index((TextField("t1"), TextField("t2"))) @@ -1360,7 +1362,7 @@ def test_aggregations_load(client): assert res["results"][0]["extra_attributes"] == {"t1": "hello", "t2": "world"} -@pytest.mark.redismod +@pytest.mark.valkeymod def test_aggregations_apply(client): client.ft().create_index( ( @@ -1395,7 +1397,7 @@ def test_aggregations_apply(client): assert res_set == set(["6373878785249699840", "6373878758592700416"]) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_aggregations_filter(client): client.ft().create_index( (TextField("name", sortable=True), NumericField("age", sortable=True)) @@ -1441,7 +1443,7 @@ def test_aggregations_filter(client): assert res["results"][1]["extra_attributes"] == {"age": "25"} -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.0.0", "search") def test_index_definition(client): """ @@ -1485,9 +1487,9 @@ def test_index_definition(client): createIndex(client.ft(), num_docs=500, definition=definition) -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster -@skip_if_redis_enterprise() +@skip_if_valkey_enterprise() def test_expire(client): client.ft().create_index((TextField("txt", sortable=True),), temporary=4) ttl = client.execute_command("ft.debug", "TTL", "idx") @@ -1498,7 +1500,7 @@ def test_expire(client): time.sleep(0.01) -@pytest.mark.redismod +@pytest.mark.valkeymod def 
test_skip_initial_scan(client): client.hset("doc1", "foo", "bar") q = Query("@foo:bar") @@ -1511,7 +1513,7 @@ def test_skip_initial_scan(client): assert res["total_results"] == 0 -@pytest.mark.redismod +@pytest.mark.valkeymod def test_summarize_disabled_nooffset(client): client.ft().create_index((TextField("txt"),), no_term_offsets=True) client.hset("doc1", mapping={"txt": "foo bar"}) @@ -1519,7 +1521,7 @@ def test_summarize_disabled_nooffset(client): client.ft().search(Query("foo").summarize(fields=["txt"])) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_summarize_disabled_nohl(client): client.ft().create_index((TextField("txt"),), no_highlight=True) client.hset("doc1", mapping={"txt": "foo bar"}) @@ -1527,7 +1529,7 @@ def test_summarize_disabled_nohl(client): client.ft().search(Query("foo").summarize(fields=["txt"])) -@pytest.mark.redismod +@pytest.mark.valkeymod def test_max_text_fields(client): # Creating the index definition client.ft().create_index((TextField("f0"),)) @@ -1535,7 +1537,7 @@ def test_max_text_fields(client): client.ft().alter_schema_add((TextField(f"f{x}"),)) # Should be too many indexes - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): client.ft().alter_schema_add((TextField(f"f{x}"),)) client.ft().dropindex("idx") @@ -1546,7 +1548,7 @@ def test_max_text_fields(client): client.ft().alter_schema_add((TextField(f"f{x}"),)) -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.0.0", "search") def test_create_client_definition(client): """ @@ -1564,7 +1566,7 @@ def test_create_client_definition(client): assert 495 == int(info["num_docs"]) -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.0.0", "search") def test_create_client_definition_hash(client): """ @@ -1582,7 +1584,7 @@ def test_create_client_definition_hash(client): assert 495 == int(info["num_docs"]) -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.2.0", "search") def 
test_create_client_definition_json(client): """ @@ -1607,7 +1609,7 @@ def test_create_client_definition_json(client): assert res["total_results"] == 1 -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.2.0", "search") def test_fields_as_name(client): # create index @@ -1635,7 +1637,7 @@ def test_fields_as_name(client): assert "25" == res["results"][0]["extra_attributes"]["just_a_number"] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_casesensitive(client): # create index SCHEMA = (TagField("t", case_sensitive=False),) @@ -1669,7 +1671,7 @@ def test_casesensitive(client): assert "1" == res["results"][0]["id"] -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.2.0", "search") def test_search_return_fields(client): res = client.json().set( @@ -1707,7 +1709,7 @@ def test_search_return_fields(client): assert "telmatosaurus" == total["results"][0]["extra_attributes"]["txt"] -@pytest.mark.redismod +@pytest.mark.valkeymod def test_synupdate(client): definition = IndexDefinition(index_type=IndexType.HASH) client.ft().create_index( @@ -1731,7 +1733,7 @@ def test_synupdate(client): assert res["results"][0]["extra_attributes"]["body"] == "another test" -@pytest.mark.redismod +@pytest.mark.valkeymod def test_syndump(client): definition = IndexDefinition(index_type=IndexType.HASH) client.ft().create_index( @@ -1752,7 +1754,7 @@ def test_syndump(client): } -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.2.0", "search") def test_create_json_with_alias(client): """ @@ -1797,7 +1799,7 @@ def test_create_json_with_alias(client): client.ft().search("@$.name:henry") -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.2.0", "search") def test_json_with_multipath(client): """ @@ -1841,7 +1843,7 @@ def test_json_with_multipath(client): assert res["total_results"] == 1 -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.2.0", "search") def test_json_with_jsonpath(client): 
definition = IndexDefinition(index_type=IndexType.JSON) @@ -1891,9 +1893,9 @@ def test_json_with_jsonpath(client): assert res["results"][0]["extra_attributes"]["name"] == "RediSearch" -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster -@skip_if_redis_enterprise() +@skip_if_valkey_enterprise() def test_profile(client): client.ft().create_index((TextField("t"),)) client.ft().client.hset("1", "t", "hello") @@ -1940,7 +1942,7 @@ def test_profile(client): assert len(res["results"]) == 2 # check also the search result -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster def test_profile_limited(client): client.ft().create_index((TextField("t"),)) @@ -1977,7 +1979,7 @@ def test_profile_limited(client): assert len(res["results"]) == 3 # check also the search result -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.4.3", "search") def test_profile_query_params(client): client.ft().create_index( @@ -2008,7 +2010,7 @@ def test_profile_query_params(client): assert "0" == res["results"][0]["extra_attributes"]["__v_score"] -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.4.3", "search") def test_vector_field(client): client.flushdb() @@ -2035,7 +2037,7 @@ def test_vector_field(client): assert "0" == res["results"][0]["extra_attributes"]["__v_score"] -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.4.3", "search") def test_vector_field_error(r): r.flushdb() @@ -2049,7 +2051,7 @@ def test_vector_field_error(r): r.ft().create_index((VectorField("v", "SORT", {}),)) -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.4.3", "search") def test_text_params(client): client.flushdb() @@ -2072,7 +2074,7 @@ def test_text_params(client): assert "doc2" == res["results"][1]["id"] -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.4.3", "search") def test_numeric_params(client): client.flushdb() @@ -2096,7 +2098,7 @@ def 
test_numeric_params(client): assert "doc2" == res["results"][1]["id"] -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.4.3", "search") def test_geo_params(client): client.ft().create_index((GeoField("g"))) @@ -2119,8 +2121,8 @@ def test_geo_params(client): assert "doc3" == res["results"][2]["id"] -@pytest.mark.redismod -@skip_if_redis_enterprise() +@pytest.mark.valkeymod +@skip_if_valkey_enterprise() def test_search_commands_in_pipeline(client): p = client.ft().pipeline() p.create_index((TextField("txt"),)) @@ -2149,18 +2151,18 @@ def test_search_commands_in_pipeline(client): ) -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.onlynoncluster @skip_ifmodversion_lt("2.4.3", "search") def test_dialect_config(client): assert client.ft().config_get("DEFAULT_DIALECT") client.ft().config_set("DEFAULT_DIALECT", 2) assert client.ft().config_get("DEFAULT_DIALECT") == {"DEFAULT_DIALECT": "2"} - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): client.ft().config_set("DEFAULT_DIALECT", 0) -@pytest.mark.redismod +@pytest.mark.valkeymod @skip_ifmodversion_lt("2.4.3", "search") def test_dialect(client): client.ft().create_index( @@ -2175,12 +2177,12 @@ def test_dialect(client): ) ) client.hset("h", "t1", "hello") - with pytest.raises(redis.ResponseError) as err: + with pytest.raises(valkey.ResponseError) as err: client.ft().explain(Query("(*)").dialect(1)) assert "Syntax error" in str(err) assert "WILDCARD" in client.ft().explain(Query("(*)").dialect(2)) - with pytest.raises(redis.ResponseError) as err: + with pytest.raises(valkey.ResponseError) as err: client.ft().explain(Query("$hello").dialect(1)) assert "Syntax error" in str(err) q = Query("$hello").dialect(2) @@ -2189,13 +2191,13 @@ def test_dialect(client): expected = "NUMERIC {0.000000 <= @num <= 10.000000}\n" assert expected in client.ft().explain(Query("@title:(@num:[0 10])").dialect(1)) - with pytest.raises(redis.ResponseError) as err: + with 
pytest.raises(valkey.ResponseError) as err: client.ft().explain(Query("@title:(@num:[0 10])").dialect(2)) assert "Syntax error" in str(err) -@pytest.mark.redismod -def test_expire_while_search(client: redis.Redis): +@pytest.mark.valkeymod +def test_expire_while_search(client: valkey.Valkey): client.ft().create_index((TextField("txt"),)) client.hset("hset:1", "txt", "a") client.hset("hset:2", "txt", "b") @@ -2216,9 +2218,9 @@ def test_expire_while_search(client: redis.Redis): assert 2 == client.ft().search(Query("*"))["total_results"] -@pytest.mark.redismod +@pytest.mark.valkeymod @pytest.mark.experimental -def test_withsuffixtrie(client: redis.Redis): +def test_withsuffixtrie(client: valkey.Valkey): # create index assert client.ft().create_index((TextField("txt"),)) waitForIndex(client, getattr(client.ft(), "index_name", "idx")) @@ -2258,19 +2260,19 @@ def test_withsuffixtrie(client: redis.Redis): assert "WITHSUFFIXTRIE" in info["attributes"][0]["flags"] -@pytest.mark.redismod -def test_query_timeout(r: redis.Redis): +@pytest.mark.valkeymod +def test_query_timeout(r: valkey.Valkey): q1 = Query("foo").timeout(5000) assert q1.get_args() == ["foo", "TIMEOUT", 5000, "LIMIT", 0, 10] q1 = Query("foo").timeout(0) assert q1.get_args() == ["foo", "TIMEOUT", 0, "LIMIT", 0, 10] q2 = Query("foo").timeout("not_a_number") - with pytest.raises(redis.ResponseError): + with pytest.raises(valkey.ResponseError): r.ft().search(q2) -@pytest.mark.redismod -def test_geoshape(client: redis.Redis): +@pytest.mark.valkeymod +def test_geoshape(client: valkey.Valkey): client.ft().create_index((GeoShapeField("geom", GeoShapeField.FLAT))) waitForIndex(client, getattr(client.ft(), "index_name", "idx")) client.hset("small", "geom", "POLYGON((1 1, 1 100, 100 100, 100 1, 1 1))") diff --git a/tests/test_sentinel.py b/tests/test_sentinel.py index 54b96470..4ae8d11e 100644 --- a/tests/test_sentinel.py +++ b/tests/test_sentinel.py @@ -2,9 +2,9 @@ from unittest import mock import pytest -import 
redis.sentinel -from redis import exceptions -from redis.sentinel import ( +import valkey.sentinel +from valkey import exceptions +from valkey.sentinel import ( MasterNotFoundError, Sentinel, SentinelConnectionPool, @@ -36,7 +36,7 @@ def sentinel_slaves(self, master_name): def execute_command(self, *args, **kwargs): # wrapper purely to validate the calls don't explode - from redis.client import bool_ok + from valkey.client import bool_ok return bool_ok @@ -72,11 +72,11 @@ def client(self, host, port, **kwargs): @pytest.fixture() def cluster(request, master_ip): def teardown(): - redis.sentinel.Redis = saved_Redis + valkey.sentinel.Valkey = saved_Valkey cluster = SentinelTestCluster(ip=master_ip) - saved_Redis = redis.sentinel.Redis - redis.sentinel.Redis = cluster.client + saved_Valkey = valkey.sentinel.Valkey + valkey.sentinel.Valkey = cluster.client request.addfinalizer(teardown) return cluster diff --git a/tests/test_ssl.py b/tests/test_ssl.py index fc7416db..256ac5b0 100644 --- a/tests/test_ssl.py +++ b/tests/test_ssl.py @@ -3,8 +3,8 @@ from urllib.parse import urlparse import pytest -import redis -from redis.exceptions import ConnectionError, RedisError +import valkey +from valkey.exceptions import ConnectionError, ValkeyError from .conftest import skip_if_cryptography, skip_if_nocryptography from .ssl_utils import get_ssl_filename @@ -14,7 +14,7 @@ class TestSSL: """Tests for SSL connections - This relies on the --redis-ssl-url purely for rebuilding the client + This relies on the --valkey-ssl-url purely for rebuilding the client and connecting to the appropriate port. 
""" @@ -24,24 +24,24 @@ class TestSSL: SERVER_CERT = get_ssl_filename("server-cert.pem") def test_ssl_with_invalid_cert(self, request): - ssl_url = request.config.option.redis_ssl_url - sslclient = redis.from_url(ssl_url) + ssl_url = request.config.option.valkey_ssl_url + sslclient = valkey.from_url(ssl_url) with pytest.raises(ConnectionError) as e: sslclient.ping() assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e) sslclient.close() def test_ssl_connection(self, request): - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis(host=p[0], port=p[1], ssl=True, ssl_cert_reqs="none") + r = valkey.Valkey(host=p[0], port=p[1], ssl=True, ssl_cert_reqs="none") assert r.ping() r.close() def test_ssl_connection_without_ssl(self, request): - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis(host=p[0], port=p[1], ssl=False) + r = valkey.Valkey(host=p[0], port=p[1], ssl=False) with pytest.raises(ConnectionError) as e: r.ping() @@ -49,9 +49,9 @@ def test_ssl_connection_without_ssl(self, request): r.close() def test_validating_self_signed_certificate(self, request): - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -66,9 +66,9 @@ def test_validating_self_signed_certificate(self, request): def test_validating_self_signed_string_certificate(self, request): with open(self.CA_CERT) as f: cert_data = f.read() - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -89,9 +89,9 @@ def test_validating_self_signed_string_certificate(self, request): ], ) def 
test_ssl_connection_tls12_custom_ciphers(self, request, ssl_ciphers): - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -103,9 +103,9 @@ def test_ssl_connection_tls12_custom_ciphers(self, request, ssl_ciphers): r.close() def test_ssl_connection_tls12_custom_ciphers_invalid(self, request): - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -113,7 +113,7 @@ def test_ssl_connection_tls12_custom_ciphers_invalid(self, request): ssl_min_version=ssl.TLSVersion.TLSv1_2, ssl_ciphers="foo:bar", ) - with pytest.raises(RedisError) as e: + with pytest.raises(ValkeyError) as e: r.ping() assert "No cipher can be selected" in str(e) r.close() @@ -127,9 +127,9 @@ def test_ssl_connection_tls12_custom_ciphers_invalid(self, request): ) def test_ssl_connection_tls13_custom_ciphers(self, request, ssl_ciphers): # TLSv1.3 does not support changing the ciphers - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -137,15 +137,15 @@ def test_ssl_connection_tls13_custom_ciphers(self, request, ssl_ciphers): ssl_min_version=ssl.TLSVersion.TLSv1_2, ssl_ciphers=ssl_ciphers, ) - with pytest.raises(RedisError) as e: + with pytest.raises(ValkeyError) as e: r.ping() assert "No cipher can be selected" in str(e) r.close() def _create_oscp_conn(self, request): - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -160,7 +160,7 @@ def _create_oscp_conn(self, request): @skip_if_cryptography() 
def test_ssl_ocsp_called(self, request): r = self._create_oscp_conn(request) - with pytest.raises(RedisError) as e: + with pytest.raises(ValkeyError) as e: r.ping() assert "cryptography is not installed" in str(e) r.close() @@ -175,7 +175,7 @@ def test_ssl_ocsp_called_withcrypto(self, request): @skip_if_nocryptography() def test_valid_ocsp_cert_http(self): - from redis.ocsp import OCSPVerifier + from valkey.ocsp import OCSPVerifier hostnames = ["github.com", "aws.amazon.com", "ynet.co.il"] for hostname in hostnames: @@ -187,7 +187,7 @@ def test_valid_ocsp_cert_http(self): @skip_if_nocryptography() def test_revoked_ocsp_certificate(self): - from redis.ocsp import OCSPVerifier + from valkey.ocsp import OCSPVerifier context = ssl.create_default_context() hostname = "revoked.badssl.com" @@ -200,7 +200,7 @@ def test_revoked_ocsp_certificate(self): @skip_if_nocryptography() def test_unauthorized_ocsp(self): - from redis.ocsp import OCSPVerifier + from valkey.ocsp import OCSPVerifier context = ssl.create_default_context() hostname = "stackoverflow.com" @@ -212,7 +212,7 @@ def test_unauthorized_ocsp(self): @skip_if_nocryptography() def test_ocsp_not_present_in_response(self): - from redis.ocsp import OCSPVerifier + from valkey.ocsp import OCSPVerifier context = ssl.create_default_context() hostname = "google.co.il" @@ -225,7 +225,7 @@ def test_ocsp_not_present_in_response(self): @skip_if_nocryptography() def test_unauthorized_then_direct(self): - from redis.ocsp import OCSPVerifier + from valkey.ocsp import OCSPVerifier # these certificates on the socket end return unauthorized # then the second call succeeds @@ -241,9 +241,9 @@ def test_unauthorized_then_direct(self): def test_mock_ocsp_staple(self, request): import OpenSSL - ssl_url = request.config.option.redis_ssl_url + ssl_url = request.config.option.valkey_ssl_url p = urlparse(ssl_url)[1].split(":") - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -255,7 +255,7 @@ def 
test_mock_ocsp_staple(self, request): ssl_ocsp_context=p, # just needs to not be none ) - with pytest.raises(RedisError): + with pytest.raises(ValkeyError): r.ping() r.close() @@ -263,7 +263,7 @@ def test_mock_ocsp_staple(self, request): ctx.use_certificate_file(self.CLIENT_CERT) ctx.use_privatekey_file(self.CLIENT_KEY) - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, @@ -281,7 +281,7 @@ def test_mock_ocsp_staple(self, request): assert "no ocsp response present" in str(e) r.close() - r = redis.Redis( + r = valkey.Valkey( host=p[0], port=p[1], ssl=True, diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py index 6b59967f..02e08000 100644 --- a/tests/test_timeseries.py +++ b/tests/test_timeseries.py @@ -3,10 +3,12 @@ from time import sleep import pytest -import redis +import valkey from .conftest import assert_resp_response, is_resp2_connection, skip_ifmodversion_lt +pytestmark = pytest.mark.skip + @pytest.fixture def client(decoded_r): @@ -17,7 +19,7 @@ def client(decoded_r): def test_create(client): assert client.ts().create(1) assert client.ts().create(2, retention_msecs=5) - assert client.ts().create(3, labels={"Redis": "Labs"}) + assert client.ts().create(3, labels={"Valkey": "Labs"}) assert client.ts().create(4, retention_msecs=20, labels={"Time": "Series"}) info = client.ts().info(4) assert_resp_response( @@ -83,9 +85,9 @@ def test_alter_diplicate_policy(client): def test_add(client): assert 1 == client.ts().add(1, 1, 1) assert 2 == client.ts().add(2, 2, 3, retention_msecs=10) - assert 3 == client.ts().add(3, 3, 2, labels={"Redis": "Labs"}) + assert 3 == client.ts().add(3, 3, 2, labels={"Valkey": "Labs"}) assert 4 == client.ts().add( - 4, 4, 2, retention_msecs=10, labels={"Redis": "Labs", "Time": "Series"} + 4, 4, 2, retention_msecs=10, labels={"Valkey": "Labs", "Time": "Series"} ) assert abs(time.time() - float(client.ts().add(5, "*", 1)) / 1000) < 1.0 @@ -94,7 +96,7 @@ def test_add(client): assert_resp_response( client, 10, 
info.get("retention_msecs"), info.get("retentionTime") ) - assert "Labs" == info["labels"]["Redis"] + assert "Labs" == info["labels"]["Valkey"] # Test for a chunk size of 128 Bytes on TS.ADD assert client.ts().add("time-serie-1", 1, 10.0, chunk_size=128) @@ -252,7 +254,7 @@ def test_range_advanced(client): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_range_latest(client: redis.Redis): +def test_range_latest(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.create("t2") @@ -276,7 +278,7 @@ def test_range_latest(client: redis.Redis): @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_range_bucket_timestamp(client: redis.Redis): +def test_range_bucket_timestamp(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.add("t1", 15, 1) @@ -309,7 +311,7 @@ def test_range_bucket_timestamp(client: redis.Redis): @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_range_empty(client: redis.Redis): +def test_range_empty(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.add("t1", 15, 1) @@ -401,7 +403,7 @@ def test_rev_range(client): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_revrange_latest(client: redis.Redis): +def test_revrange_latest(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.create("t2") @@ -419,7 +421,7 @@ def test_revrange_latest(client: redis.Redis): @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_revrange_bucket_timestamp(client: redis.Redis): +def test_revrange_bucket_timestamp(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.add("t1", 15, 1) @@ -452,7 +454,7 @@ def test_revrange_bucket_timestamp(client: redis.Redis): @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_revrange_empty(client: redis.Redis): +def test_revrange_empty(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") 
timeseries.add("t1", 15, 1) @@ -658,7 +660,7 @@ def test_multi_range_advanced(client): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_mrange_latest(client: redis.Redis): +def test_mrange_latest(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.create("t2", labels={"is_compaction": "true"}) @@ -805,7 +807,7 @@ def test_multi_reverse_range(client): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_mrevrange_latest(client: redis.Redis): +def test_mrevrange_latest(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.create("t2", labels={"is_compaction": "true"}) @@ -844,7 +846,7 @@ def test_get(client): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_get_latest(client: redis.Redis): +def test_get_latest(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.create("t2") @@ -896,7 +898,7 @@ def test_mget(client): @pytest.mark.onlynoncluster @skip_ifmodversion_lt("1.8.0", "timeseries") -def test_mget_latest(client: redis.Redis): +def test_mget_latest(client: valkey.Valkey): timeseries = client.ts() timeseries.create("t1") timeseries.create("t2", labels={"is_compaction": "true"}) diff --git a/tests/testdata/titles.csv b/tests/testdata/titles.csv index 6428dd2a..cbc2158a 100644 --- a/tests/testdata/titles.csv +++ b/tests/testdata/titles.csv @@ -4321,7 +4321,7 @@ gustav krupp von bohlen und halbach,1 yasmany tomás,4 notre temps,1 cats %,1 -intramolecular vibrational energy redistribution,1 +intramolecular vibrational energy redistribution,1 graduate management admission test,49 robin fleming,1 daniel gadzhev,1 diff --git a/redis/__init__.py b/valkey/__init__.py similarity index 75% rename from redis/__init__.py rename to valkey/__init__.py index 495d2d99..f12f800e 100644 --- a/redis/__init__.py +++ b/valkey/__init__.py @@ -1,18 +1,18 @@ import sys -from redis import asyncio # 
noqa -from redis.backoff import default_backoff -from redis.client import Redis, StrictRedis -from redis.cluster import RedisCluster -from redis.connection import ( +from valkey import asyncio # noqa +from valkey.backoff import default_backoff +from valkey.client import StrictValkey, Valkey +from valkey.cluster import ValkeyCluster +from valkey.connection import ( BlockingConnectionPool, Connection, ConnectionPool, SSLConnection, UnixDomainSocketConnection, ) -from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider -from redis.exceptions import ( +from valkey.credentials import CredentialProvider, UsernamePasswordCredentialProvider +from valkey.exceptions import ( AuthenticationError, AuthenticationWrongNumberOfArgsError, BusyLoadingError, @@ -23,18 +23,18 @@ OutOfMemoryError, PubSubError, ReadOnlyError, - RedisError, ResponseError, TimeoutError, + ValkeyError, WatchError, ) -from redis.sentinel import ( +from valkey.sentinel import ( Sentinel, SentinelConnectionPool, SentinelManagedConnection, SentinelManagedSSLConnection, ) -from redis.utils import from_url +from valkey.utils import from_url if sys.version_info >= (3, 8): from importlib import metadata @@ -50,7 +50,7 @@ def int_or_str(value): try: - __version__ = metadata.version("redis") + __version__ = metadata.version("valkey") except metadata.PackageNotFoundError: __version__ = "99.99.99" @@ -77,9 +77,9 @@ def int_or_str(value): "OutOfMemoryError", "PubSubError", "ReadOnlyError", - "Redis", - "RedisCluster", - "RedisError", + "Valkey", + "ValkeyCluster", + "ValkeyError", "ResponseError", "Sentinel", "SentinelConnectionPool", @@ -87,7 +87,7 @@ def int_or_str(value): "SentinelManagedSSLConnection", "SSLConnection", "UsernamePasswordCredentialProvider", - "StrictRedis", + "StrictValkey", "TimeoutError", "UnixDomainSocketConnection", "WatchError", diff --git a/redis/_cache.py b/valkey/_cache.py similarity index 87% rename from redis/_cache.py rename to valkey/_cache.py index 
90288383..164cf041 100644 --- a/redis/_cache.py +++ b/valkey/_cache.py @@ -6,7 +6,7 @@ from enum import Enum from typing import List, Sequence, Union -from redis.typing import KeyT, ResponseT +from valkey.typing import KeyT, ResponseT class EvictionPolicy(Enum): @@ -198,7 +198,7 @@ def invalidate_key(self, key: KeyT): class _LocalCache(AbstractCache): """ - A caching mechanism for storing redis commands and their responses. + A caching mechanism for storing valkey commands and their responses. Args: max_size (int): The maximum number of commands to be stored in the cache. @@ -234,10 +234,10 @@ def set( keys_in_command: List[KeyT], ): """ - Set a redis command and its response in the cache. + Set a valkey command and its response in the cache. Args: - command (Union[str, Sequence[str]]): The redis command. + command (Union[str, Sequence[str]]): The valkey command. response (ResponseT): The response associated with the command. keys_in_command (List[KeyT]): The list of keys used in the command. """ @@ -254,10 +254,10 @@ def set( def get(self, command: Union[str, Sequence[str]]) -> ResponseT: """ - Get the response for a redis command from the cache. + Get the response for a valkey command from the cache. Args: - command (Union[str, Sequence[str]]): The redis command. + command (Union[str, Sequence[str]]): The valkey command. Returns: ResponseT: The response associated with the command, or None if the command is not in the cache. # noqa @@ -271,10 +271,10 @@ def get(self, command: Union[str, Sequence[str]]) -> ResponseT: def delete_command(self, command: Union[str, Sequence[str]]): """ - Delete a redis command and its metadata from the cache. + Delete a valkey command and its metadata from the cache. Args: - command (Union[str, Sequence[str]]): The redis command to be deleted. + command (Union[str, Sequence[str]]): The valkey command to be deleted. 
""" if command in self.cache: keys_in_command = self.cache[command].get("keys") @@ -294,17 +294,17 @@ def delete_commands(self, commands: List[Union[str, Sequence[str]]]): self.delete_command(command) def flush(self): - """Clear the entire cache, removing all redis commands and metadata.""" + """Clear the entire cache, removing all valkey commands and metadata.""" self.cache.clear() self.key_commands_map.clear() self.commands_ttl_list = [] def _is_expired(self, command: Union[str, Sequence[str]]) -> bool: """ - Check if a redis command has expired based on its time-to-live. + Check if a valkey command has expired based on its time-to-live. Args: - command (Union[str, Sequence[str]]): The redis command. + command (Union[str, Sequence[str]]): The valkey command. Returns: bool: True if the command has expired, False otherwise. @@ -315,10 +315,10 @@ def _is_expired(self, command: Union[str, Sequence[str]]) -> bool: def _update_access(self, command: Union[str, Sequence[str]]): """ - Update the access information for a redis command based on the eviction policy. + Update the access information for a valkey command based on the eviction policy. Args: - command (Union[str, Sequence[str]]): The redis command. + command (Union[str, Sequence[str]]): The valkey command. """ if self.eviction_policy == EvictionPolicy.LRU: self.cache.move_to_end(command) @@ -331,7 +331,7 @@ def _update_access(self, command: Union[str, Sequence[str]]): pass # Random eviction doesn't require updates def _evict(self): - """Evict a redis command from the cache based on the eviction policy.""" + """Evict a valkey command from the cache based on the eviction policy.""" if self._is_expired(self.commands_ttl_list[0]): self.delete_command(self.commands_ttl_list[0]) elif self.eviction_policy == EvictionPolicy.LRU: @@ -353,7 +353,7 @@ def _update_key_commands_map( Args: keys (List[KeyT]): The list of keys used in the command. - command (Union[str, Sequence[str]]): The redis command. 
+ command (Union[str, Sequence[str]]): The valkey command. """ for key in keys: self.key_commands_map[key].add(command) @@ -362,18 +362,18 @@ def _del_key_commands_map( self, keys: List[KeyT], command: Union[str, Sequence[str]] ): """ - Remove a redis command from the key_commands_map. + Remove a valkey command from the key_commands_map. Args: - keys (List[KeyT]): The list of keys used in the redis command. - command (Union[str, Sequence[str]]): The redis command. + keys (List[KeyT]): The list of keys used in the valkey command. + command (Union[str, Sequence[str]]): The valkey command. """ for key in keys: self.key_commands_map[key].remove(command) def invalidate_key(self, key: KeyT): """ - Invalidate (delete) all redis commands associated with a specific key. + Invalidate (delete) all valkey commands associated with a specific key. Args: key (KeyT): The key to be invalidated. diff --git a/redis/_parsers/__init__.py b/valkey/_parsers/__init__.py similarity index 100% rename from redis/_parsers/__init__.py rename to valkey/_parsers/__init__.py diff --git a/redis/_parsers/base.py b/valkey/_parsers/base.py similarity index 95% rename from redis/_parsers/base.py rename to valkey/_parsers/base.py index 0137539d..c1e38907 100644 --- a/redis/_parsers/base.py +++ b/valkey/_parsers/base.py @@ -19,8 +19,8 @@ NoScriptError, OutOfMemoryError, ReadOnlyError, - RedisError, ResponseError, + ValkeyError, ) from ..typing import EncodableT from .encoders import Encoder @@ -36,11 +36,11 @@ ) # user send an AUTH cmd to a server without authorization configured NO_AUTH_SET_ERROR = { - # Redis >= 6.0 + # Valkey >= 6.0 "AUTH called without any password " "configured for the default user. 
Are you sure " "your configuration is correct?": AuthenticationError, - # Redis < 6.0 + # Valkey < 6.0 "Client sent AUTH, but no password is set": AuthenticationError, } @@ -50,11 +50,11 @@ class BaseParser(ABC): "ERR": { "max number of clients reached": ConnectionError, "invalid password": AuthenticationError, - # some Redis server versions report invalid command syntax + # some Valkey server versions report invalid command syntax # in lowercase "wrong number of arguments " "for 'auth' command": AuthenticationWrongNumberOfArgsError, - # some Redis server versions report invalid command syntax + # some Valkey server versions report invalid command syntax # in uppercase "wrong number of arguments " "for 'AUTH' command": AuthenticationWrongNumberOfArgsError, @@ -166,7 +166,7 @@ def on_connect(self, connection): """Called when the stream connects""" self._stream = connection._reader if self._stream is None: - raise RedisError("Buffer is closed.") + raise ValkeyError("Buffer is closed.") self.encoder = connection.encoder self._clear() self._connected = True @@ -177,7 +177,7 @@ def on_disconnect(self): async def can_read_destructive(self) -> bool: if not self._connected: - raise RedisError("Buffer is closed.") + raise ValkeyError("Buffer is closed.") if self._buffer: return True try: diff --git a/redis/_parsers/commands.py b/valkey/_parsers/commands.py similarity index 93% rename from redis/_parsers/commands.py rename to valkey/_parsers/commands.py index b5109252..0f4409e4 100644 --- a/redis/_parsers/commands.py +++ b/valkey/_parsers/commands.py @@ -1,10 +1,10 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union -from redis.exceptions import RedisError, ResponseError -from redis.utils import str_if_bytes +from valkey.exceptions import ResponseError, ValkeyError +from valkey.utils import str_if_bytes if TYPE_CHECKING: - from redis.asyncio.cluster import ClusterNode + from valkey.asyncio.cluster import ClusterNode class AbstractCommandsParser: @@ -55,16 
+55,16 @@ def parse_subcommand(self, command, **options): class CommandsParser(AbstractCommandsParser): """ - Parses Redis commands to get command keys. + Parses Valkey commands to get command keys. COMMAND output is used to determine key locations. Commands that do not have a predefined key location are flagged with 'movablekeys', and these commands' keys are determined by the command 'COMMAND GETKEYS'. """ - def __init__(self, redis_connection): + def __init__(self, valkey_connection): self.commands = {} - self.initialize(redis_connection) + self.initialize(valkey_connection) def initialize(self, r): commands = r.command() @@ -79,7 +79,7 @@ def initialize(self, r): # As soon as this PR is merged into Redis, we should reimplement # our logic to use COMMAND INFO changes to determine the key positions # https://github.com/redis/redis/pull/8324 - def get_keys(self, redis_conn, *args): + def get_keys(self, valkey_conn, *args): """ Get the keys from the passed command. @@ -106,15 +106,15 @@ def get_keys(self, redis_conn, *args): else: # We'll try to reinitialize the commands cache, if the engine # version has changed, the commands may not be current - self.initialize(redis_conn) + self.initialize(valkey_conn) if cmd_name not in self.commands: - raise RedisError( - f"{cmd_name.upper()} command doesn't exist in Redis commands" + raise ValkeyError( + f"{cmd_name.upper()} command doesn't exist in Valkey commands" ) command = self.commands.get(cmd_name) if "movablekeys" in command["flags"]: - keys = self._get_moveable_keys(redis_conn, *args) + keys = self._get_moveable_keys(valkey_conn, *args) elif "pubsub" in command["flags"] or command["name"] == "pubsub": keys = self._get_pubsub_keys(*args) else: @@ -144,7 +144,7 @@ def get_keys(self, redis_conn, *args): return keys - def _get_moveable_keys(self, redis_conn, *args): + def _get_moveable_keys(self, valkey_conn, *args): """ NOTE: Due to a bug in redis<7.0, this function does not work properly for EVAL or EVALSHA when the 
`numkeys` arg is 0. @@ -157,7 +157,7 @@ def _get_moveable_keys(self, redis_conn, *args): # e.g. 'MEMORY USAGE' will be splitted into ['MEMORY', 'USAGE'] pieces = args[0].split() + list(args[1:]) try: - keys = redis_conn.execute_command("COMMAND GETKEYS", *pieces) + keys = valkey_conn.execute_command("COMMAND GETKEYS", *pieces) except ResponseError as e: message = e.__str__() if ( @@ -172,7 +172,7 @@ def _get_moveable_keys(self, redis_conn, *args): class AsyncCommandsParser(AbstractCommandsParser): """ - Parses Redis commands to get command keys. + Parses Valkey commands to get command keys. COMMAND output is used to determine key locations. Commands that do not have a predefined key location are flagged with 'movablekeys', @@ -230,8 +230,8 @@ async def get_keys(self, *args: Any) -> Optional[Tuple[str, ...]]: # version has changed, the commands may not be current await self.initialize() if cmd_name not in self.commands: - raise RedisError( - f"{cmd_name.upper()} command doesn't exist in Redis commands" + raise ValkeyError( + f"{cmd_name.upper()} command doesn't exist in Valkey commands" ) command = self.commands.get(cmd_name) diff --git a/redis/_parsers/encoders.py b/valkey/_parsers/encoders.py similarity index 100% rename from redis/_parsers/encoders.py rename to valkey/_parsers/encoders.py diff --git a/redis/_parsers/helpers.py b/valkey/_parsers/helpers.py similarity index 97% rename from redis/_parsers/helpers.py rename to valkey/_parsers/helpers.py index 74faa5fd..1930c33d 100644 --- a/redis/_parsers/helpers.py +++ b/valkey/_parsers/helpers.py @@ -1,6 +1,6 @@ import datetime -from redis.utils import str_if_bytes +from valkey.utils import str_if_bytes def timestamp_to_datetime(response): @@ -15,7 +15,7 @@ def timestamp_to_datetime(response): def parse_debug_object(response): - "Parse the results of Redis's DEBUG OBJECT command into a Python dict" + "Parse the results of Valkey's DEBUG OBJECT command into a Python dict" # The 'type' of the object is the first item 
in the response, but isn't # prefixed with a name response = str_if_bytes(response) @@ -23,7 +23,7 @@ def parse_debug_object(response): response = dict(kv.split(":") for kv in response.split()) # parse some expected int values from the string response - # note: this cmd isn't spec'd so these may not appear in all redis versions + # note: this cmd isn't spec'd so these may not appear in all valkey versions int_fields = ("refcount", "serializedlength", "lru", "lru_seconds_idle") for field in int_fields: if field in response: @@ -33,7 +33,7 @@ def parse_debug_object(response): def parse_info(response): - """Parse the result of Redis's INFO command into a Python dict""" + """Parse the result of Valkey's INFO command into a Python dict""" info = {} response = str_if_bytes(response) @@ -379,7 +379,7 @@ def parse_slowlog_get(response, **options): def parse_item(item): result = {"id": item[0], "start_time": int(item[1]), "duration": int(item[2])} - # Redis Enterprise injects another entry at index [3], which has + # Valkey Enterprise injects another entry at index [3], which has # the complexity info (i.e. the value N in case the command has # an O(N) complexity) instead of the command. if isinstance(item[3], list): @@ -473,8 +473,8 @@ def _parse_slots(slot_ranges): def parse_cluster_nodes(response, **options): """ - @see: https://redis.io/commands/cluster-nodes # string / bytes - @see: https://redis.io/commands/cluster-replicas # list of string / bytes + @see: https://valkey.io/commands/cluster-nodes # string / bytes + @see: https://valkey.io/commands/cluster-replicas # list of string / bytes """ if isinstance(response, (str, bytes)): response = response.splitlines() @@ -661,13 +661,13 @@ def parse_client_info(value): def parse_set_result(response, **options): """ - Handle SET result since GET argument is available since Redis 6.2. + Handle SET result since GET argument is available since Valkey 6.2. 
Parsing SET result into: - BOOL - String when GET argument is used """ if options.get("get"): - # Redis will return a getCommand result. + # Valkey will return a getCommand result. # See `setGenericCommand` in t_string.c return response return response and str_if_bytes(response) == "OK" @@ -677,7 +677,7 @@ def string_keys_to_dict(key_string, callback): return dict.fromkeys(key_string.split(), callback) -_RedisCallbacks = { +_ValkeyCallbacks = { **string_keys_to_dict( "AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST PSETEX " "PEXPIRE PEXPIREAT RENAMENX SETEX SETNX SMOVE", @@ -770,7 +770,7 @@ def string_keys_to_dict(key_string, callback): } -_RedisCallbacksRESP2 = { +_ValkeyCallbacksRESP2 = { **string_keys_to_dict( "SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set() ), @@ -817,7 +817,7 @@ def string_keys_to_dict(key_string, callback): } -_RedisCallbacksRESP3 = { +_ValkeyCallbacksRESP3 = { **string_keys_to_dict( "ZRANGE ZINTER ZPOPMAX ZPOPMIN ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE " "ZUNION HGETALL XREADGROUP", diff --git a/redis/_parsers/hiredis.py b/valkey/_parsers/hiredis.py similarity index 97% rename from redis/_parsers/hiredis.py rename to valkey/_parsers/hiredis.py index a52dbbd0..dd381179 100644 --- a/redis/_parsers/hiredis.py +++ b/valkey/_parsers/hiredis.py @@ -8,7 +8,7 @@ else: from async_timeout import timeout as async_timeout -from ..exceptions import ConnectionError, InvalidResponse, RedisError +from ..exceptions import ConnectionError, InvalidResponse, ValkeyError from ..typing import EncodableT from ..utils import HIREDIS_AVAILABLE from .base import AsyncBaseParser, BaseParser @@ -32,7 +32,7 @@ class _HiredisParser(BaseParser): def __init__(self, socket_read_size): if not HIREDIS_AVAILABLE: - raise RedisError("Hiredis is not installed") + raise ValkeyError("Hiredis is not installed") self.socket_read_size = socket_read_size self._buffer = bytearray(socket_read_size) @@ -145,7 +145,7 @@ class 
_AsyncHiredisParser(AsyncBaseParser): def __init__(self, socket_read_size: int): if not HIREDIS_AVAILABLE: - raise RedisError("Hiredis is not available.") + raise ValkeyError("Hiredis is not available.") super().__init__(socket_read_size=socket_read_size) self._reader = None diff --git a/redis/_parsers/resp2.py b/valkey/_parsers/resp2.py similarity index 100% rename from redis/_parsers/resp2.py rename to valkey/_parsers/resp2.py diff --git a/redis/_parsers/resp3.py b/valkey/_parsers/resp3.py similarity index 98% rename from redis/_parsers/resp3.py rename to valkey/_parsers/resp3.py index 7afa43a0..25167289 100644 --- a/redis/_parsers/resp3.py +++ b/valkey/_parsers/resp3.py @@ -87,7 +87,7 @@ def _read_response(self, disable_decoding=False, push_request=False): ] # set response elif byte == b"~": - # redis can return unhashable types (like dict) in a set, + # valkey can return unhashable types (like dict) in a set, # so we need to first convert to a list, and then try to convert it to a set response = [ self._read_response(disable_decoding=disable_decoding) @@ -229,7 +229,7 @@ async def _read_response( ] # set response elif byte == b"~": - # redis can return unhashable types (like dict) in a set, + # valkey can return unhashable types (like dict) in a set, # so we need to first convert to a list, and then try to convert it to a set response = [ (await self._read_response(disable_decoding=disable_decoding)) diff --git a/redis/_parsers/socket.py b/valkey/_parsers/socket.py similarity index 100% rename from redis/_parsers/socket.py rename to valkey/_parsers/socket.py diff --git a/redis/asyncio/__init__.py b/valkey/asyncio/__init__.py similarity index 74% rename from redis/asyncio/__init__.py rename to valkey/asyncio/__init__.py index 3545ab44..3ad82761 100644 --- a/redis/asyncio/__init__.py +++ b/valkey/asyncio/__init__.py @@ -1,21 +1,21 @@ -from redis.asyncio.client import Redis, StrictRedis -from redis.asyncio.cluster import RedisCluster -from redis.asyncio.connection 
import ( +from valkey.asyncio.client import StrictValkey, Valkey +from valkey.asyncio.cluster import ValkeyCluster +from valkey.asyncio.connection import ( BlockingConnectionPool, Connection, ConnectionPool, SSLConnection, UnixDomainSocketConnection, ) -from redis.asyncio.sentinel import ( +from valkey.asyncio.sentinel import ( Sentinel, SentinelConnectionPool, SentinelManagedConnection, SentinelManagedSSLConnection, ) -from redis.asyncio.utils import from_url -from redis.backoff import default_backoff -from redis.exceptions import ( +from valkey.asyncio.utils import from_url +from valkey.backoff import default_backoff +from valkey.exceptions import ( AuthenticationError, AuthenticationWrongNumberOfArgsError, BusyLoadingError, @@ -26,9 +26,9 @@ OutOfMemoryError, PubSubError, ReadOnlyError, - RedisError, ResponseError, TimeoutError, + ValkeyError, WatchError, ) @@ -48,16 +48,16 @@ "PubSubError", "OutOfMemoryError", "ReadOnlyError", - "Redis", - "RedisCluster", - "RedisError", + "Valkey", + "ValkeyCluster", + "ValkeyError", "ResponseError", "Sentinel", "SentinelConnectionPool", "SentinelManagedConnection", "SentinelManagedSSLConnection", "SSLConnection", - "StrictRedis", + "StrictValkey", "TimeoutError", "UnixDomainSocketConnection", "WatchError", diff --git a/redis/asyncio/client.py b/valkey/asyncio/client.py similarity index 92% rename from redis/asyncio/client.py rename to valkey/asyncio/client.py index 1845b725..a34e6245 100644 --- a/redis/asyncio/client.py +++ b/valkey/asyncio/client.py @@ -26,50 +26,50 @@ cast, ) -from redis._cache import ( +from valkey._cache import ( DEFAULT_ALLOW_LIST, DEFAULT_DENY_LIST, DEFAULT_EVICTION_POLICY, AbstractCache, ) -from redis._parsers.helpers import ( - _RedisCallbacks, - _RedisCallbacksRESP2, - _RedisCallbacksRESP3, +from valkey._parsers.helpers import ( + _ValkeyCallbacks, + _ValkeyCallbacksRESP2, + _ValkeyCallbacksRESP3, bool_ok, ) -from redis.asyncio.connection import ( +from valkey.asyncio.connection import ( Connection, 
ConnectionPool, SSLConnection, UnixDomainSocketConnection, ) -from redis.asyncio.lock import Lock -from redis.asyncio.retry import Retry -from redis.client import ( +from valkey.asyncio.lock import Lock +from valkey.asyncio.retry import Retry +from valkey.client import ( EMPTY_RESPONSE, NEVER_DECODE, - AbstractRedis, + AbstractValkey, CaseInsensitiveDict, ) -from redis.commands import ( +from valkey.commands import ( AsyncCoreCommands, - AsyncRedisModuleCommands, AsyncSentinelCommands, + AsyncValkeyModuleCommands, list_or_args, ) -from redis.credentials import CredentialProvider -from redis.exceptions import ( +from valkey.credentials import CredentialProvider +from valkey.exceptions import ( ConnectionError, ExecAbortError, PubSubError, - RedisError, ResponseError, TimeoutError, + ValkeyError, WatchError, ) -from redis.typing import ChannelT, EncodableT, KeyT -from redis.utils import ( +from valkey.typing import ChannelT, EncodableT, KeyT +from valkey.utils import ( HIREDIS_AVAILABLE, _set_info_logger, deprecated_function, @@ -81,10 +81,10 @@ PubSubHandler = Callable[[Dict[str, str]], Awaitable[None]] _KeyT = TypeVar("_KeyT", bound=KeyT) _ArgT = TypeVar("_ArgT", KeyT, EncodableT) -_RedisT = TypeVar("_RedisT", bound="Redis") +_ValkeyT = TypeVar("_ValkeyT", bound="Valkey") _NormalizeKeysT = TypeVar("_NormalizeKeysT", bound=Mapping[ChannelT, object]) if TYPE_CHECKING: - from redis.commands.core import Script + from valkey.commands.core import Script class ResponseCallbackProtocol(Protocol): @@ -98,19 +98,19 @@ async def __call__(self, response: Any, **kwargs): ... ResponseCallbackT = Union[ResponseCallbackProtocol, AsyncResponseCallbackProtocol] -class Redis( - AbstractRedis, AsyncRedisModuleCommands, AsyncCoreCommands, AsyncSentinelCommands +class Valkey( + AbstractValkey, AsyncValkeyModuleCommands, AsyncCoreCommands, AsyncSentinelCommands ): """ - Implementation of the Redis protocol. + Implementation of the Valkey protocol. 
- This abstract class provides a Python interface to all Redis commands - and an implementation of the Redis protocol. + This abstract class provides a Python interface to all Valkey commands + and an implementation of the Valkey protocol. Pipelines derive from this, implementing how - the commands are sent and received to the Redis server. Based on + the commands are sent and received to the Valkey server. Based on configuration, an instance will either use a ConnectionPool, or - Connection object to talk to redis. + Connection object to talk to valkey. """ response_callbacks: MutableMapping[Union[str, bytes], ResponseCallbackT] @@ -124,20 +124,18 @@ def from_url( **kwargs, ): """ - Return a Redis client object configured from the given URL + Return a Valkey client object configured from the given URL For example:: - redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 + valkey://[[username]:[password]]@localhost:6379/0 + valkeys://[[username]:[password]]@localhost:6379/0 unix://[username@]/path/to/socket.sock?db=0[&password=password] Three URL schemes are supported: - - `redis://` creates a TCP socket connection. See more at: - - - `rediss://` creates a SSL wrapped TCP socket connection. See more at: - + - `valkey://` creates a TCP socket connection. + - `valkeys://` creates a SSL wrapped TCP socket connection. - ``unix://``: creates a Unix Domain Socket connection. The username, password, hostname, path and all querystring values @@ -147,10 +145,10 @@ def from_url( There are several ways to specify a database number. The first value found will be used: - 1. A ``db`` querystring option, e.g. redis://localhost?db=0 + 1. A ``db`` querystring option, e.g. valkey://localhost?db=0 - 2. If using the redis:// or rediss:// schemes, the path argument - of the url, e.g. redis://localhost/0 + 2. If using the valkey:// or valkeys:// schemes, the path argument + of the url, e.g. valkey://localhost/0 3. 
A ``db`` keyword argument to this function. @@ -176,7 +174,7 @@ class initializer. In the case of conflicting arguments, querystring '"auto_close_connection_pool" is deprecated ' "since version 5.0.1. " "Please create a ConnectionPool explicitly and " - "provide to the Redis() constructor instead." + "provide to the Valkey() constructor instead." ) ) else: @@ -186,13 +184,13 @@ class initializer. In the case of conflicting arguments, querystring @classmethod def from_pool( - cls: Type["Redis"], + cls: Type["Valkey"], connection_pool: ConnectionPool, - ) -> "Redis": + ) -> "Valkey": """ - Return a Redis client from the given connection pool. - The Redis client will take ownership of the connection pool and - close it when the Redis client is closed. + Return a Valkey client from the given connection pool. + The Valkey client will take ownership of the connection pool and + close it when the Valkey client is closed. """ client = cls( connection_pool=connection_pool, @@ -231,12 +229,12 @@ def __init__( single_connection_client: bool = False, health_check_interval: int = 0, client_name: Optional[str] = None, - lib_name: Optional[str] = "redis-py", + lib_name: Optional[str] = "valkey-py", lib_version: Optional[str] = get_lib_version(), username: Optional[str] = None, retry: Optional[Retry] = None, auto_close_connection_pool: Optional[bool] = None, - redis_connect_func=None, + valkey_connect_func=None, credential_provider: Optional[CredentialProvider] = None, protocol: Optional[int] = 2, cache_enabled: bool = False, @@ -248,7 +246,7 @@ def __init__( cache_allow_list: List[str] = DEFAULT_ALLOW_LIST, ): """ - Initialize a new Redis client. + Initialize a new Valkey client. To specify a retry policy for specific errors, first set `retry_on_error` to a list of the error/s to retry on, then set `retry` to a valid `Retry` object. @@ -264,14 +262,14 @@ def __init__( '"auto_close_connection_pool" is deprecated ' "since version 5.0.1. 
" "Please create a ConnectionPool explicitly and " - "provide to the Redis() constructor instead." + "provide to the Valkey() constructor instead." ) ) else: auto_close_connection_pool = True if not connection_pool: - # Create internal connection pool, expected to be closed by Redis instance + # Create internal connection pool, expected to be closed by Valkey instance if not retry_on_error: retry_on_error = [] if retry_on_timeout is True: @@ -293,7 +291,7 @@ def __init__( "client_name": client_name, "lib_name": lib_name, "lib_version": lib_version, - "redis_connect_func": redis_connect_func, + "valkey_connect_func": valkey_connect_func, "protocol": protocol, "cache_enabled": cache_enabled, "client_cache": client_cache, @@ -348,16 +346,16 @@ def __init__( self.single_connection_client = single_connection_client self.connection: Optional[Connection] = None - self.response_callbacks = CaseInsensitiveDict(_RedisCallbacks) + self.response_callbacks = CaseInsensitiveDict(_ValkeyCallbacks) if self.connection_pool.connection_kwargs.get("protocol") in ["3", 3]: - self.response_callbacks.update(_RedisCallbacksRESP3) + self.response_callbacks.update(_ValkeyCallbacksRESP3) else: - self.response_callbacks.update(_RedisCallbacksRESP2) + self.response_callbacks.update(_ValkeyCallbacksRESP2) # If using a single connection client, we need to lock creation-of and use-of # the client in order to avoid race conditions such as using asyncio.gather - # on a set of redis commands + # on a set of valkey commands self._single_conn_lock = asyncio.Lock() def __repr__(self): @@ -369,7 +367,7 @@ def __repr__(self): def __await__(self): return self.initialize().__await__() - async def initialize(self: _RedisT) -> _RedisT: + async def initialize(self: _ValkeyT) -> _ValkeyT: if self.single_connection_client: async with self._single_conn_lock: if self.connection is None: @@ -397,19 +395,19 @@ def set_retry(self, retry: "Retry") -> None: def load_external_module(self, funcname, func): """ - This 
function can be used to add externally defined redis modules, - and their namespaces to the redis client. + This function can be used to add externally defined valkey modules, + and their namespaces to the valkey client. funcname - A string containing the name of the function to create func - The function, being added to this class. - ex: Assume that one has a custom redis module named foomod that - creates command named 'foo.dothing' and 'foo.anotherthing' in redis. + ex: Assume that one has a custom valkey module named foomod that + creates command named 'foo.dothing' and 'foo.anotherthing' in valkey. To load function functions into this namespace: - from redis import Redis + from valkey import Valkey from foomodule import F - r = Redis() + r = Valkey() r.load_external_module("foo", F) r.foo().dothing('your', 'arguments') @@ -494,7 +492,7 @@ def lock( float or integer, both representing the number of seconds to wait. ``lock_class`` forces the specified lock implementation. Note that as - of redis-py 3.0, the only lock class we implement is ``Lock`` (which is + of valkey-py 3.0, the only lock class we implement is ``Lock`` (which is a Lua-based lock). So, it's unlikely you'll need this parameter, unless you have created your own custom lock class. @@ -507,7 +505,7 @@ def lock( thread-1 sets the token to "abc" time: 1, thread-2 blocks trying to acquire `my-lock` using the Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock + time: 5, thread-1 has not yet completed. valkey expires the lock key. time: 5, thread-2 acquired `my-lock` now that it's available. 
thread-2 sets the token to "xyz" @@ -546,18 +544,18 @@ def pubsub(self, **kwargs) -> "PubSub": def monitor(self) -> "Monitor": return Monitor(self.connection_pool) - def client(self) -> "Redis": + def client(self) -> "Valkey": return self.__class__( connection_pool=self.connection_pool, single_connection_client=True ) - async def __aenter__(self: _RedisT) -> _RedisT: + async def __aenter__(self: _ValkeyT) -> _ValkeyT: return await self.initialize() async def __aexit__(self, exc_type, exc_value, traceback): await self.aclose() - _DEL_MESSAGE = "Unclosed Redis client" + _DEL_MESSAGE = "Unclosed Valkey client" # passing _warnings and _grl as argument default since they may be gone # by the time __del__ is called at shutdown @@ -577,11 +575,11 @@ def __del__( async def aclose(self, close_connection_pool: Optional[bool] = None) -> None: """ - Closes Redis client connection + Closes Valkey client connection :param close_connection_pool: decides whether to close the connection pool used - by this Redis client, overriding Redis.auto_close_connection_pool. By default, - let Redis.auto_close_connection_pool decide whether to close the connection + by this Valkey client, overriding Valkey.auto_close_connection_pool. By default, + let Valkey.auto_close_connection_pool decide whether to close the connection pool. 
""" conn = self.connection @@ -655,7 +653,7 @@ async def execute_command(self, *args, **options): async def parse_response( self, connection: Connection, command_name: Union[str, bytes], **options ): - """Parses a response from the Redis server""" + """Parses a response from the Valkey server""" try: if NEVER_DECODE in options: response = await connection.read_response(disable_decoding=True) @@ -696,7 +694,7 @@ def invalidate_key_from_cache(self, key): self.connection_pool.invalidate_key_from_cache(key) -StrictRedis = Redis +StrictValkey = Valkey class MonitorCommandInfo(TypedDict): @@ -710,7 +708,7 @@ class MonitorCommandInfo(TypedDict): class Monitor: """ - Monitor is useful for handling the MONITOR command to the redis server. + Monitor is useful for handling the MONITOR command to the valkey server. next_command() method returns one command from monitor listen() method yields commands from monitor. """ @@ -732,7 +730,7 @@ async def __aenter__(self): # check that monitor returns 'OK', but don't return it to user response = await self.connection.read_response() if not bool_ok(response): - raise RedisError(f"MONITOR failed: {response}") + raise ValkeyError(f"MONITOR failed: {response}") return self async def __aexit__(self, *args): @@ -749,7 +747,7 @@ async def next_command(self) -> MonitorCommandInfo: m = self.monitor_re.match(command_data) db_id, client_info, command = m.groups() command = " ".join(self.command_re.findall(command)) - # Redis escapes double quotes because each piece of the command + # Valkey escapes double quotes because each piece of the command # string is surrounded by double quotes. We don't have that # requirement so remove the escaping and leave the quote. command = command.replace('\\"', '"') @@ -783,7 +781,7 @@ async def listen(self) -> AsyncIterator[MonitorCommandInfo]: class PubSub: """ - PubSub provides publish, subscribe and listen support to Redis channels. + PubSub provides publish, subscribe and listen support to Valkey channels. 
After subscribing to one or more channels, the listen() method will block until a message arrives on one of the subscribed channels. That message @@ -792,7 +790,7 @@ class PubSub: PUBLISH_MESSAGE_TYPES = ("message", "pmessage") UNSUBSCRIBE_MESSAGE_TYPES = ("unsubscribe", "punsubscribe") - HEALTH_CHECK_MESSAGE = "redis-py-health-check" + HEALTH_CHECK_MESSAGE = "valkey-py-health-check" def __init__( self, @@ -842,7 +840,7 @@ def __del__(self): async def aclose(self): # In case a connection property does not yet exist - # (due to a crash earlier in the Redis() constructor), return + # (due to a crash earlier in the Valkey() constructor), return # immediately as there is nothing to clean-up. if not hasattr(self, "connection"): return @@ -935,7 +933,7 @@ async def _disconnect_raise_connect(self, conn, error): async def _execute(self, conn, command, *args, **kwargs): """ - Connect manually upon disconnection. If the Redis server is down, + Connect manually upon disconnection. If the Valkey server is down, this will fail and raise a ConnectionError as desired. After reconnection, the ``on_connect`` callback should have been called by the # connection to resubscribe us to any channels and @@ -1095,7 +1093,7 @@ async def get_message( def ping(self, message=None) -> Awaitable: """ - Ping the Redis server + Ping the Valkey server """ args = ["PING", message] if message is not None else ["PING"] return self.execute_command(*args) @@ -1174,8 +1172,8 @@ async def run( ) -> None: """Process pub/sub messages using registered callbacks. - This is the equivalent of :py:meth:`redis.PubSub.run_in_thread` in - redis-py, but it is a coroutine. To launch it as a separate task, use + This is the equivalent of :py:meth:`valkey.PubSub.run_in_thread` in + valkey-py, but it is a coroutine. To launch it as a separate task, use ``asyncio.create_task``: >>> task = asyncio.create_task(pubsub.run()) @@ -1228,11 +1226,11 @@ async def __call__(self, e: BaseException, pubsub: PubSub): ... 
CommandStackT = List[CommandT] -class Pipeline(Redis): # lgtm [py/init-calls-subclass] +class Pipeline(Valkey): # lgtm [py/init-calls-subclass] """ - Pipelines provide a way to transmit multiple commands to the Redis server + Pipelines provide a way to transmit multiple commands to the Valkey server in one transmission. This is convenient for batch processing, such as - saving all the values in a list to Redis. + saving all the values in a list to Valkey. All commands executed within a pipeline are wrapped with MULTI and EXEC calls. This guarantees all commands executed in the pipeline will be @@ -1266,7 +1264,7 @@ def __init__( self.scripts: Set["Script"] = set() self.explicit_transaction = False - async def __aenter__(self: _RedisT) -> _RedisT: + async def __aenter__(self: _ValkeyT) -> _ValkeyT: return self async def __aexit__(self, exc_type, exc_value, traceback): @@ -1321,9 +1319,9 @@ def multi(self): are issued. End the transactional block with `execute`. """ if self.explicit_transaction: - raise RedisError("Cannot issue nested calls to MULTI") + raise ValkeyError("Cannot issue nested calls to MULTI") if self.command_stack: - raise RedisError( + raise ValkeyError( "Commands without an initial WATCH have already been issued" ) self.explicit_transaction = True @@ -1582,14 +1580,14 @@ async def execute(self, raise_on_error: bool = True): async def discard(self): """Flushes all previously queued commands - See: https://redis.io/commands/DISCARD + See: https://valkey.io/commands/discard """ await self.execute_command("DISCARD") async def watch(self, *names: KeyT): """Watches the values at keys ``names``""" if self.explicit_transaction: - raise RedisError("Cannot issue a WATCH after a MULTI") + raise ValkeyError("Cannot issue a WATCH after a MULTI") return await self.execute_command("WATCH", *names) async def unwatch(self): diff --git a/redis/asyncio/cluster.py b/valkey/asyncio/cluster.py similarity index 92% rename from redis/asyncio/cluster.py rename to 
valkey/asyncio/cluster.py index d5e57560..7183d687 100644 --- a/redis/asyncio/cluster.py +++ b/valkey/asyncio/cluster.py @@ -19,39 +19,44 @@ Union, ) -from redis._cache import ( +from valkey._cache import ( DEFAULT_ALLOW_LIST, DEFAULT_DENY_LIST, DEFAULT_EVICTION_POLICY, AbstractCache, ) -from redis._parsers import AsyncCommandsParser, Encoder -from redis._parsers.helpers import ( - _RedisCallbacks, - _RedisCallbacksRESP2, - _RedisCallbacksRESP3, +from valkey._parsers import AsyncCommandsParser, Encoder +from valkey._parsers.helpers import ( + _ValkeyCallbacks, + _ValkeyCallbacksRESP2, + _ValkeyCallbacksRESP3, ) -from redis.asyncio.client import ResponseCallbackT -from redis.asyncio.connection import Connection, DefaultParser, SSLConnection, parse_url -from redis.asyncio.lock import Lock -from redis.asyncio.retry import Retry -from redis.backoff import default_backoff -from redis.client import EMPTY_RESPONSE, NEVER_DECODE, AbstractRedis -from redis.cluster import ( +from valkey.asyncio.client import ResponseCallbackT +from valkey.asyncio.connection import ( + Connection, + DefaultParser, + SSLConnection, + parse_url, +) +from valkey.asyncio.lock import Lock +from valkey.asyncio.retry import Retry +from valkey.backoff import default_backoff +from valkey.client import EMPTY_RESPONSE, NEVER_DECODE, AbstractValkey +from valkey.cluster import ( PIPELINE_BLOCKED_COMMANDS, PRIMARY, REPLICA, SLOT_ID, - AbstractRedisCluster, + AbstractValkeyCluster, LoadBalancer, block_pipeline_command, get_node_name, parse_cluster_slots, ) -from redis.commands import READ_COMMANDS, AsyncRedisClusterCommands -from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot -from redis.credentials import CredentialProvider -from redis.exceptions import ( +from valkey.commands import READ_COMMANDS, AsyncValkeyClusterCommands +from valkey.crc import VALKEY_CLUSTER_HASH_SLOTS, key_slot +from valkey.credentials import CredentialProvider +from valkey.exceptions import ( AskError, BusyLoadingError, 
ClusterCrossSlotError, @@ -62,14 +67,14 @@ MasterDownError, MaxConnectionsError, MovedError, - RedisClusterException, ResponseError, SlotNotCoveredError, TimeoutError, TryAgainError, + ValkeyClusterException, ) -from redis.typing import AnyKeyT, EncodableT, KeyT -from redis.utils import ( +from valkey.typing import AnyKeyT, EncodableT, KeyT +from valkey.utils import ( deprecated_function, dict_merge, get_lib_version, @@ -96,9 +101,9 @@ class ClusterParser(DefaultParser): ) -class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommands): +class ValkeyCluster(AbstractValkey, AbstractValkeyCluster, AsyncValkeyClusterCommands): """ - Create a new RedisCluster client. + Create a new ValkeyCluster client. Pass one of parameters: @@ -131,10 +136,10 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand has ``cluster-require-full-coverage`` set to ``yes``, the server will throw a :class:`~.ClusterDownError` for some key-based commands. | When set to ``True``: all slots must be covered to construct the cluster - client. If not all slots are covered, :class:`~.RedisClusterException` will be - thrown. + client. If not all slots are covered, :class:`~.ValkeyClusterException` + will be thrown. | See: - https://redis.io/docs/manual/scaling/#redis-cluster-configuration-parameters + https://valkey.io/docs/manual/scaling/#valkey-cluster-configuration-parameters :param read_from_replicas: | Enable read from replicas in READONLY mode. You can read possibly stale data. When set to true, read commands will be assigned between the primary and @@ -169,9 +174,9 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand reach them, such as when they sit behind a proxy. 
| Rest of the arguments will be passed to the - :class:`~redis.asyncio.connection.Connection` instances when created + :class:`~valkey.asyncio.connection.Connection` instances when created - :raises RedisClusterException: + :raises ValkeyClusterException: if any arguments are invalid or unknown. Eg: - `db` != 0 or None @@ -181,21 +186,19 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand """ @classmethod - def from_url(cls, url: str, **kwargs: Any) -> "RedisCluster": + def from_url(cls, url: str, **kwargs: Any) -> "ValkeyCluster": """ - Return a Redis client object configured from the given URL. + Return a Valkey client object configured from the given URL. For example:: - redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 + valkey://[[username]:[password]]@localhost:6379/0 + valkeys://[[username]:[password]]@localhost:6379/0 Three URL schemes are supported: - - `redis://` creates a TCP socket connection. See more at: - - - `rediss://` creates a SSL wrapped TCP socket connection. See more at: - + - `valkey://` creates a TCP socket connection. + - `valkeys://` creates a SSL wrapped TCP socket connection. The username, password, hostname, path and all querystring values are passed through ``urllib.parse.unquote`` in order to replace any percent-encoded values @@ -205,7 +208,7 @@ def from_url(cls, url: str, **kwargs: Any) -> "RedisCluster": arguments can be specified with string values "True"/"False" or "Yes"/"No". Values that cannot be properly cast cause a ``ValueError`` to be raised. Once parsed, the querystring arguments and keyword arguments are passed to - :class:`~redis.asyncio.connection.Connection` when created. + :class:`~valkey.asyncio.connection.Connection` when created. In the case of conflicting arguments, querystring arguments are used. 
""" kwargs.update(parse_url(url)) @@ -250,7 +253,7 @@ def __init__( username: Optional[str] = None, password: Optional[str] = None, client_name: Optional[str] = None, - lib_name: Optional[str] = "redis-py", + lib_name: Optional[str] = "valkey-py", lib_version: Optional[str] = get_lib_version(), # Encoding related kwargs encoding: str = "utf-8", @@ -285,21 +288,21 @@ def __init__( cache_allow_list: List[str] = DEFAULT_ALLOW_LIST, ) -> None: if db: - raise RedisClusterException( + raise ValkeyClusterException( "Argument 'db' must be 0 or None in cluster mode" ) if path: - raise RedisClusterException( + raise ValkeyClusterException( "Unix domain socket is not supported in cluster mode" ) if (not host or not port) and not startup_nodes: - raise RedisClusterException( - "RedisCluster requires at least one node to discover the cluster.\n" - "Please provide one of the following or use RedisCluster.from_url:\n" - ' - host and port: RedisCluster(host="localhost", port=6379)\n' - " - startup_nodes: RedisCluster(startup_nodes=[" + raise ValkeyClusterException( + "ValkeyCluster requires at least one node to discover the cluster.\n" + "Please provide one of the following or use ValkeyCluster.from_url:\n" + ' - host and port: ValkeyCluster(host="localhost", port=6379)\n' + " - startup_nodes: ValkeyCluster(startup_nodes=[" 'ClusterNode("localhost", 6379), ClusterNode("localhost", 6380)])' ) @@ -354,7 +357,7 @@ def __init__( if read_from_replicas: # Call our on_connect function to configure READONLY mode - kwargs["redis_connect_func"] = self.on_connect + kwargs["valkey_connect_func"] = self.on_connect self.retry = retry if retry or retry_on_error or connection_error_retry_attempts > 0: @@ -368,11 +371,11 @@ def __init__( self.retry.update_supported_errors(retry_on_error) kwargs.update({"retry": self.retry}) - kwargs["response_callbacks"] = _RedisCallbacks.copy() + kwargs["response_callbacks"] = _ValkeyCallbacks.copy() if kwargs.get("protocol") in ["3", 3]: - 
kwargs["response_callbacks"].update(_RedisCallbacksRESP3) + kwargs["response_callbacks"].update(_ValkeyCallbacksRESP3) else: - kwargs["response_callbacks"].update(_RedisCallbacksRESP2) + kwargs["response_callbacks"].update(_ValkeyCallbacksRESP2) self.connection_kwargs = kwargs if startup_nodes: @@ -413,7 +416,7 @@ def __init__( self._initialize = True self._lock: Optional[asyncio.Lock] = None - async def initialize(self) -> "RedisCluster": + async def initialize(self) -> "ValkeyCluster": """Get all nodes from startup nodes & creates connections if not initialized.""" if self._initialize: if not self._lock: @@ -448,16 +451,16 @@ async def close(self) -> None: """alias for aclose() for backwards compatibility""" await self.aclose() - async def __aenter__(self) -> "RedisCluster": + async def __aenter__(self) -> "ValkeyCluster": return await self.initialize() async def __aexit__(self, exc_type: None, exc_value: None, traceback: None) -> None: await self.aclose() - def __await__(self) -> Generator[Any, None, "RedisCluster"]: + def __await__(self) -> Generator[Any, None, "ValkeyCluster"]: return self.initialize().__await__() - _DEL_MESSAGE = "Unclosed RedisCluster client" + _DEL_MESSAGE = "Unclosed ValkeyCluster client" def __del__( self, @@ -556,7 +559,7 @@ def keyslot(self, key: EncodableT) -> int: """ Find the keyslot for a given key. 
- See: https://redis.io/docs/manual/scaling/#redis-cluster-data-sharding + See: https://valkey.io/docs/manual/scaling/#valkey-cluster-data-sharding """ return key_slot(self.encoder.encode(key)) @@ -565,7 +568,7 @@ def get_encoder(self) -> Encoder: return self.encoder def get_connection_kwargs(self) -> Dict[str, Optional[Any]]: - """Get the kwargs passed to :class:`~redis.asyncio.connection.Connection`.""" + """Get the kwargs passed to :class:`~valkey.asyncio.connection.Connection`.""" return self.connection_kwargs def get_retry(self) -> Optional["Retry"]: @@ -624,7 +627,7 @@ async def _determine_slot(self, command: str, *args: Any) -> int: # Get the keys in the command # EVAL and EVALSHA are common enough that it's wasteful to go to the - # redis server to parse the keys. Besides, there is a bug in redis<7.0 + # valkey server to parse the keys. Besides, there is a bug in valkey<7.0 # where `self._get_command_keys()` fails anyway. So, we special case # EVAL/EVALSHA. # - issue: https://github.com/redis/redis/issues/9493 @@ -632,23 +635,23 @@ async def _determine_slot(self, command: str, *args: Any) -> int: if command.upper() in ("EVAL", "EVALSHA"): # command syntax: EVAL "script body" num_keys ... if len(args) < 2: - raise RedisClusterException( + raise ValkeyClusterException( f"Invalid args in command: {command, *args}" ) keys = args[2 : 2 + int(args[1])] # if there are 0 keys, that means the script can be run on any node # so we can just return a random slot if not keys: - return random.randrange(0, REDIS_CLUSTER_HASH_SLOTS) + return random.randrange(0, VALKEY_CLUSTER_HASH_SLOTS) else: keys = await self.commands_parser.get_keys(command, *args) if not keys: # FCALL can call a function with 0 keys, that means the function # can be run on any node so we can just return a random slot if command.upper() in ("FCALL", "FCALL_RO"): - return random.randrange(0, REDIS_CLUSTER_HASH_SLOTS) - raise RedisClusterException( - "No way to dispatch this command to Redis Cluster. 
" + return random.randrange(0, VALKEY_CLUSTER_HASH_SLOTS) + raise ValkeyClusterException( + "No way to dispatch this command to Valkey Cluster. " "Missing key.\nYou can execute the command by specifying " f"target nodes.\nCommand: {args}" ) @@ -661,7 +664,7 @@ async def _determine_slot(self, command: str, *args: Any) -> int: # the same slot slots = {self.keyslot(key) for key in keys} if len(slots) != 1: - raise RedisClusterException( + raise ValkeyClusterException( f"{command} - all keys must map to the same key slot" ) @@ -703,9 +706,9 @@ async def execute_command(self, *args: EncodableT, **kwargs: Any) -> Any: - target_nodes: :attr:`NODE_FLAGS` or :class:`~.ClusterNode` or List[:class:`~.ClusterNode`] or Dict[Any, :class:`~.ClusterNode`] - - Rest of the kwargs are passed to the Redis connection + - Rest of the kwargs are passed to the Valkey connection - :raises RedisClusterException: if target_nodes is not provided & the command + :raises ValkeyClusterException: if target_nodes is not provided & the command can't be mapped to a slot """ command = args[0] @@ -737,7 +740,7 @@ async def execute_command(self, *args: EncodableT, **kwargs: Any) -> Any: *args, node_flag=passed_targets ) if not target_nodes: - raise RedisClusterException( + raise ValkeyClusterException( f"No targets were found to execute {args} command on" ) @@ -779,7 +782,7 @@ async def _execute_command( ) -> Any: asking = moved = False redirect_addr = None - ttl = self.RedisClusterRequestTTL + ttl = self.ValkeyClusterRequestTTL while ttl > 0: ttl -= 1 @@ -825,7 +828,7 @@ async def _execute_command( # 'reinitialize_steps' counter will increase faster when # the same client object is shared between multiple threads. To # reduce the frequency you can set this variable in the - # RedisCluster constructor. + # ValkeyCluster constructor. 
self.reinitialize_counter += 1 if ( self.reinitialize_steps @@ -841,7 +844,7 @@ async def _execute_command( redirect_addr = get_node_name(host=e.host, port=e.port) asking = True except TryAgainError: - if ttl < self.RedisClusterRequestTTL / 2: + if ttl < self.ValkeyClusterRequestTTL / 2: await asyncio.sleep(0.05) raise ClusterError("TTL exhausted.") @@ -854,13 +857,13 @@ def pipeline( Cluster implementation of pipeline does not support transaction or shard_hint. - :raises RedisClusterException: if transaction or shard_hint are truthy values + :raises ValkeyClusterException: if transaction or shard_hint are truthy values """ if shard_hint: - raise RedisClusterException("shard_hint is deprecated in cluster mode") + raise ValkeyClusterException("shard_hint is deprecated in cluster mode") if transaction: - raise RedisClusterException("transaction is deprecated in cluster mode") + raise ValkeyClusterException("transaction is deprecated in cluster mode") return ClusterPipeline(self) @@ -897,7 +900,7 @@ def lock( float or integer, both representing the number of seconds to wait. ``lock_class`` forces the specified lock implementation. Note that as - of redis-py 3.0, the only lock class we implement is ``Lock`` (which is + of valkey-py 3.0, the only lock class we implement is ``Lock`` (which is a Lua-based lock). So, it's unlikely you'll need this parameter, unless you have created your own custom lock class. @@ -910,7 +913,7 @@ def lock( thread-1 sets the token to "abc" time: 1, thread-2 blocks trying to acquire `my-lock` using the Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock + time: 5, thread-1 has not yet completed. valkey expires the lock key. time: 5, thread-2 acquired `my-lock` now that it's available. thread-2 sets the token to "xyz" @@ -955,7 +958,7 @@ class ClusterNode: """ Create a new ClusterNode. 
- Each ClusterNode manages multiple :class:`~redis.asyncio.connection.Connection` + Each ClusterNode manages multiple :class:`~valkey.asyncio.connection.Connection` objects for the (host, port). """ @@ -1283,7 +1286,7 @@ async def initialize(self) -> None: try: cluster_slots = await startup_node.execute_command("CLUSTER SLOTS") except ResponseError: - raise RedisClusterException( + raise ValkeyClusterException( "Cluster mode is not enabled on this node" ) startup_nodes_reachable = True @@ -1358,14 +1361,14 @@ async def initialize(self) -> None: ) if len(disagreements) > 5: - raise RedisClusterException( + raise ValkeyClusterException( f"startup_nodes could not agree on a valid " f'slots cache: {", ".join(disagreements)}' ) # Validate if all slots are covered or if we should try next startup node fully_covered = True - for i in range(REDIS_CLUSTER_HASH_SLOTS): + for i in range(VALKEY_CLUSTER_HASH_SLOTS): if i not in tmp_slots: fully_covered = False break @@ -1373,8 +1376,8 @@ async def initialize(self) -> None: break if not startup_nodes_reachable: - raise RedisClusterException( - f"Redis Cluster cannot be connected. Please provide at least " + raise ValkeyClusterException( + f"Valkey Cluster cannot be connected. Please provide at least " f"one reachable node: {str(exception)}" ) from exception @@ -1382,9 +1385,9 @@ async def initialize(self) -> None: if not fully_covered and self.require_full_coverage: # Despite the requirement that the slots be covered, there # isn't a full coverage - raise RedisClusterException( + raise ValkeyClusterException( f"All slots are not covered after query all startup_nodes. " - f"{len(tmp_slots)} of {REDIS_CLUSTER_HASH_SLOTS} " + f"{len(tmp_slots)} of {VALKEY_CLUSTER_HASH_SLOTS} " f"covered..." 
) @@ -1431,7 +1434,9 @@ def invalidate_key_from_cache(self, key): node.invalidate_key_from_cache(key) -class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommands): +class ClusterPipeline( + AbstractValkey, AbstractValkeyCluster, AsyncValkeyClusterCommands +): """ Create a new ClusterPipeline object. @@ -1465,12 +1470,12 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm - :class:`~.AskError` :param client: - | Existing :class:`~.RedisCluster` client + | Existing :class:`~.ValkeyCluster` client """ __slots__ = ("_command_stack", "_client") - def __init__(self, client: RedisCluster) -> None: + def __init__(self, client: ValkeyCluster) -> None: self._client = client self._command_stack: List["PipelineCommand"] = [] @@ -1516,7 +1521,7 @@ def execute_command( - target_nodes: :attr:`NODE_FLAGS` or :class:`~.ClusterNode` or List[:class:`~.ClusterNode`] or Dict[Any, :class:`~.ClusterNode`] - - Rest of the kwargs are passed to the Redis connection + - Rest of the kwargs are passed to the Valkey connection """ kwargs.pop("keys", None) # the keys are used only for client side caching self._command_stack.append( @@ -1539,7 +1544,7 @@ async def execute( | Whether to retry each failed command individually in case of redirection errors - :raises RedisClusterException: if target_nodes is not provided & the command + :raises ValkeyClusterException: if target_nodes is not provided & the command can't be mapped to a slot """ if not self._command_stack: @@ -1574,7 +1579,7 @@ async def execute( async def _execute( self, - client: "RedisCluster", + client: "ValkeyCluster", stack: List["PipelineCommand"], raise_on_error: bool = True, allow_redirections: bool = True, @@ -1593,11 +1598,11 @@ async def _execute( *cmd.args, node_flag=passed_targets ) if not target_nodes: - raise RedisClusterException( + raise ValkeyClusterException( f"No targets were found to execute {cmd.args} command on" ) if len(target_nodes) > 1: - raise 
RedisClusterException(f"Too many targets for command {cmd.args}") + raise ValkeyClusterException(f"Too many targets for command {cmd.args}") node = target_nodes[0] if node.name not in nodes: nodes[node.name] = (node, []) diff --git a/redis/asyncio/connection.py b/valkey/asyncio/connection.py similarity index 93% rename from redis/asyncio/connection.py rename to valkey/asyncio/connection.py index 96f18876..5237918f 100644 --- a/redis/asyncio/connection.py +++ b/valkey/asyncio/connection.py @@ -34,21 +34,21 @@ else: from async_timeout import timeout as async_timeout -from redis.asyncio.retry import Retry -from redis.backoff import NoBackoff -from redis.connection import DEFAULT_RESP_VERSION -from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider -from redis.exceptions import ( +from valkey.asyncio.retry import Retry +from valkey.backoff import NoBackoff +from valkey.connection import DEFAULT_RESP_VERSION +from valkey.credentials import CredentialProvider, UsernamePasswordCredentialProvider +from valkey.exceptions import ( AuthenticationError, AuthenticationWrongNumberOfArgsError, ConnectionError, DataError, - RedisError, ResponseError, TimeoutError, + ValkeyError, ) -from redis.typing import EncodableT, KeysT, ResponseT -from redis.utils import HIREDIS_AVAILABLE, get_lib_version, str_if_bytes +from valkey.typing import EncodableT, KeysT, ResponseT +from valkey.utils import HIREDIS_AVAILABLE, get_lib_version, str_if_bytes from .._cache import ( DEFAULT_ALLOW_LIST, @@ -98,7 +98,7 @@ async def __call__(self, connection: "AbstractConnection"): ... 
class AbstractConnection: - """Manages communication to and from a Redis server""" + """Manages communication to and from a Valkey server""" __slots__ = ( "db", @@ -110,7 +110,7 @@ class AbstractConnection: "password", "socket_timeout", "socket_connect_timeout", - "redis_connect_func", + "valkey_connect_func", "retry_on_timeout", "retry_on_error", "health_check_interval", @@ -148,11 +148,11 @@ def __init__( socket_read_size: int = 65536, health_check_interval: float = 0, client_name: Optional[str] = None, - lib_name: Optional[str] = "redis-py", + lib_name: Optional[str] = "valkey-py", lib_version: Optional[str] = get_lib_version(), username: Optional[str] = None, retry: Optional[Retry] = None, - redis_connect_func: Optional[ConnectCallbackT] = None, + valkey_connect_func: Optional[ConnectCallbackT] = None, encoder_class: Type[Encoder] = Encoder, credential_provider: Optional[CredentialProvider] = None, protocol: Optional[int] = 2, @@ -203,7 +203,7 @@ def __init__( self.health_check_interval = health_check_interval self.next_health_check: float = -1 self.encoder = encoder_class(encoding, encoding_errors, decode_responses) - self.redis_connect_func = redis_connect_func + self.valkey_connect_func = valkey_connect_func self._reader: Optional[asyncio.StreamReader] = None self._writer: Optional[asyncio.StreamWriter] = None self._socket_read_size = socket_read_size @@ -227,7 +227,7 @@ def __init__( self.client_cache = client_cache if client_cache is not None else _cache if self.client_cache is not None: if self.protocol not in [3, "3"]: - raise RedisError( + raise ValkeyError( "client caching is only supported with protocol version 3 or higher" ) self.cache_deny_list = cache_deny_list @@ -295,7 +295,7 @@ def set_parser(self, parser_class: Type[BaseParser]) -> None: self._parser = parser_class(socket_read_size=self._socket_read_size) async def connect(self): - """Connects to the Redis server if not already connected""" + """Connects to the Valkey server if not already 
connected""" if self.is_connected: return try: @@ -312,17 +312,17 @@ async def connect(self): raise ConnectionError(exc) from exc try: - if not self.redis_connect_func: + if not self.valkey_connect_func: # Use the default on_connect function await self.on_connect() else: - # Use the passed function redis_connect_func + # Use the passed function valkey_connect_func ( - await self.redis_connect_func(self) - if asyncio.iscoroutinefunction(self.redis_connect_func) - else self.redis_connect_func(self) + await self.valkey_connect_func(self) + if asyncio.iscoroutinefunction(self.valkey_connect_func) + else self.valkey_connect_func(self) ) - except RedisError: + except ValkeyError: # clean up after any error in on_connect await self.disconnect() raise @@ -386,7 +386,7 @@ async def on_connect(self) -> None: try: auth_response = await self.read_response() except AuthenticationWrongNumberOfArgsError: - # a username and password were specified but the Redis + # a username and password were specified but the Valkey # server seems to be < 6.0.0 which expects a single password # arg. retry auth with just the password. 
# https://github.com/andymccurdy/redis-py/issues/1274 @@ -442,7 +442,7 @@ async def on_connect(self) -> None: raise ConnectionError("Invalid Database") async def disconnect(self, nowait: bool = False) -> None: - """Disconnects from the Redis server""" + """Disconnects from the Valkey server""" try: async with async_timeout(self.socket_connect_timeout): self._parser.on_disconnect() @@ -531,7 +531,7 @@ async def send_packed_command( raise async def send_command(self, *args: Any, **kwargs: Any) -> None: - """Pack and send a command to the Redis server""" + """Pack and send a command to the Valkey server""" await self.send_packed_command( self.pack_command(*args), check_health=kwargs.get("check_health", True) ) @@ -608,10 +608,10 @@ async def read_response( return response def pack_command(self, *args: EncodableT) -> List[bytes]: - """Pack a series of arguments into the Redis protocol""" + """Pack a series of arguments into the Valkey protocol""" output = [] # the client might have included 1 or more literal arguments in - # the command name, e.g., 'CONFIG GET'. The Redis server expects these + # the command name, e.g., 'CONFIG GET'. The Valkey server expects these # arguments to be sent separately, so split the first argument # manually. These arguments should be bytestrings so that they are # not encoded. @@ -654,7 +654,7 @@ def pack_command(self, *args: EncodableT) -> List[bytes]: return output def pack_commands(self, commands: Iterable[Iterable[EncodableT]]) -> List[bytes]: - """Pack multiple commands into the Redis protocol""" + """Pack multiple commands into the Valkey protocol""" output: List[bytes] = [] pieces: List[bytes] = [] buffer_length = 0 @@ -691,7 +691,7 @@ def _cache_invalidation_process( self, data: List[Union[str, Optional[List[str]]]] ) -> None: """ - Invalidate (delete) all redis commands associated with a specific key. + Invalidate (delete) all valkey commands associated with a specific key. 
`data` is a list of strings, where the first string is the invalidation message and the second string is the list of keys to invalidate. (if the list of keys is None, then all keys are invalidated) @@ -744,7 +744,7 @@ def invalidate_key_from_cache(self, key): class Connection(AbstractConnection): - "Manages TCP communication to and from a Redis server" + "Manages TCP communication to and from a Valkey server" def __init__( self, @@ -822,7 +822,7 @@ def _error_message(self, exception: BaseException) -> str: class SSLConnection(Connection): - """Manages SSL connections to and from the Redis server(s). + """Manages SSL connections to and from the Valkey server(s). This class extends the Connection class, adding SSL functionality, and making use of ssl.SSLContext (https://docs.python.org/3/library/ssl.html#ssl.SSLContext) """ @@ -839,7 +839,7 @@ def __init__( ssl_ciphers: Optional[str] = None, **kwargs, ): - self.ssl_context: RedisSSLContext = RedisSSLContext( + self.ssl_context: ValkeySSLContext = ValkeySSLContext( keyfile=ssl_keyfile, certfile=ssl_certfile, cert_reqs=ssl_cert_reqs, @@ -885,7 +885,7 @@ def min_version(self): return self.ssl_context.min_version -class RedisSSLContext: +class ValkeySSLContext: __slots__ = ( "keyfile", "certfile", @@ -920,7 +920,7 @@ def __init__( "required": ssl.CERT_REQUIRED, } if cert_reqs not in CERT_REQS: - raise RedisError( + raise ValkeyError( f"Invalid SSL Certificate Requirements Flag: {cert_reqs}" ) self.cert_reqs = CERT_REQS[cert_reqs] @@ -949,7 +949,7 @@ def get(self) -> ssl.SSLContext: class UnixDomainSocketConnection(AbstractConnection): - "Manages UDS communication to and from a Redis server" + "Manages UDS communication to and from a Valkey server" def __init__(self, *, path: str = "", **kwargs): self.path = path @@ -1043,13 +1043,13 @@ def parse_url(url: str) -> ConnectKwargs: if parsed.password: kwargs["password"] = unquote(parsed.password) - # We only support redis://, rediss:// and unix:// schemes. 
+ # We only support valkey://, valkeys:// and unix:// schemes. if parsed.scheme == "unix": if parsed.path: kwargs["path"] = unquote(parsed.path) kwargs["connection_class"] = UnixDomainSocketConnection - elif parsed.scheme in ("redis", "rediss"): + elif parsed.scheme in ("valkey", "valkeys"): if parsed.hostname: kwargs["host"] = unquote(parsed.hostname) if parsed.port: @@ -1063,12 +1063,12 @@ def parse_url(url: str) -> ConnectKwargs: except (AttributeError, ValueError): pass - if parsed.scheme == "rediss": + if parsed.scheme == "valkeys": kwargs["connection_class"] = SSLConnection else: - valid_schemes = "redis://, rediss://, unix://" + valid_schemes = "valkey://, valkeys://, unix://" raise ValueError( - f"Redis URL must specify one of the following schemes ({valid_schemes})" + f"Valkey URL must specify one of the following schemes ({valid_schemes})" ) return kwargs @@ -1080,11 +1080,11 @@ def parse_url(url: str) -> ConnectKwargs: class ConnectionPool: """ Create a connection pool. ``If max_connections`` is set, then this - object raises :py:class:`~redis.ConnectionError` when the pool's + object raises :py:class:`~valkey.ConnectionError` when the pool's limit is reached. By default, TCP connections are created unless ``connection_class`` - is specified. Use :py:class:`~redis.UnixDomainSocketConnection` for + is specified. Use :py:class:`~valkey.UnixDomainSocketConnection` for unix sockets. Any additional keyword arguments are passed to the constructor of @@ -1098,16 +1098,14 @@ def from_url(cls: Type[_CP], url: str, **kwargs) -> _CP: For example:: - redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 + valkey://[[username]:[password]]@localhost:6379/0 + valkeys://[[username]:[password]]@localhost:6379/0 unix://[username@]/path/to/socket.sock?db=0[&password=password] Three URL schemes are supported: - - `redis://` creates a TCP socket connection. 
See more at: - - - `rediss://` creates a SSL wrapped TCP socket connection. See more at: - + - `valkey://` creates a TCP socket connection. + - `valkeys://` creates a SSL wrapped TCP socket connection. - ``unix://``: creates a Unix Domain Socket connection. The username, password, hostname, path and all querystring values @@ -1117,10 +1115,10 @@ def from_url(cls: Type[_CP], url: str, **kwargs) -> _CP: There are several ways to specify a database number. The first value found will be used: - 1. A ``db`` querystring option, e.g. redis://localhost?db=0 + 1. A ``db`` querystring option, e.g. valkey://localhost?db=0 - 2. If using the redis:// or rediss:// schemes, the path argument - of the url, e.g. redis://localhost/0 + 2. If using the valkey:// or valkeys:// schemes, the path argument + of the url, e.g. valkey://localhost/0 3. A ``db`` keyword argument to this function. @@ -1289,18 +1287,18 @@ class BlockingConnectionPool(ConnectionPool): """ A blocking connection pool:: - >>> from redis.asyncio import Redis, BlockingConnectionPool - >>> client = Redis.from_pool(BlockingConnectionPool()) + >>> from valkey.asyncio import Valkey, BlockingConnectionPool + >>> client = Valkey.from_pool(BlockingConnectionPool()) It performs the same function as the default - :py:class:`~redis.asyncio.ConnectionPool` implementation, in that, + :py:class:`~valkey.asyncio.ConnectionPool` implementation, in that, it maintains a pool of reusable connections that can be shared by - multiple async redis clients. + multiple async valkey clients. 
The difference is that, in the event that a client tries to get a connection from the pool when all of connections are in use, rather than - raising a :py:class:`~redis.ConnectionError` (as the default - :py:class:`~redis.asyncio.ConnectionPool` implementation does), it + raising a :py:class:`~valkey.ConnectionError` (as the default + :py:class:`~valkey.asyncio.ConnectionPool` implementation does), it blocks the current `Task` for a specified number of seconds until a connection becomes available. diff --git a/redis/asyncio/lock.py b/valkey/asyncio/lock.py similarity index 91% rename from redis/asyncio/lock.py rename to valkey/asyncio/lock.py index e1d11a88..c7f9351c 100644 --- a/redis/asyncio/lock.py +++ b/valkey/asyncio/lock.py @@ -4,15 +4,15 @@ from types import SimpleNamespace from typing import TYPE_CHECKING, Awaitable, Optional, Union -from redis.exceptions import LockError, LockNotOwnedError +from valkey.exceptions import LockError, LockNotOwnedError if TYPE_CHECKING: - from redis.asyncio import Redis, RedisCluster + from valkey.asyncio import Valkey, ValkeyCluster class Lock: """ - A shared, distributed Lock. Using Redis for locking allows the Lock + A shared, distributed Lock. Using Valkey for locking allows the Lock to be shared across processes and/or machines. It's left to the user to resolve deadlock issues and make sure @@ -77,7 +77,7 @@ class Lock: def __init__( self, - redis: Union["Redis", "RedisCluster"], + valkey: Union["Valkey", "ValkeyCluster"], name: Union[str, bytes, memoryview], timeout: Optional[float] = None, sleep: float = 0.1, @@ -86,8 +86,8 @@ def __init__( thread_local: bool = True, ): """ - Create a new Lock instance named ``name`` using the Redis client - supplied by ``redis``. + Create a new Lock instance named ``name`` using the Valkey client + supplied by ``valkey``. ``timeout`` indicates a maximum life for the lock in seconds. By default, it will remain locked until release() is called. 
@@ -118,7 +118,7 @@ def __init__( thread-1 sets the token to "abc" time: 1, thread-2 blocks trying to acquire `my-lock` using the Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock + time: 5, thread-1 has not yet completed. valkey expires the lock key. time: 5, thread-2 acquired `my-lock` now that it's available. thread-2 sets the token to "xyz" @@ -135,7 +135,7 @@ def __init__( is that these cases aren't common and as such default to using thread local storage. """ - self.redis = redis + self.valkey = valkey self.name = name self.timeout = timeout self.sleep = sleep @@ -148,7 +148,7 @@ def __init__( def register_scripts(self): cls = self.__class__ - client = self.redis + client = self.valkey if cls.lua_release is None: cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT) if cls.lua_extend is None: @@ -171,7 +171,7 @@ async def acquire( token: Optional[Union[str, bytes]] = None, ): """ - Use Redis to hold a shared, distributed lock named ``name``. + Use Valkey to hold a shared, distributed lock named ``name``. Returns True once the lock is acquired. If ``blocking`` is False, always return immediately. If the lock @@ -190,10 +190,10 @@ async def acquire( token = uuid.uuid1().hex.encode() else: try: - encoder = self.redis.connection_pool.get_encoder() + encoder = self.valkey.connection_pool.get_encoder() except AttributeError: # Cluster - encoder = self.redis.get_encoder() + encoder = self.valkey.get_encoder() token = encoder.encode(token) if blocking is None: blocking = self.blocking @@ -219,7 +219,7 @@ async def do_acquire(self, token: Union[str, bytes]) -> bool: timeout = int(self.timeout * 1000) else: timeout = None - if await self.redis.set(self.name, token, nx=True, px=timeout): + if await self.valkey.set(self.name, token, nx=True, px=timeout): return True return False @@ -227,21 +227,21 @@ async def locked(self) -> bool: """ Returns True if this key is locked by any process, otherwise False. 
""" - return await self.redis.get(self.name) is not None + return await self.valkey.get(self.name) is not None async def owned(self) -> bool: """ Returns True if this key is locked by this lock, otherwise False. """ - stored_token = await self.redis.get(self.name) + stored_token = await self.valkey.get(self.name) # need to always compare bytes to bytes # TODO: this can be simplified when the context manager is finished if stored_token and not isinstance(stored_token, bytes): try: - encoder = self.redis.connection_pool.get_encoder() + encoder = self.valkey.connection_pool.get_encoder() except AttributeError: # Cluster - encoder = self.redis.get_encoder() + encoder = self.valkey.get_encoder() stored_token = encoder.encode(stored_token) return self.local.token is not None and stored_token == self.local.token @@ -256,7 +256,7 @@ def release(self) -> Awaitable[None]: async def do_release(self, expected_token: bytes) -> None: if not bool( await self.lua_release( - keys=[self.name], args=[expected_token], client=self.redis + keys=[self.name], args=[expected_token], client=self.valkey ) ): raise LockNotOwnedError("Cannot release a lock that's no longer owned") @@ -286,7 +286,7 @@ async def do_extend(self, additional_time, replace_ttl) -> bool: await self.lua_extend( keys=[self.name], args=[self.local.token, additional_time, replace_ttl and "1" or "0"], - client=self.redis, + client=self.valkey, ) ): raise LockNotOwnedError("Cannot extend a lock that's no longer owned") @@ -306,7 +306,7 @@ async def do_reacquire(self) -> bool: timeout = int(self.timeout * 1000) if not bool( await self.lua_reacquire( - keys=[self.name], args=[self.local.token, timeout], client=self.redis + keys=[self.name], args=[self.local.token, timeout], client=self.valkey ) ): raise LockNotOwnedError("Cannot reacquire a lock that's no longer owned") diff --git a/redis/asyncio/retry.py b/valkey/asyncio/retry.py similarity index 88% rename from redis/asyncio/retry.py rename to valkey/asyncio/retry.py index 
7c5e3b0e..a263f889 100644 --- a/redis/asyncio/retry.py +++ b/valkey/asyncio/retry.py @@ -1,10 +1,10 @@ from asyncio import sleep from typing import TYPE_CHECKING, Any, Awaitable, Callable, Tuple, Type, TypeVar -from redis.exceptions import ConnectionError, RedisError, TimeoutError +from valkey.exceptions import ConnectionError, TimeoutError, ValkeyError if TYPE_CHECKING: - from redis.backoff import AbstractBackoff + from valkey.backoff import AbstractBackoff T = TypeVar("T") @@ -19,7 +19,7 @@ def __init__( self, backoff: "AbstractBackoff", retries: int, - supported_errors: Tuple[Type[RedisError], ...] = ( + supported_errors: Tuple[Type[ValkeyError], ...] = ( ConnectionError, TimeoutError, ), @@ -44,7 +44,7 @@ def update_supported_errors(self, specified_errors: list): ) async def call_with_retry( - self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any] + self, do: Callable[[], Awaitable[T]], fail: Callable[[ValkeyError], Any] ) -> T: """ Execute an operation that might fail and returns its result, or diff --git a/redis/asyncio/sentinel.py b/valkey/asyncio/sentinel.py similarity index 88% rename from redis/asyncio/sentinel.py rename to valkey/asyncio/sentinel.py index 6fd233ad..f9ccd3d8 100644 --- a/redis/asyncio/sentinel.py +++ b/valkey/asyncio/sentinel.py @@ -3,16 +3,21 @@ import weakref from typing import AsyncIterator, Iterable, Mapping, Optional, Sequence, Tuple, Type -from redis.asyncio.client import Redis -from redis.asyncio.connection import ( +from valkey.asyncio.client import Valkey +from valkey.asyncio.connection import ( Connection, ConnectionPool, EncodableT, SSLConnection, ) -from redis.commands import AsyncSentinelCommands -from redis.exceptions import ConnectionError, ReadOnlyError, ResponseError, TimeoutError -from redis.utils import str_if_bytes +from valkey.commands import AsyncSentinelCommands +from valkey.exceptions import ( + ConnectionError, + ReadOnlyError, + ResponseError, + TimeoutError, +) +from valkey.utils import 
str_if_bytes class MasterNotFoundError(ConnectionError): @@ -170,9 +175,9 @@ async def rotate_slaves(self) -> AsyncIterator: class Sentinel(AsyncSentinelCommands): """ - Redis Sentinel cluster client + Valkey Sentinel cluster client - >>> from redis.sentinel import Sentinel + >>> from valkey.sentinel import Sentinel >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) >>> await master.set('foo', 'bar') @@ -189,12 +194,12 @@ class Sentinel(AsyncSentinelCommands): ``sentinel_kwargs`` is a dictionary of connection arguments used when connecting to sentinel instances. Any argument that can be passed to - a normal Redis connection can be specified here. If ``sentinel_kwargs`` is + a normal Valkey connection can be specified here. If ``sentinel_kwargs`` is not specified, any socket_timeout and socket_keepalive options specified in ``connection_kwargs`` will be used. ``connection_kwargs`` are keyword arguments that will be used when - establishing a connection to a Redis server. + establishing a connection to a Valkey server. """ def __init__( @@ -213,7 +218,7 @@ def __init__( self.sentinel_kwargs = sentinel_kwargs self.sentinels = [ - Redis(host=hostname, port=port, **self.sentinel_kwargs) + Valkey(host=hostname, port=port, **self.sentinel_kwargs) for hostname, port in sentinels ] self.min_other_sentinels = min_other_sentinels @@ -262,7 +267,7 @@ def check_master_state(self, state: dict, service_name: str) -> bool: async def discover_master(self, service_name: str): """ - Asks sentinel servers for the Redis master's address corresponding + Asks sentinel servers for the Valkey master's address corresponding to the service labeled ``service_name``. 
Returns a pair (address, port) or raises MasterNotFoundError if no @@ -317,55 +322,55 @@ async def discover_slaves( def master_for( self, service_name: str, - redis_class: Type[Redis] = Redis, + valkey_class: Type[Valkey] = Valkey, connection_pool_class: Type[SentinelConnectionPool] = SentinelConnectionPool, **kwargs, ): """ - Returns a redis client instance for the ``service_name`` master. + Returns a valkey client instance for the ``service_name`` master. - A :py:class:`~redis.sentinel.SentinelConnectionPool` class is + A :py:class:`~valkey.sentinel.SentinelConnectionPool` class is used to retrieve the master's address before establishing a new connection. NOTE: If the master's address has changed, any cached connections to the old master are closed. - By default clients will be a :py:class:`~redis.Redis` instance. - Specify a different class to the ``redis_class`` argument if you + By default clients will be a :py:class:`~valkey.Valkey` instance. + Specify a different class to the ``valkey_class`` argument if you desire something different. The ``connection_pool_class`` specifies the connection pool to - use. The :py:class:`~redis.sentinel.SentinelConnectionPool` + use. The :py:class:`~valkey.sentinel.SentinelConnectionPool` will be used by default. All other keyword arguments are merged with any connection_kwargs passed to this class and passed to the connection pool as keyword - arguments to be used to initialize Redis connections. + arguments to be used to initialize Valkey connections. 
""" kwargs["is_master"] = True connection_kwargs = dict(self.connection_kwargs) connection_kwargs.update(kwargs) connection_pool = connection_pool_class(service_name, self, **connection_kwargs) - # The Redis object "owns" the pool - return redis_class.from_pool(connection_pool) + # The Valkey object "owns" the pool + return valkey_class.from_pool(connection_pool) def slave_for( self, service_name: str, - redis_class: Type[Redis] = Redis, + valkey_class: Type[Valkey] = Valkey, connection_pool_class: Type[SentinelConnectionPool] = SentinelConnectionPool, **kwargs, ): """ - Returns redis client instance for the ``service_name`` slave(s). + Returns valkey client instance for the ``service_name`` slave(s). A SentinelConnectionPool class is used to retrieve the slave's address before establishing a new connection. - By default clients will be a :py:class:`~redis.Redis` instance. - Specify a different class to the ``redis_class`` argument if you + By default clients will be a :py:class:`~valkey.Valkey` instance. + Specify a different class to the ``valkey_class`` argument if you desire something different. The ``connection_pool_class`` specifies the connection pool to use. @@ -373,12 +378,12 @@ def slave_for( All other keyword arguments are merged with any connection_kwargs passed to this class and passed to the connection pool as keyword - arguments to be used to initialize Redis connections. + arguments to be used to initialize Valkey connections. 
""" kwargs["is_master"] = False connection_kwargs = dict(self.connection_kwargs) connection_kwargs.update(kwargs) connection_pool = connection_pool_class(service_name, self, **connection_kwargs) - # The Redis object "owns" the pool - return redis_class.from_pool(connection_pool) + # The Valkey object "owns" the pool + return valkey_class.from_pool(connection_pool) diff --git a/redis/asyncio/utils.py b/valkey/asyncio/utils.py similarity index 55% rename from redis/asyncio/utils.py rename to valkey/asyncio/utils.py index 5a55b36a..7f8242ca 100644 --- a/redis/asyncio/utils.py +++ b/valkey/asyncio/utils.py @@ -1,24 +1,24 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from redis.asyncio.client import Pipeline, Redis + from valkey.asyncio.client import Pipeline, Valkey def from_url(url, **kwargs): """ - Returns an active Redis client generated from the given database URL. + Returns an active Valkey client generated from the given database URL. Will attempt to extract the database id from the path url fragment, if none is provided. 
""" - from redis.asyncio.client import Redis + from valkey.asyncio.client import Valkey - return Redis.from_url(url, **kwargs) + return Valkey.from_url(url, **kwargs) class pipeline: - def __init__(self, redis_obj: "Redis"): - self.p: "Pipeline" = redis_obj.pipeline() + def __init__(self, valkey_obj: "Valkey"): + self.p: "Pipeline" = valkey_obj.pipeline() async def __aenter__(self) -> "Pipeline": return self.p diff --git a/redis/backoff.py b/valkey/backoff.py similarity index 100% rename from redis/backoff.py rename to valkey/backoff.py diff --git a/redis/client.py b/valkey/client.py similarity index 94% rename from redis/client.py rename to valkey/client.py index 02fc7243..4886ba9c 100755 --- a/redis/client.py +++ b/valkey/client.py @@ -6,44 +6,44 @@ from itertools import chain from typing import Any, Callable, Dict, List, Optional, Type, Union -from redis._cache import ( +from valkey._cache import ( DEFAULT_ALLOW_LIST, DEFAULT_DENY_LIST, DEFAULT_EVICTION_POLICY, AbstractCache, ) -from redis._parsers.encoders import Encoder -from redis._parsers.helpers import ( - _RedisCallbacks, - _RedisCallbacksRESP2, - _RedisCallbacksRESP3, +from valkey._parsers.encoders import Encoder +from valkey._parsers.helpers import ( + _ValkeyCallbacks, + _ValkeyCallbacksRESP2, + _ValkeyCallbacksRESP3, bool_ok, ) -from redis.commands import ( +from valkey.commands import ( CoreCommands, - RedisModuleCommands, SentinelCommands, + ValkeyModuleCommands, list_or_args, ) -from redis.connection import ( +from valkey.connection import ( AbstractConnection, ConnectionPool, SSLConnection, UnixDomainSocketConnection, ) -from redis.credentials import CredentialProvider -from redis.exceptions import ( +from valkey.credentials import CredentialProvider +from valkey.exceptions import ( ConnectionError, ExecAbortError, PubSubError, - RedisError, ResponseError, TimeoutError, + ValkeyError, WatchError, ) -from redis.lock import Lock -from redis.retry import Retry -from redis.utils import ( +from 
valkey.lock import Lock +from valkey.retry import Retry +from valkey.utils import ( HIREDIS_AVAILABLE, _set_info_logger, get_lib_version, @@ -85,42 +85,40 @@ def update(self, data): super().update(data) -class AbstractRedis: +class AbstractValkey: pass -class Redis(RedisModuleCommands, CoreCommands, SentinelCommands): +class Valkey(ValkeyModuleCommands, CoreCommands, SentinelCommands): """ - Implementation of the Redis protocol. + Implementation of the Valkey protocol. - This abstract class provides a Python interface to all Redis commands - and an implementation of the Redis protocol. + This abstract class provides a Python interface to all Valkey commands + and an implementation of the Valkey protocol. Pipelines derive from this, implementing how - the commands are sent and received to the Redis server. Based on + the commands are sent and received to the Valkey server. Based on configuration, an instance will either use a ConnectionPool, or - Connection object to talk to redis. + Connection object to talk to valkey. It is not safe to pass PubSub or Pipeline objects between threads. """ @classmethod - def from_url(cls, url: str, **kwargs) -> "Redis": + def from_url(cls, url: str, **kwargs) -> "Valkey": """ - Return a Redis client object configured from the given URL + Return a Valkey client object configured from the given URL For example:: - redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 + valkey://[[username]:[password]]@localhost:6379/0 + valkeys://[[username]:[password]]@localhost:6379/0 unix://[username@]/path/to/socket.sock?db=0[&password=password] Three URL schemes are supported: - - `redis://` creates a TCP socket connection. See more at: - - - `rediss://` creates a SSL wrapped TCP socket connection. See more at: - + - `valkey://` creates a TCP socket connection. + - `valkeys://` creates a SSL wrapped TCP socket connection. - ``unix://``: creates a Unix Domain Socket connection. 
The username, password, hostname, path and all querystring values @@ -130,9 +128,9 @@ def from_url(cls, url: str, **kwargs) -> "Redis": There are several ways to specify a database number. The first value found will be used: - 1. A ``db`` querystring option, e.g. redis://localhost?db=0 - 2. If using the redis:// or rediss:// schemes, the path argument - of the url, e.g. redis://localhost/0 + 1. A ``db`` querystring option, e.g. valkey://localhost?db=0 + 2. If using the valkey:// or valkeys:// schemes, the path argument + of the url, e.g. valkey://localhost/0 3. A ``db`` keyword argument to this function. If none of these options are specified, the default db=0 is used. @@ -157,13 +155,13 @@ class initializer. In the case of conflicting arguments, querystring @classmethod def from_pool( - cls: Type["Redis"], + cls: Type["Valkey"], connection_pool: ConnectionPool, - ) -> "Redis": + ) -> "Valkey": """ - Return a Redis client from the given connection pool. - The Redis client will take ownership of the connection pool and - close it when the Redis client is closed. + Return a Valkey client from the given connection pool. + The Valkey client will take ownership of the connection pool and + close it when the Valkey client is closed. """ client = cls( connection_pool=connection_pool, @@ -209,11 +207,11 @@ def __init__( single_connection_client=False, health_check_interval=0, client_name=None, - lib_name="redis-py", + lib_name="valkey-py", lib_version=get_lib_version(), username=None, retry=None, - redis_connect_func=None, + valkey_connect_func=None, credential_provider: Optional[CredentialProvider] = None, protocol: Optional[int] = 2, cache_enabled: bool = False, @@ -225,7 +223,7 @@ def __init__( cache_allow_list: List[str] = DEFAULT_ALLOW_LIST, ) -> None: """ - Initialize a new Redis client. + Initialize a new Valkey client. 
To specify a retry policy for specific errors, first set `retry_on_error` to a list of the error/s to retry on, then set `retry` to a valid `Retry` object. @@ -234,7 +232,7 @@ def __init__( Args: single_connection_client: - if `True`, connection pool is not used. In that case `Redis` + if `True`, connection pool is not used. In that case `Valkey` instance use is not thread safe. """ if not connection_pool: @@ -271,7 +269,7 @@ def __init__( "client_name": client_name, "lib_name": lib_name, "lib_version": lib_version, - "redis_connect_func": redis_connect_func, + "valkey_connect_func": valkey_connect_func, "credential_provider": credential_provider, "protocol": protocol, "cache_enabled": cache_enabled, @@ -332,12 +330,12 @@ def __init__( if single_connection_client: self.connection = self.connection_pool.get_connection("_") - self.response_callbacks = CaseInsensitiveDict(_RedisCallbacks) + self.response_callbacks = CaseInsensitiveDict(_ValkeyCallbacks) if self.connection_pool.connection_kwargs.get("protocol") in ["3", 3]: - self.response_callbacks.update(_RedisCallbacksRESP3) + self.response_callbacks.update(_ValkeyCallbacksRESP3) else: - self.response_callbacks.update(_RedisCallbacksRESP2) + self.response_callbacks.update(_ValkeyCallbacksRESP2) def __repr__(self) -> str: return ( @@ -366,19 +364,19 @@ def set_response_callback(self, command: str, callback: Callable) -> None: def load_external_module(self, funcname, func) -> None: """ - This function can be used to add externally defined redis modules, - and their namespaces to the redis client. + This function can be used to add externally defined valkey modules, + and their namespaces to the valkey client. funcname - A string containing the name of the function to create func - The function, being added to this class. - ex: Assume that one has a custom redis module named foomod that - creates command named 'foo.dothing' and 'foo.anotherthing' in redis. 
+ ex: Assume that one has a custom valkey module named foomod that + creates command named 'foo.dothing' and 'foo.anotherthing' in valkey. To load function functions into this namespace: - from redis import Redis + from valkey import Valkey from foomodule import F - r = Redis() + r = Valkey() r.load_external_module("foo", F) r.foo().dothing('your', 'arguments') @@ -456,7 +454,7 @@ def lock( float or integer, both representing the number of seconds to wait. ``lock_class`` forces the specified lock implementation. Note that as - of redis-py 3.0, the only lock class we implement is ``Lock`` (which is + of valkey-py 3.0, the only lock class we implement is ``Lock`` (which is a Lua-based lock). So, it's unlikely you'll need this parameter, unless you have created your own custom lock class. @@ -469,7 +467,7 @@ def lock( thread-1 sets the token to "abc" time: 1, thread-2 blocks trying to acquire `my-lock` using the Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock + time: 5, thread-1 has not yet completed. valkey expires the lock key. time: 5, thread-2 acquired `my-lock` now that it's available. thread-2 sets the token to "xyz" @@ -524,7 +522,7 @@ def __del__(self): def close(self): # In case a connection property does not yet exist - # (due to a crash earlier in the Redis() constructor), return + # (due to a crash earlier in the Valkey() constructor), return # immediately as there is nothing to clean-up. 
if not hasattr(self, "connection"): return @@ -583,7 +581,7 @@ def execute_command(self, *args, **options): pool.release(conn) def parse_response(self, connection, command_name, **options): - """Parses a response from the Redis server""" + """Parses a response from the Valkey server""" try: if NEVER_DECODE in options: response = connection.read_response(disable_decoding=True) @@ -621,12 +619,12 @@ def invalidate_key_from_cache(self, key): self.connection_pool.invalidate_key_from_cache(key) -StrictRedis = Redis +StrictValkey = Valkey class Monitor: """ - Monitor is useful for handling the MONITOR command to the redis server. + Monitor is useful for handling the MONITOR command to the valkey server. next_command() method returns one command from monitor listen() method yields commands from monitor. """ @@ -643,7 +641,7 @@ def __enter__(self): # check that monitor returns 'OK', but don't return it to user response = self.connection.read_response() if not bool_ok(response): - raise RedisError(f"MONITOR failed: {response}") + raise ValkeyError(f"MONITOR failed: {response}") return self def __exit__(self, *args): @@ -659,7 +657,7 @@ def next_command(self): m = self.monitor_re.match(command_data) db_id, client_info, command = m.groups() command = " ".join(self.command_re.findall(command)) - # Redis escapes double quotes because each piece of the command + # Valkey escapes double quotes because each piece of the command # string is surrounded by double quotes. We don't have that # requirement so remove the escaping and leave the quote. command = command.replace('\\"', '"') @@ -693,7 +691,7 @@ def listen(self): class PubSub: """ - PubSub provides publish, subscribe and listen support to Redis channels. + PubSub provides publish, subscribe and listen support to Valkey channels. After subscribing to one or more channels, the listen() method will block until a message arrives on one of the subscribed channels. 
That message @@ -702,7 +700,7 @@ class PubSub: PUBLISH_MESSAGE_TYPES = ("message", "pmessage", "smessage") UNSUBSCRIBE_MESSAGE_TYPES = ("unsubscribe", "punsubscribe", "sunsubscribe") - HEALTH_CHECK_MESSAGE = "redis-py-health-check" + HEALTH_CHECK_MESSAGE = "valkey-py-health-check" def __init__( self, @@ -852,7 +850,7 @@ def _disconnect_raise_connect(self, conn, error) -> None: def _execute(self, conn, command, *args, **kwargs): """ - Connect manually upon disconnection. If the Redis server is down, + Connect manually upon disconnection. If the Valkey server is down, this will fail and raise a ConnectionError as desired. After reconnection, the ``on_connect`` callback should have been called by the # connection to resubscribe us to any channels and @@ -893,7 +891,7 @@ def try_read(): def is_health_check_response(self, response) -> bool: """ Check if the response is a health check response. - If there are no subscriptions redis responds to PING command with a + If there are no subscriptions valkey responds to PING command with a bulk response, instead of a multi-bulk with "pong" and the response. """ return response in [ @@ -1080,7 +1078,7 @@ def get_message( def ping(self, message: Union[str, None] = None) -> bool: """ - Ping the Redis server + Ping the Valkey server """ args = ["PING", message] if message is not None else ["PING"] return self.execute_command(*args) @@ -1223,11 +1221,11 @@ def stop(self) -> None: self._running.clear() -class Pipeline(Redis): +class Pipeline(Valkey): """ - Pipelines provide a way to transmit multiple commands to the Redis server + Pipelines provide a way to transmit multiple commands to the Valkey server in one transmission. This is convenient for batch processing, such as - saving all the values in a list to Redis. + saving all the values in a list to Valkey. All commands executed within a pipeline are wrapped with MULTI and EXEC calls. 
This guarantees all commands executed in the pipeline will be @@ -1306,9 +1304,9 @@ def multi(self) -> None: are issued. End the transactional block with `execute`. """ if self.explicit_transaction: - raise RedisError("Cannot issue nested calls to MULTI") + raise ValkeyError("Cannot issue nested calls to MULTI") if self.command_stack: - raise RedisError( + raise ValkeyError( "Commands without an initial WATCH have already been issued" ) self.explicit_transaction = True @@ -1478,7 +1476,7 @@ def annotate_exception(self, exception, number, command): exception.args = (msg,) + exception.args[1:] def parse_response(self, connection, command_name, **options): - result = Redis.parse_response(self, connection, command_name, **options) + result = Valkey.parse_response(self, connection, command_name, **options) if command_name in self.UNWATCH_COMMANDS: self.watching = False elif command_name == "WATCH": @@ -1556,14 +1554,14 @@ def execute(self, raise_on_error=True): def discard(self): """ Flushes all previously queued commands - See: https://redis.io/commands/DISCARD + See: https://valkey.io/commands/discard """ self.execute_command("DISCARD") def watch(self, *names): """Watches the values at keys ``names``""" if self.explicit_transaction: - raise RedisError("Cannot issue a WATCH after a MULTI") + raise ValkeyError("Cannot issue a WATCH after a MULTI") return self.execute_command("WATCH", *names) def unwatch(self) -> bool: diff --git a/redis/cluster.py b/valkey/cluster.py similarity index 89% rename from redis/cluster.py rename to valkey/cluster.py index e792d518..eaef0a1b 100644 --- a/redis/cluster.py +++ b/valkey/cluster.py @@ -6,15 +6,15 @@ from collections import OrderedDict from typing import Any, Callable, Dict, List, Optional, Tuple, Union -from redis._parsers import CommandsParser, Encoder -from redis._parsers.helpers import parse_scan -from redis.backoff import default_backoff -from redis.client import CaseInsensitiveDict, PubSub, Redis -from redis.commands import 
READ_COMMANDS, RedisClusterCommands -from redis.commands.helpers import list_or_args -from redis.connection import ConnectionPool, DefaultParser, parse_url -from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot -from redis.exceptions import ( +from valkey._parsers import CommandsParser, Encoder +from valkey._parsers.helpers import parse_scan +from valkey.backoff import default_backoff +from valkey.client import CaseInsensitiveDict, PubSub, Valkey +from valkey.commands import READ_COMMANDS, ValkeyClusterCommands +from valkey.commands.helpers import list_or_args +from valkey.connection import ConnectionPool, DefaultParser, parse_url +from valkey.crc import VALKEY_CLUSTER_HASH_SLOTS, key_slot +from valkey.exceptions import ( AskError, AuthenticationError, ClusterCrossSlotError, @@ -24,16 +24,16 @@ DataError, MasterDownError, MovedError, - RedisClusterException, - RedisError, ResponseError, SlotNotCoveredError, TimeoutError, TryAgainError, + ValkeyClusterException, + ValkeyError, ) -from redis.lock import Lock -from redis.retry import Retry -from redis.utils import ( +from valkey.lock import Lock +from valkey.retry import Retry +from valkey.utils import ( HIREDIS_AVAILABLE, dict_merge, list_keys_to_dict, @@ -47,8 +47,8 @@ def get_node_name(host: str, port: Union[str, int]) -> str: return f"{host}:{port}" -def get_connection(redis_node, *args, **options): - return redis_node.connection or redis_node.connection_pool.get_connection( +def get_connection(valkey_node, *args, **options): + return valkey_node.connection or valkey_node.connection_pool.get_connection( args[0], **options ) @@ -130,7 +130,7 @@ def parse_cluster_myshardid(resp, **options): REPLICA = "replica" SLOT_ID = "slot-id" -REDIS_ALLOWED_KEYS = ( +VALKEY_ALLOWED_KEYS = ( "charset", "connection_class", "connection_pool", @@ -147,7 +147,7 @@ def parse_cluster_myshardid(resp, **options): "lib_version", "max_connections", "nodes_flag", - "redis_connect_func", + "valkey_connect_func", "password", "port", 
"queue_class", @@ -185,7 +185,7 @@ def cleanup_kwargs(**kwargs): connection_kwargs = { k: v for k, v in kwargs.items() - if k in REDIS_ALLOWED_KEYS and k not in KWARGS_DISABLED_KEYS + if k in VALKEY_ALLOWED_KEYS and k not in KWARGS_DISABLED_KEYS } return connection_kwargs @@ -205,8 +205,8 @@ class ClusterParser(DefaultParser): ) -class AbstractRedisCluster: - RedisClusterRequestTTL = 16 +class AbstractValkeyCluster: + ValkeyClusterRequestTTL = 16 PRIMARIES = "primaries" REPLICAS = "replicas" @@ -449,24 +449,22 @@ def replace_default_node(self, target_node: "ClusterNode" = None) -> None: self.nodes_manager.default_node = random.choice(replicas) -class RedisCluster(AbstractRedisCluster, RedisClusterCommands): +class ValkeyCluster(AbstractValkeyCluster, ValkeyClusterCommands): @classmethod def from_url(cls, url, **kwargs): """ - Return a Redis client object configured from the given URL + Return a Valkey client object configured from the given URL For example:: - redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 + valkey://[[username]:[password]]@localhost:6379/0 + valkeys://[[username]:[password]]@localhost:6379/0 unix://[username@]/path/to/socket.sock?db=0[&password=password] Three URL schemes are supported: - - `redis://` creates a TCP socket connection. See more at: - - - `rediss://` creates a SSL wrapped TCP socket connection. See more at: - + - `valkey://` creates a TCP socket connection. + - `valkeys://` creates a SSL wrapped TCP socket connection. - ``unix://``: creates a Unix Domain Socket connection. The username, password, hostname, path and all querystring values @@ -476,9 +474,9 @@ def from_url(cls, url, **kwargs): There are several ways to specify a database number. The first value found will be used: - 1. A ``db`` querystring option, e.g. redis://localhost?db=0 - 2. If using the redis:// or rediss:// schemes, the path argument - of the url, e.g. redis://localhost/0 + 1. A ``db`` querystring option, e.g. 
valkey://localhost?db=0 + 2. If using the valkey:// or valkeys:// schemes, the path argument + of the url, e.g. valkey://localhost/0 3. A ``db`` keyword argument to this function. If none of these options are specified, the default db=0 is used. @@ -510,7 +508,7 @@ def __init__( **kwargs, ): """ - Initialize a new RedisCluster client. + Initialize a new ValkeyCluster client. :param startup_nodes: List of nodes from which initial bootstrapping can be done @@ -524,9 +522,9 @@ def __init__( and at least one node has 'cluster-require-full-coverage' set to 'yes,' the server will throw a ClusterDownError for some key-based commands. See - - https://redis.io/topics/cluster-tutorial#redis-cluster-configuration-parameters + https://valkey.io/topics/cluster-tutorial#valkey-cluster-configuration-parameters When set to True: all slots must be covered to construct the - cluster client. If not all slots are covered, RedisClusterException + cluster client. If not all slots are covered, ValkeyClusterException will be thrown. :param read_from_replicas: Enable read from replicas in READONLY mode. You can read possibly @@ -534,7 +532,7 @@ def __init__( When set to true, read commands will be assigned between the primary and its replications in a Round-Robin manner. :param dynamic_startup_nodes: - Set the RedisCluster's startup nodes to all of the discovered nodes. + Set the ValkeyCluster's startup nodes to all of the discovered nodes. If true (default value), the cluster's discovered nodes will be used to determine the cluster nodes-slots mapping in the next topology refresh. It will remove the initial passed startup nodes if their endpoints aren't @@ -563,19 +561,18 @@ def __init__( reach them, such as when they sit behind a proxy. 
:**kwargs: - Extra arguments that will be sent into Redis instance when created - (See Official redis-py doc for supported kwargs - [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py]) + Extra arguments that will be sent into Valkey instance when created + (See Official valkey-py doc for supported kwargs) Some kwargs are not supported and will raise a - RedisClusterException: - - db (Redis do not support database SELECT in cluster mode) + ValkeyClusterException: + - db (Valkey do not support database SELECT in cluster mode) """ if startup_nodes is None: startup_nodes = [] if "db" in kwargs: # Argument 'db' is not possible to use in cluster mode - raise RedisClusterException( + raise ValkeyClusterException( "Argument 'db' is not possible to use in cluster mode" ) @@ -585,13 +582,13 @@ def __init__( from_url = True url_options = parse_url(url) if "path" in url_options: - raise RedisClusterException( - "RedisCluster does not currently support Unix Domain " + raise ValkeyClusterException( + "ValkeyCluster does not currently support Unix Domain " "Socket connections" ) if "db" in url_options and url_options["db"] != 0: # Argument 'db' is not possible to use in cluster mode - raise RedisClusterException( + raise ValkeyClusterException( "A ``db`` querystring option can only be 0 in cluster mode" ) kwargs.update(url_options) @@ -602,22 +599,22 @@ def __init__( startup_nodes.append(ClusterNode(host, port)) elif len(startup_nodes) == 0: # No startup node was provided - raise RedisClusterException( - "RedisCluster requires at least one node to discover the " + raise ValkeyClusterException( + "ValkeyCluster requires at least one node to discover the " "cluster. Please provide one of the followings:\n" "1. host and port, for example:\n" - " RedisCluster(host='localhost', port=6379)\n" + " ValkeyCluster(host='localhost', port=6379)\n" "2. 
list of startup nodes, for example:\n" - " RedisCluster(startup_nodes=[ClusterNode('localhost', 6379)," + " ValkeyCluster(startup_nodes=[ClusterNode('localhost', 6379)," " ClusterNode('localhost', 6378)])" ) # Update the connection arguments - # Whenever a new connection is established, RedisCluster's on_connect + # Whenever a new connection is established, ValkeyCluster's on_connect # method should be run # If the user passed on_connect function we'll save it and run it - # inside the RedisCluster.on_connect() function - self.user_on_connect_func = kwargs.pop("redis_connect_func", None) - kwargs.update({"redis_connect_func": self.on_connect}) + # inside the ValkeyCluster.on_connect() function + self.user_on_connect_func = kwargs.pop("valkey_connect_func", None) + kwargs.update({"valkey_connect_func": self.on_connect}) kwargs = cleanup_kwargs(**kwargs) if retry: self.retry = retry @@ -663,9 +660,9 @@ def __del__(self): def disconnect_connection_pools(self): for node in self.get_nodes(): - if node.redis_connection: + if node.valkey_connection: try: - node.redis_connection.connection_pool.disconnect() + node.valkey_connection.connection_pool.disconnect() except OSError: # Client was already disconnected. 
do nothing pass @@ -691,12 +688,12 @@ def on_connect(self, connection): if self.user_on_connect_func is not None: self.user_on_connect_func(connection) - def get_redis_connection(self, node): - if not node.redis_connection: + def get_valkey_connection(self, node): + if not node.valkey_connection: with self._lock: - if not node.redis_connection: - self.nodes_manager.create_redis_connections([node]) - return node.redis_connection + if not node.valkey_connection: + self.nodes_manager.create_valkey_connections([node]) + return node.valkey_connection def get_node(self, host=None, port=None, node_name=None): return self.nodes_manager.get_node(host, port, node_name) @@ -756,24 +753,24 @@ def get_retry(self) -> Optional["Retry"]: def set_retry(self, retry: "Retry") -> None: self.retry = retry for node in self.get_nodes(): - node.redis_connection.set_retry(retry) + node.valkey_connection.set_retry(retry) def monitor(self, target_node=None): """ Returns a Monitor object for the specified target node. The default cluster node will be selected if no target node was specified. - Monitor is useful for handling the MONITOR command to the redis server. + Monitor is useful for handling the MONITOR command to the valkey server. next_command() method returns one command from monitor listen() method yields commands from monitor. """ if target_node is None: target_node = self.get_default_node() - if target_node.redis_connection is None: - raise RedisClusterException( - f"Cluster Node {target_node.name} has no redis_connection" + if target_node.valkey_connection is None: + raise ValkeyClusterException( + f"Cluster Node {target_node.name} has no valkey_connection" ) - return target_node.redis_connection.monitor() + return target_node.valkey_connection.monitor() def pubsub(self, node=None, host=None, port=None, **kwargs): """ @@ -792,10 +789,10 @@ def pipeline(self, transaction=None, shard_hint=None): when calling execute() will only return the result stack. 
""" if shard_hint: - raise RedisClusterException("shard_hint is deprecated in cluster mode") + raise ValkeyClusterException("shard_hint is deprecated in cluster mode") if transaction: - raise RedisClusterException("transaction is deprecated in cluster mode") + raise ValkeyClusterException("transaction is deprecated in cluster mode") return ClusterPipeline( nodes_manager=self.nodes_manager, @@ -842,7 +839,7 @@ def lock( float or integer, both representing the number of seconds to wait. ``lock_class`` forces the specified lock implementation. Note that as - of redis-py 3.0, the only lock class we implement is ``Lock`` (which is + of valkey-py 3.0, the only lock class we implement is ``Lock`` (which is a Lua-based lock). So, it's unlikely you'll need this parameter, unless you have created your own custom lock class. @@ -855,7 +852,7 @@ def lock( thread-1 sets the token to "abc" time: 1, thread-2 blocks trying to acquire `my-lock` using the Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock + time: 5, thread-1 has not yet completed. valkey expires the lock key. time: 5, thread-2 acquired `my-lock` now that it's available. thread-2 sets the token to "xyz" @@ -949,21 +946,21 @@ def _get_command_keys(self, *args): Get the keys in the command. If the command has no keys in in, None is returned. - NOTE: Due to a bug in redis<7.0, this function does not work properly + NOTE: Due to a bug in valkey<7.0, this function does not work properly for EVAL or EVALSHA when the `numkeys` arg is 0. - issue: https://github.com/redis/redis/issues/9493 - fix: https://github.com/redis/redis/pull/9733 So, don't use this function with EVAL or EVALSHA. """ - redis_conn = self.get_default_node().redis_connection - return self.commands_parser.get_keys(redis_conn, *args) + valkey_conn = self.get_default_node().valkey_connection + return self.commands_parser.get_keys(valkey_conn, *args) def determine_slot(self, *args): """ Figure out what slot to use based on args. 
- Raises a RedisClusterException if there's a missing key and we can't + Raises a ValkeyClusterException if there's a missing key and we can't determine what slots to map the command to; or, if the keys don't all map to the same key slot. """ @@ -975,19 +972,19 @@ def determine_slot(self, *args): # Get the keys in the command # EVAL and EVALSHA are common enough that it's wasteful to go to the - # redis server to parse the keys. Besides, there is a bug in redis<7.0 + # valkey server to parse the keys. Besides, there is a bug in valkey<7.0 # where `self._get_command_keys()` fails anyway. So, we special case # EVAL/EVALSHA. if command.upper() in ("EVAL", "EVALSHA"): # command syntax: EVAL "script body" num_keys ... if len(args) <= 2: - raise RedisClusterException(f"Invalid args in command: {args}") + raise ValkeyClusterException(f"Invalid args in command: {args}") num_actual_keys = int(args[2]) eval_keys = args[3 : 3 + num_actual_keys] # if there are 0 keys, that means the script can be run on any node # so we can just return a random slot if len(eval_keys) == 0: - return random.randrange(0, REDIS_CLUSTER_HASH_SLOTS) + return random.randrange(0, VALKEY_CLUSTER_HASH_SLOTS) keys = eval_keys else: keys = self._get_command_keys(*args) @@ -995,9 +992,9 @@ def determine_slot(self, *args): # FCALL can call a function with 0 keys, that means the function # can be run on any node so we can just return a random slot if command.upper() in ("FCALL", "FCALL_RO"): - return random.randrange(0, REDIS_CLUSTER_HASH_SLOTS) - raise RedisClusterException( - "No way to dispatch this command to Redis Cluster. " + return random.randrange(0, VALKEY_CLUSTER_HASH_SLOTS) + raise ValkeyClusterException( + "No way to dispatch this command to Valkey Cluster. 
" "Missing key.\nYou can execute the command by specifying " f"target nodes.\nCommand: {args}" ) @@ -1010,7 +1007,7 @@ def determine_slot(self, *args): # the same slot slots = {self.keyslot(key) for key in keys} if len(slots) != 1: - raise RedisClusterException( + raise ValkeyClusterException( f"{command} - all keys must map to the same key slot" ) @@ -1096,7 +1093,7 @@ def execute_command(self, *args, **kwargs): *args, **kwargs, nodes_flag=passed_targets ) if not target_nodes: - raise RedisClusterException( + raise ValkeyClusterException( f"No targets were found to execute {args} command on" ) if ( @@ -1127,12 +1124,12 @@ def _execute_command(self, target_node, *args, **kwargs): """ keys = kwargs.pop("keys", None) command = args[0] - redis_node = None + valkey_node = None connection = None redirect_addr = None asking = False moved = False - ttl = int(self.RedisClusterRequestTTL) + ttl = int(self.ValkeyClusterRequestTTL) while ttl > 0: ttl -= 1 @@ -1148,18 +1145,18 @@ def _execute_command(self, target_node, *args, **kwargs): ) moved = False - redis_node = self.get_redis_connection(target_node) - connection = get_connection(redis_node, *args, **kwargs) + valkey_node = self.get_valkey_connection(target_node) + connection = get_connection(valkey_node, *args, **kwargs) if asking: connection.send_command("ASKING") - redis_node.parse_response(connection, "ASKING", **kwargs) + valkey_node.parse_response(connection, "ASKING", **kwargs) asking = False response_from_cache = connection._get_from_local_cache(args) if response_from_cache is not None: return response_from_cache else: connection.send_command(*args) - response = redis_node.parse_response(connection, command, **kwargs) + response = valkey_node.parse_response(connection, command, **kwargs) if command in self.cluster_response_callbacks: response = self.cluster_response_callbacks[command]( response, **kwargs @@ -1182,7 +1179,7 @@ def _execute_command(self, target_node, *args, **kwargs): # to reinitialize the cluster 
self.nodes_manager.startup_nodes.pop(target_node.name, None) # Reset the cluster node's connection - target_node.redis_connection = None + target_node.valkey_connection = None self.nodes_manager.initialize() raise e except MovedError as e: @@ -1193,7 +1190,7 @@ def _execute_command(self, target_node, *args, **kwargs): # 'reinitialize_steps' counter will increase faster when # the same client object is shared between multiple threads. To # reduce the frequency you can set this variable in the - # RedisCluster constructor. + # ValkeyCluster constructor. self.reinitialize_counter += 1 if self._should_reinitialized(): self.nodes_manager.initialize() @@ -1203,7 +1200,7 @@ def _execute_command(self, target_node, *args, **kwargs): self.nodes_manager.update_moved_exception(e) moved = True except TryAgainError: - if ttl < self.RedisClusterRequestTTL / 2: + if ttl < self.ValkeyClusterRequestTTL / 2: time.sleep(0.05) except AskError as e: redirect_addr = get_node_name(host=e.host, port=e.port) @@ -1223,7 +1220,7 @@ def _execute_command(self, target_node, *args, **kwargs): raise e finally: if connection is not None: - redis_node.connection_pool.release(connection) + valkey_node.connection_pool.release(connection) raise ClusterError("TTL exhausted.") @@ -1233,7 +1230,7 @@ def close(self): if self.nodes_manager: self.nodes_manager.close() except AttributeError: - # RedisCluster's __init__ can fail before nodes_manager is set + # ValkeyCluster's __init__ can fail before nodes_manager is set pass def _process_result(self, command, res, **kwargs): @@ -1258,8 +1255,8 @@ def _process_result(self, command, res, **kwargs): def load_external_module(self, funcname, func): """ - This function can be used to add externally defined redis modules, - and their namespaces to the redis client. + This function can be used to add externally defined valkey modules, + and their namespaces to the valkey client. 
``funcname`` - A string containing the name of the function to create ``func`` - The function, being added to this class. @@ -1280,7 +1277,7 @@ def invalidate_key_from_cache(self, key): class ClusterNode: - def __init__(self, host, port, server_type=None, redis_connection=None): + def __init__(self, host, port, server_type=None, valkey_connection=None): if host == "localhost": host = socket.gethostbyname(host) @@ -1288,7 +1285,7 @@ def __init__(self, host, port, server_type=None, redis_connection=None): self.port = port self.name = get_node_name(host, port) self.server_type = server_type - self.redis_connection = redis_connection + self.valkey_connection = valkey_connection def __repr__(self): return ( @@ -1296,27 +1293,27 @@ def __repr__(self): f"port={self.port}," f"name={self.name}," f"server_type={self.server_type}," - f"redis_connection={self.redis_connection}]" + f"valkey_connection={self.valkey_connection}]" ) def __eq__(self, obj): return isinstance(obj, ClusterNode) and obj.name == self.name def __del__(self): - if self.redis_connection is not None: - self.redis_connection.close() + if self.valkey_connection is not None: + self.valkey_connection.close() def flush_cache(self): - if self.redis_connection is not None: - self.redis_connection.flush_cache() + if self.valkey_connection is not None: + self.valkey_connection.flush_cache() def delete_command_from_cache(self, command): - if self.redis_connection is not None: - self.redis_connection.delete_command_from_cache(command) + if self.valkey_connection is not None: + self.valkey_connection.delete_command_from_cache(command) def invalidate_key_from_cache(self, key): - if self.redis_connection is not None: - self.redis_connection.invalidate_key_from_cache(key) + if self.valkey_connection is not None: + self.valkey_connection.invalidate_key_from_cache(key) class LoadBalancer: @@ -1483,29 +1480,29 @@ def populate_startup_nodes(self, nodes): def check_slots_coverage(self, slots_cache): # Validate if all slots are 
covered or if we should try next # startup node - for i in range(0, REDIS_CLUSTER_HASH_SLOTS): + for i in range(0, VALKEY_CLUSTER_HASH_SLOTS): if i not in slots_cache: return False return True - def create_redis_connections(self, nodes): + def create_valkey_connections(self, nodes): """ - This function will create a redis connection to all nodes in :nodes: + This function will create a valkey connection to all nodes in :nodes: """ for node in nodes: - if node.redis_connection is None: - node.redis_connection = self.create_redis_node( + if node.valkey_connection is None: + node.valkey_connection = self.create_valkey_node( host=node.host, port=node.port, **self.connection_kwargs ) - def create_redis_node(self, host, port, **kwargs): + def create_valkey_node(self, host, port, **kwargs): if self.from_url: - # Create a redis node with a costumed connection pool + # Create a valkey node with a costumed connection pool kwargs.update({"host": host}) kwargs.update({"port": port}) - r = Redis(connection_pool=self.connection_pool_class(**kwargs)) + r = Valkey(connection_pool=self.connection_pool_class(**kwargs)) else: - r = Redis(host=host, port=port, **kwargs) + r = Valkey(host=host, port=port, **kwargs) return r def _get_or_create_cluster_node(self, host, port, role, tmp_nodes_cache): @@ -1517,7 +1514,7 @@ def _get_or_create_cluster_node(self, host, port, role, tmp_nodes_cache): # exists in the current nodes cache and has a valid connection so we can # reuse it target_node = self.nodes_cache.get(node_name) - if target_node is None or target_node.redis_connection is None: + if target_node is None or target_node.valkey_connection is None: # create new cluster node for this cluster target_node = ClusterNode(host, port, role) if target_node.server_type != role: @@ -1527,7 +1524,7 @@ def _get_or_create_cluster_node(self, host, port, role, tmp_nodes_cache): def initialize(self): """ - Initializes the nodes cache, slots cache and redis connections. 
+ Initializes the nodes cache, slots cache and valkey connections. :startup_nodes: Responsible for discovering other nodes in the cluster """ @@ -1541,19 +1538,19 @@ def initialize(self): exception = None for startup_node in self.startup_nodes.values(): try: - if startup_node.redis_connection: - r = startup_node.redis_connection + if startup_node.valkey_connection: + r = startup_node.valkey_connection else: - # Create a new Redis connection - r = self.create_redis_node( + # Create a new Valkey connection + r = self.create_valkey_node( startup_node.host, startup_node.port, **kwargs ) - self.startup_nodes[startup_node.name].redis_connection = r + self.startup_nodes[startup_node.name].valkey_connection = r # Make sure cluster mode is enabled on this node try: cluster_slots = str_if_bytes(r.execute_command("CLUSTER SLOTS")) except ResponseError: - raise RedisClusterException( + raise ValkeyClusterException( "Cluster mode is not enabled on this node" ) startup_nodes_reachable = True @@ -1620,7 +1617,7 @@ def initialize(self): ) if len(disagreements) > 5: - raise RedisClusterException( + raise ValkeyClusterException( f"startup_nodes could not agree on a valid " f'slots cache: {", ".join(disagreements)}' ) @@ -1632,21 +1629,21 @@ def initialize(self): break if not startup_nodes_reachable: - raise RedisClusterException( - f"Redis Cluster cannot be connected. Please provide at least " + raise ValkeyClusterException( + f"Valkey Cluster cannot be connected. 
Please provide at least " f"one reachable node: {str(exception)}" ) from exception - # Create Redis connections to all nodes - self.create_redis_connections(list(tmp_nodes_cache.values())) + # Create Valkey connections to all nodes + self.create_valkey_connections(list(tmp_nodes_cache.values())) # Check if the slots are not fully covered if not fully_covered and self._require_full_coverage: # Despite the requirement that the slots be covered, there # isn't a full coverage - raise RedisClusterException( + raise ValkeyClusterException( f"All slots are not covered after query all startup_nodes. " - f"{len(tmp_slots)} of {REDIS_CLUSTER_HASH_SLOTS} " + f"{len(tmp_slots)} of {VALKEY_CLUSTER_HASH_SLOTS} " f"covered..." ) @@ -1664,8 +1661,8 @@ def initialize(self): def close(self): self.default_node = None for node in self.nodes_cache.values(): - if node.redis_connection: - node.redis_connection.close() + if node.valkey_connection: + node.valkey_connection.close() def reset(self): try: @@ -1703,12 +1700,12 @@ class ClusterPubSub(PubSub): IMPORTANT: before using ClusterPubSub, read about the known limitations with pubsub in Cluster mode and learn how to workaround them: - https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html + https://valkey-py-cluster.readthedocs.io/en/stable/pubsub.html """ def __init__( self, - redis_cluster, + valkey_cluster, node=None, host=None, port=None, @@ -1723,24 +1720,24 @@ def __init__( 2. Selecting a node that handles the keyslot: If read_from_replicas is set to true, a replica can be selected. 
- :type redis_cluster: RedisCluster + :type valkey_cluster: ValkeyCluster :type node: ClusterNode :type host: str :type port: int """ self.node = None - self.set_pubsub_node(redis_cluster, node, host, port) + self.set_pubsub_node(valkey_cluster, node, host, port) connection_pool = ( None if self.node is None - else redis_cluster.get_redis_connection(self.node).connection_pool + else valkey_cluster.get_valkey_connection(self.node).connection_pool ) - self.cluster = redis_cluster + self.cluster = valkey_cluster self.node_pubsub_mapping = {} self._pubsubs_generator = self._pubsubs_generator() super().__init__( connection_pool=connection_pool, - encoder=redis_cluster.encoder, + encoder=valkey_cluster.encoder, push_handler_func=push_handler_func, **kwargs, ) @@ -1751,11 +1748,11 @@ def set_pubsub_node(self, cluster, node=None, host=None, port=None): When none of the node, host, or port are specified - the node is set to None and will be determined by the keyslot of the channel in the first command to be executed. - RedisClusterException will be thrown if the passed node does not exist + ValkeyClusterException will be thrown if the passed node does not exist in the cluster. If host is passed without port, or vice versa, a DataError will be thrown. - :type cluster: RedisCluster + :type cluster: ValkeyCluster :type node: ClusterNode :type host: str :type port: int @@ -1784,13 +1781,13 @@ def get_pubsub_node(self): """ return self.node - def _raise_on_invalid_node(self, redis_cluster, node, host, port): + def _raise_on_invalid_node(self, valkey_cluster, node, host, port): """ - Raise a RedisClusterException if the node is None or doesn't exist in + Raise a ValkeyClusterException if the node is None or doesn't exist in the cluster. 
""" - if node is None or redis_cluster.get_node(node_name=node.name) is None: - raise RedisClusterException( + if node is None or valkey_cluster.get_node(node_name=node.name) is None: + raise ValkeyClusterException( f"Node {host}:{port} doesn't exist in the cluster" ) @@ -1798,7 +1795,7 @@ def execute_command(self, *args): """ Execute a subscribe/unsubscribe command. - Taken code from redis-py and tweak to make it work within a cluster. + Taken code from valkey-py and tweak to make it work within a cluster. """ # NOTE: don't parse the response in this function -- it could pull a # legitimate message off the stack if the connection is already @@ -1818,8 +1815,8 @@ def execute_command(self, *args): # Get a random node node = self.cluster.get_random_node() self.node = node - redis_connection = self.cluster.get_redis_connection(node) - self.connection_pool = redis_connection.connection_pool + valkey_connection = self.cluster.get_valkey_connection(node) + self.connection_pool = valkey_connection.connection_pool self.connection = self.connection_pool.get_connection( "pubsub", self.shard_hint ) @@ -1835,7 +1832,7 @@ def _get_node_pubsub(self, node): try: return self.node_pubsub_mapping[node.name] except KeyError: - pubsub = node.redis_connection.pubsub( + pubsub = node.valkey_connection.pubsub( push_handler_func=self.push_handler_func ) self.node_pubsub_mapping[node.name] = pubsub @@ -1914,12 +1911,12 @@ def sunsubscribe(self, *args): p.pending_unsubscribe_shard_channels ) - def get_redis_connection(self): + def get_valkey_connection(self): """ - Get the Redis connection of the pubsub connected node. + Get the Valkey connection of the pubsub connected node. 
""" if self.node is not None: - return self.node.redis_connection + return self.node.valkey_connection def disconnect(self): """ @@ -1931,9 +1928,9 @@ def disconnect(self): pubsub.connection.disconnect() -class ClusterPipeline(RedisCluster): +class ClusterPipeline(ValkeyCluster): """ - Support for Redis pipeline + Support for Valkey pipeline in cluster mode """ @@ -2128,11 +2125,11 @@ def _send_cluster_commands( self, stack, raise_on_error=True, allow_redirections=True ): """ - Send a bunch of cluster commands to the redis cluster. + Send a bunch of cluster commands to the valkey cluster. `allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses automatically. If set - to false it will raise RedisClusterException. + to false it will raise ValkeyClusterException. """ # the first time sending the commands we send all of # the commands that were queued up. @@ -2160,11 +2157,11 @@ def _send_cluster_commands( *c.args, node_flag=passed_targets ) if not target_nodes: - raise RedisClusterException( + raise ValkeyClusterException( f"No targets were found to execute {c.args} command on" ) if len(target_nodes) > 1: - raise RedisClusterException( + raise ValkeyClusterException( f"Too many targets for command {c.args}" ) @@ -2177,9 +2174,9 @@ def _send_cluster_commands( # we can build a list of commands for each node. 
node_name = node.name if node_name not in nodes: - redis_node = self.get_redis_connection(node) + valkey_node = self.get_valkey_connection(node) try: - connection = get_connection(redis_node, c.args) + connection = get_connection(valkey_node, c.args) except ConnectionError: for n in nodes.values(): n.connection_pool.release(n.connection) @@ -2190,8 +2187,8 @@ def _send_cluster_commands( self.replace_default_node() raise nodes[node_name] = NodeCommands( - redis_node.parse_response, - redis_node.connection_pool, + valkey_node.parse_response, + valkey_node.connection_pool, connection, ) nodes[node_name].append(c) @@ -2214,7 +2211,7 @@ def _send_cluster_commands( for n in node_commands: n.read() finally: - # release all of the redis connections we allocated earlier + # release all of the valkey connections we allocated earlier # back into the connection pool. # we used to do this step as part of a try/finally block, # but it is really dangerous to @@ -2278,7 +2275,7 @@ def _send_cluster_commands( # send each command individually like we # do in the main client. 
c.result = super().execute_command(*c.args, **c.options) - except RedisError as e: + except ValkeyError as e: c.result = e # turn the response back into a simple flat array that corresponds @@ -2299,7 +2296,7 @@ def _send_cluster_commands( def _fail_on_redirect(self, allow_redirections): """ """ if not allow_redirections: - raise RedisClusterException( + raise ValkeyClusterException( "ASK & MOVED redirection not allowed in this pipeline" ) @@ -2308,37 +2305,37 @@ def exists(self, *keys): def eval(self): """ """ - raise RedisClusterException("method eval() is not implemented") + raise ValkeyClusterException("method eval() is not implemented") def multi(self): """ """ - raise RedisClusterException("method multi() is not implemented") + raise ValkeyClusterException("method multi() is not implemented") def immediate_execute_command(self, *args, **options): """ """ - raise RedisClusterException( + raise ValkeyClusterException( "method immediate_execute_command() is not implemented" ) def _execute_transaction(self, *args, **kwargs): """ """ - raise RedisClusterException("method _execute_transaction() is not implemented") + raise ValkeyClusterException("method _execute_transaction() is not implemented") def load_scripts(self): """ """ - raise RedisClusterException("method load_scripts() is not implemented") + raise ValkeyClusterException("method load_scripts() is not implemented") def watch(self, *names): """ """ - raise RedisClusterException("method watch() is not implemented") + raise ValkeyClusterException("method watch() is not implemented") def unwatch(self): """ """ - raise RedisClusterException("method unwatch() is not implemented") + raise ValkeyClusterException("method unwatch() is not implemented") def script_load_for_pipeline(self, *args, **kwargs): """ """ - raise RedisClusterException( + raise ValkeyClusterException( "method script_load_for_pipeline() is not implemented" ) @@ -2347,7 +2344,7 @@ def delete(self, *names): "Delete a key specified by ``names``" 
""" if len(names) != 1: - raise RedisClusterException( + raise ValkeyClusterException( "deleting multiple keys is not implemented in pipeline command" ) @@ -2358,7 +2355,7 @@ def unlink(self, *names): "Unlink a key specified by ``names``" """ if len(names) != 1: - raise RedisClusterException( + raise ValkeyClusterException( "unlinking multiple keys is not implemented in pipeline command" ) @@ -2372,9 +2369,9 @@ def block_pipeline_command(name: str) -> Callable[..., Any]: """ def inner(*args, **kwargs): - raise RedisClusterException( + raise ValkeyClusterException( f"ERROR: Calling pipelined function {name} is blocked " - f"when running redis in cluster mode..." + f"when running valkey in cluster mode..." ) return inner @@ -2488,7 +2485,7 @@ def append(self, c): def write(self): """ - Code borrowed from Redis so it can be fixed + Code borrowed from Valkey so it can be fixed """ connection = self.connection commands = self.commands @@ -2517,8 +2514,8 @@ def read(self): # like a connection error. Trying to parse # a response on a connection that # is no longer open will result in a - # connection error raised by redis-py. - # but redis-py doesn't check in parse_response + # connection error raised by valkey-py. 
+ # but valkey-py doesn't check in parse_response # that the sock object is # still set and if you try to # read from a closed connection, it will @@ -2537,5 +2534,5 @@ def read(self): for c in self.commands: c.result = e return - except RedisError: + except ValkeyError: c.result = sys.exc_info()[1] diff --git a/redis/commands/__init__.py b/valkey/commands/__init__.py similarity index 51% rename from redis/commands/__init__.py rename to valkey/commands/__init__.py index a94d9764..ecefe5f9 100644 --- a/redis/commands/__init__.py +++ b/valkey/commands/__init__.py @@ -1,18 +1,18 @@ -from .cluster import READ_COMMANDS, AsyncRedisClusterCommands, RedisClusterCommands +from .cluster import READ_COMMANDS, AsyncValkeyClusterCommands, ValkeyClusterCommands from .core import AsyncCoreCommands, CoreCommands from .helpers import list_or_args -from .redismodules import AsyncRedisModuleCommands, RedisModuleCommands from .sentinel import AsyncSentinelCommands, SentinelCommands +from .valkeymodules import AsyncValkeyModuleCommands, ValkeyModuleCommands __all__ = [ "AsyncCoreCommands", - "AsyncRedisClusterCommands", - "AsyncRedisModuleCommands", + "AsyncValkeyClusterCommands", + "AsyncValkeyModuleCommands", "AsyncSentinelCommands", "CoreCommands", "READ_COMMANDS", - "RedisClusterCommands", - "RedisModuleCommands", + "ValkeyClusterCommands", + "ValkeyModuleCommands", "SentinelCommands", "list_or_args", ] diff --git a/redis/commands/bf/__init__.py b/valkey/commands/bf/__init__.py similarity index 99% rename from redis/commands/bf/__init__.py rename to valkey/commands/bf/__init__.py index 959358f8..b0ca008a 100644 --- a/redis/commands/bf/__init__.py +++ b/valkey/commands/bf/__init__.py @@ -1,4 +1,4 @@ -from redis._parsers.helpers import bool_ok +from valkey._parsers.helpers import bool_ok from ..helpers import get_protocol_version, parse_to_list from .commands import * # noqa diff --git a/redis/commands/bf/commands.py b/valkey/commands/bf/commands.py similarity index 79% rename from 
redis/commands/bf/commands.py rename to valkey/commands/bf/commands.py index 447f8445..0e70cd3a 100644 --- a/redis/commands/bf/commands.py +++ b/valkey/commands/bf/commands.py @@ -1,6 +1,6 @@ -from redis.client import NEVER_DECODE -from redis.exceptions import ModuleError -from redis.utils import HIREDIS_AVAILABLE, deprecated_function +from valkey.client import NEVER_DECODE +from valkey.exceptions import ModuleError +from valkey.utils import HIREDIS_AVAILABLE, deprecated_function BF_RESERVE = "BF.RESERVE" BF_ADD = "BF.ADD" @@ -65,7 +65,7 @@ def create(self, key, errorRate, capacity, expansion=None, noScale=None): Create a new Bloom Filter `key` with desired probability of false positives `errorRate` expected entries to be inserted as `capacity`. Default expansion value is 2. By default, filter is auto-scaling. - For more information see `BF.RESERVE `_. + For more information see `BF.RESERVE `_. """ # noqa params = [key, errorRate, capacity] self.append_expansion(params, expansion) @@ -77,14 +77,14 @@ def create(self, key, errorRate, capacity, expansion=None, noScale=None): def add(self, key, item): """ Add to a Bloom Filter `key` an `item`. - For more information see `BF.ADD `_. + For more information see `BF.ADD `_. """ # noqa return self.execute_command(BF_ADD, key, item) def madd(self, key, *items): """ Add to a Bloom Filter `key` multiple `items`. - For more information see `BF.MADD `_. + For more information see `BF.MADD `_. """ # noqa return self.execute_command(BF_MADD, key, *items) @@ -104,7 +104,7 @@ def insert( If `nocreate` remain `None` and `key` does not exist, a new Bloom Filter `key` will be created with desired probability of false positives `errorRate` and expected entries to be inserted as `size`. - For more information see `BF.INSERT `_. + For more information see `BF.INSERT `_. 
""" # noqa params = [key] self.append_capacity(params, capacity) @@ -119,14 +119,14 @@ def insert( def exists(self, key, item): """ Check whether an `item` exists in Bloom Filter `key`. - For more information see `BF.EXISTS `_. + For more information see `BF.EXISTS `_. """ # noqa return self.execute_command(BF_EXISTS, key, item) def mexists(self, key, *items): """ Check whether `items` exist in Bloom Filter `key`. - For more information see `BF.MEXISTS `_. + For more information see `BF.MEXISTS `_. """ # noqa return self.execute_command(BF_MEXISTS, key, *items) @@ -137,7 +137,7 @@ def scandump(self, key, iter): This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of `iter` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion. - For more information see `BF.SCANDUMP `_. + For more information see `BF.SCANDUMP `_. """ # noqa if HIREDIS_AVAILABLE: raise ModuleError("This command cannot be used when hiredis is available.") @@ -154,14 +154,14 @@ def loadchunk(self, key, iter, data): See the SCANDUMP command for example usage. This command will overwrite any bloom filter stored under key. Ensure that the bloom filter will not be modified between invocations. - For more information see `BF.LOADCHUNK `_. + For more information see `BF.LOADCHUNK `_. """ # noqa return self.execute_command(BF_LOADCHUNK, key, iter, data) def info(self, key): """ Return capacity, size, number of filters, number of items inserted, and expansion rate. - For more information see `BF.INFO `_. + For more information see `BF.INFO `_. """ # noqa return self.execute_command(BF_INFO, key) @@ -169,7 +169,7 @@ def card(self, key): """ Returns the cardinality of a Bloom filter - number of items that were added to a Bloom filter and detected as unique (items that caused at least one bit to be set in at least one sub-filter). - For more information see `BF.CARD `_. 
+ For more information see `BF.CARD `_. """ # noqa return self.execute_command(BF_CARD, key) @@ -182,7 +182,7 @@ def create( ): """ Create a new Cuckoo Filter `key` an initial `capacity` items. - For more information see `CF.RESERVE `_. + For more information see `CF.RESERVE `_. """ # noqa params = [key, capacity] self.append_expansion(params, expansion) @@ -195,7 +195,7 @@ def create( def add(self, key, item): """ Add an `item` to a Cuckoo Filter `key`. - For more information see `CF.ADD `_. + For more information see `CF.ADD `_. """ # noqa return self.execute_command(CF_ADD, key, item) @@ -203,7 +203,7 @@ def addnx(self, key, item): """ Add an `item` to a Cuckoo Filter `key` only if item does not yet exist. Command might be slower that `add`. - For more information see `CF.ADDNX `_. + For more information see `CF.ADDNX `_. """ # noqa return self.execute_command(CF_ADDNX, key, item) @@ -212,7 +212,7 @@ def insert(self, key, items, capacity=None, nocreate=None): Add multiple `items` to a Cuckoo Filter `key`, allowing the filter to be created with a custom `capacity` if it does not yet exist. `items` must be provided as a list. - For more information see `CF.INSERT `_. + For more information see `CF.INSERT `_. """ # noqa params = [key] self.append_capacity(params, capacity) @@ -225,7 +225,7 @@ def insertnx(self, key, items, capacity=None, nocreate=None): Add multiple `items` to a Cuckoo Filter `key` only if they do not exist yet, allowing the filter to be created with a custom `capacity` if it does not yet exist. `items` must be provided as a list. - For more information see `CF.INSERTNX `_. + For more information see `CF.INSERTNX `_. """ # noqa params = [key] self.append_capacity(params, capacity) @@ -236,28 +236,28 @@ def insertnx(self, key, items, capacity=None, nocreate=None): def exists(self, key, item): """ Check whether an `item` exists in Cuckoo Filter `key`. - For more information see `CF.EXISTS `_. + For more information see `CF.EXISTS `_. 
""" # noqa return self.execute_command(CF_EXISTS, key, item) def mexists(self, key, *items): """ Check whether an `items` exist in Cuckoo Filter `key`. - For more information see `CF.MEXISTS `_. + For more information see `CF.MEXISTS `_. """ # noqa return self.execute_command(CF_MEXISTS, key, *items) def delete(self, key, item): """ Delete `item` from `key`. - For more information see `CF.DEL `_. + For more information see `CF.DEL `_. """ # noqa return self.execute_command(CF_DEL, key, item) def count(self, key, item): """ Return the number of times an `item` may be in the `key`. - For more information see `CF.COUNT `_. + For more information see `CF.COUNT `_. """ # noqa return self.execute_command(CF_COUNT, key, item) @@ -270,7 +270,7 @@ def scandump(self, key, iter): The first time this command is called, the value of `iter` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion. - For more information see `CF.SCANDUMP `_. + For more information see `CF.SCANDUMP `_. """ # noqa return self.execute_command(CF_SCANDUMP, key, iter) @@ -280,7 +280,7 @@ def loadchunk(self, key, iter, data): This command will overwrite any Cuckoo filter stored under key. Ensure that the Cuckoo filter will not be modified between invocations. - For more information see `CF.LOADCHUNK `_. + For more information see `CF.LOADCHUNK `_. """ # noqa return self.execute_command(CF_LOADCHUNK, key, iter, data) @@ -288,7 +288,7 @@ def info(self, key): """ Return size, number of buckets, number of filter, number of items inserted, number of items deleted, bucket size, expansion rate, and max iteration. - For more information see `CF.INFO `_. + For more information see `CF.INFO `_. """ # noqa return self.execute_command(CF_INFO, key) @@ -300,14 +300,14 @@ def reserve(self, key, k, width, depth, decay): """ Create a new Top-K Filter `key` with desired probability of false positives `errorRate` expected entries to be inserted as `size`. 
- For more information see `TOPK.RESERVE `_. + For more information see `TOPK.RESERVE `_. """ # noqa return self.execute_command(TOPK_RESERVE, key, k, width, depth, decay) def add(self, key, *items): """ Add one `item` or more to a Top-K Filter `key`. - For more information see `TOPK.ADD `_. + For more information see `TOPK.ADD `_. """ # noqa return self.execute_command(TOPK_ADD, key, *items) @@ -315,7 +315,7 @@ def incrby(self, key, items, increments): """ Add/increase `items` to a Top-K Sketch `key` by ''increments''. Both `items` and `increments` are lists. - For more information see `TOPK.INCRBY `_. + For more information see `TOPK.INCRBY `_. Example: @@ -328,15 +328,15 @@ def incrby(self, key, items, increments): def query(self, key, *items): """ Check whether one `item` or more is a Top-K item at `key`. - For more information see `TOPK.QUERY `_. + For more information see `TOPK.QUERY `_. """ # noqa return self.execute_command(TOPK_QUERY, key, *items) - @deprecated_function(version="4.4.0", reason="deprecated since redisbloom 2.4.0") + @deprecated_function(version="4.4.0", reason="deprecated since valkeybloom 2.4.0") def count(self, key, *items): """ Return count for one `item` or more from `key`. - For more information see `TOPK.COUNT `_. + For more information see `TOPK.COUNT `_. """ # noqa return self.execute_command(TOPK_COUNT, key, *items) @@ -345,7 +345,7 @@ def list(self, key, withcount=False): Return full list of items in Top-K list of `key`. If `withcount` set to True, return full list of items with probabilistic count in Top-K list of `key`. - For more information see `TOPK.LIST `_. + For more information see `TOPK.LIST `_. """ # noqa params = [key] if withcount: @@ -355,7 +355,7 @@ def list(self, key, withcount=False): def info(self, key): """ Return k, width, depth and decay values of `key`. - For more information see `TOPK.INFO `_. + For more information see `TOPK.INFO `_. 
""" # noqa return self.execute_command(TOPK_INFO, key) @@ -364,14 +364,14 @@ class TDigestCommands: def create(self, key, compression=100): """ Allocate the memory and initialize the t-digest. - For more information see `TDIGEST.CREATE `_. + For more information see `TDIGEST.CREATE `_. """ # noqa return self.execute_command(TDIGEST_CREATE, key, "COMPRESSION", compression) def reset(self, key): """ Reset the sketch `key` to zero - empty out the sketch and re-initialize it. - For more information see `TDIGEST.RESET `_. + For more information see `TDIGEST.RESET `_. """ # noqa return self.execute_command(TDIGEST_RESET, key) @@ -379,7 +379,7 @@ def add(self, key, values): """ Adds one or more observations to a t-digest sketch `key`. - For more information see `TDIGEST.ADD `_. + For more information see `TDIGEST.ADD `_. """ # noqa return self.execute_command(TDIGEST_ADD, key, *values) @@ -391,7 +391,7 @@ def merge(self, destination_key, num_keys, *keys, compression=None, override=Fal If `destination_key` already exists its values are merged with the input keys. If you wish to override the destination key contents use the `OVERRIDE` parameter. - For more information see `TDIGEST.MERGE `_. + For more information see `TDIGEST.MERGE `_. """ # noqa params = [destination_key, num_keys, *keys] if compression is not None: @@ -403,14 +403,14 @@ def merge(self, destination_key, num_keys, *keys, compression=None, override=Fal def min(self, key): """ Return minimum value from the sketch `key`. Will return DBL_MAX if the sketch is empty. - For more information see `TDIGEST.MIN `_. + For more information see `TDIGEST.MIN `_. """ # noqa return self.execute_command(TDIGEST_MIN, key) def max(self, key): """ Return maximum value from the sketch `key`. Will return DBL_MIN if the sketch is empty. - For more information see `TDIGEST.MAX `_. + For more information see `TDIGEST.MAX `_. 
""" # noqa return self.execute_command(TDIGEST_MAX, key) @@ -419,14 +419,14 @@ def quantile(self, key, quantile, *quantiles): Returns estimates of one or more cutoffs such that a specified fraction of the observations added to this t-digest would be less than or equal to each of the specified cutoffs. (Multiple quantiles can be returned with one call) - For more information see `TDIGEST.QUANTILE `_. + For more information see `TDIGEST.QUANTILE `_. """ # noqa return self.execute_command(TDIGEST_QUANTILE, key, quantile, *quantiles) def cdf(self, key, value, *values): """ Return double fraction of all points added which are <= value. - For more information see `TDIGEST.CDF `_. + For more information see `TDIGEST.CDF `_. """ # noqa return self.execute_command(TDIGEST_CDF, key, value, *values) @@ -434,7 +434,7 @@ def info(self, key): """ Return Compression, Capacity, Merged Nodes, Unmerged Nodes, Merged Weight, Unmerged Weight and Total Compressions. - For more information see `TDIGEST.INFO `_. + For more information see `TDIGEST.INFO `_. """ # noqa return self.execute_command(TDIGEST_INFO, key) @@ -442,7 +442,7 @@ def trimmed_mean(self, key, low_cut_quantile, high_cut_quantile): """ Return mean value from the sketch, excluding observation values outside the low and high cutoff quantiles. - For more information see `TDIGEST.TRIMMED_MEAN `_. + For more information see `TDIGEST.TRIMMED_MEAN `_. """ # noqa return self.execute_command( TDIGEST_TRIMMED_MEAN, key, low_cut_quantile, high_cut_quantile @@ -453,7 +453,7 @@ def rank(self, key, value, *values): Retrieve the estimated rank of value (the number of observations in the sketch that are smaller than value + half the number of observations that are equal to value). - For more information see `TDIGEST.RANK `_. + For more information see `TDIGEST.RANK `_. 
""" # noqa return self.execute_command(TDIGEST_RANK, key, value, *values) @@ -462,7 +462,7 @@ def revrank(self, key, value, *values): Retrieve the estimated rank of value (the number of observations in the sketch that are larger than value + half the number of observations that are equal to value). - For more information see `TDIGEST.REVRANK `_. + For more information see `TDIGEST.REVRANK `_. """ # noqa return self.execute_command(TDIGEST_REVRANK, key, value, *values) @@ -470,7 +470,7 @@ def byrank(self, key, rank, *ranks): """ Retrieve an estimation of the value with the given rank. - For more information see `TDIGEST.BY_RANK `_. + For more information see `TDIGEST.BY_RANK `_. """ # noqa return self.execute_command(TDIGEST_BYRANK, key, rank, *ranks) @@ -478,7 +478,7 @@ def byrevrank(self, key, rank, *ranks): """ Retrieve an estimation of the value with the given reverse rank. - For more information see `TDIGEST.BY_REVRANK `_. + For more information see `TDIGEST.BY_REVRANK `_. """ # noqa return self.execute_command(TDIGEST_BYREVRANK, key, rank, *ranks) @@ -489,14 +489,14 @@ class CMSCommands: def initbydim(self, key, width, depth): """ Initialize a Count-Min Sketch `key` to dimensions (`width`, `depth`) specified by user. - For more information see `CMS.INITBYDIM `_. + For more information see `CMS.INITBYDIM `_. """ # noqa return self.execute_command(CMS_INITBYDIM, key, width, depth) def initbyprob(self, key, error, probability): """ Initialize a Count-Min Sketch `key` to characteristics (`error`, `probability`) specified by user. - For more information see `CMS.INITBYPROB `_. + For more information see `CMS.INITBYPROB `_. """ # noqa return self.execute_command(CMS_INITBYPROB, key, error, probability) @@ -504,7 +504,7 @@ def incrby(self, key, items, increments): """ Add/increase `items` to a Count-Min Sketch `key` by ''increments''. Both `items` and `increments` are lists. - For more information see `CMS.INCRBY `_. + For more information see `CMS.INCRBY `_. 
Example: @@ -517,7 +517,7 @@ def incrby(self, key, items, increments): def query(self, key, *items): """ Return count for an `item` from `key`. Multiple items can be queried with one call. - For more information see `CMS.QUERY `_. + For more information see `CMS.QUERY `_. """ # noqa return self.execute_command(CMS_QUERY, key, *items) @@ -527,7 +527,7 @@ def merge(self, destKey, numKeys, srcKeys, weights=[]): All sketches must have identical width and depth. `Weights` can be used to multiply certain sketches. Default weight is 1. Both `srcKeys` and `weights` are lists. - For more information see `CMS.MERGE `_. + For more information see `CMS.MERGE `_. """ # noqa params = [destKey, numKeys] params += srcKeys @@ -537,6 +537,6 @@ def merge(self, destKey, numKeys, srcKeys, weights=[]): def info(self, key): """ Return width, depth and total count of the sketch. - For more information see `CMS.INFO `_. + For more information see `CMS.INFO `_. """ # noqa return self.execute_command(CMS_INFO, key) diff --git a/redis/commands/bf/info.py b/valkey/commands/bf/info.py similarity index 100% rename from redis/commands/bf/info.py rename to valkey/commands/bf/info.py diff --git a/redis/commands/cluster.py b/valkey/commands/cluster.py similarity index 85% rename from redis/commands/cluster.py rename to valkey/commands/cluster.py index f31b88bc..a9fbbba9 100644 --- a/redis/commands/cluster.py +++ b/valkey/commands/cluster.py @@ -14,9 +14,9 @@ Union, ) -from redis.crc import key_slot -from redis.exceptions import RedisClusterException, RedisError -from redis.typing import ( +from valkey.crc import key_slot +from valkey.exceptions import ValkeyClusterException, ValkeyError +from valkey.typing import ( AnyKeyT, ClusterCommandsProtocol, EncodableT, @@ -44,13 +44,13 @@ ScriptCommands, ) from .helpers import list_or_args -from .redismodules import AsyncRedisModuleCommands, RedisModuleCommands +from .valkeymodules import AsyncValkeyModuleCommands, ValkeyModuleCommands if TYPE_CHECKING: - 
from redis.asyncio.cluster import TargetNodesT + from valkey.asyncio.cluster import TargetNodesT # Not complete, but covers the major ones -# https://redis.io/commands +# https://valkey.io/commands READ_COMMANDS = frozenset( [ "BITCOUNT", @@ -163,7 +163,7 @@ def mget_nonatomic(self, keys: KeysT, *args: KeyT) -> List[Optional[Any]]: Returns a list of values ordered identically to ``keys`` - For more information see https://redis.io/commands/mget + For more information see https://valkey.io/commands/mget """ # Concatenate all keys into a list @@ -188,7 +188,7 @@ def mset_nonatomic(self, mapping: Mapping[AnyKeyT, EncodableT]) -> List[bool]: for the keys of every slot. This operation will not be atomic if keys belong to more than one slot. - For more information see https://redis.io/commands/mset + For more information see https://valkey.io/commands/mset """ # Partition the keys by slot @@ -215,7 +215,7 @@ def exists(self, *keys: KeyT) -> ResponseT: whole cluster. The keys are first split up into slots and then an EXISTS command is sent for every slot - For more information see https://redis.io/commands/exists + For more information see https://valkey.io/commands/exists """ return self._split_command_across_slots("EXISTS", *keys) @@ -228,7 +228,7 @@ def delete(self, *keys: KeyT) -> ResponseT: Non-existent keys are ignored. Returns the number of keys that were deleted. - For more information see https://redis.io/commands/del + For more information see https://valkey.io/commands/del """ return self._split_command_across_slots("DEL", *keys) @@ -243,7 +243,7 @@ def touch(self, *keys: KeyT) -> ResponseT: Non-existent keys are ignored. Returns the number of keys that were touched. - For more information see https://redis.io/commands/touch + For more information see https://valkey.io/commands/touch """ return self._split_command_across_slots("TOUCH", *keys) @@ -257,7 +257,7 @@ def unlink(self, *keys: KeyT) -> ResponseT: Non-existent keys are ignored. 
Returns the number of keys that were unlinked. - For more information see https://redis.io/commands/unlink + For more information see https://valkey.io/commands/unlink """ return self._split_command_across_slots("UNLINK", *keys) @@ -275,7 +275,7 @@ async def mget_nonatomic(self, keys: KeysT, *args: KeyT) -> List[Optional[Any]]: Returns a list of values ordered identically to ``keys`` - For more information see https://redis.io/commands/mget + For more information see https://valkey.io/commands/mget """ # Concatenate all keys into a list @@ -300,7 +300,7 @@ async def mset_nonatomic(self, mapping: Mapping[AnyKeyT, EncodableT]) -> List[bo for the keys of every slot. This operation will not be atomic if keys belong to more than one slot. - For more information see https://redis.io/commands/mset + For more information see https://valkey.io/commands/mset """ # Partition the keys by slot @@ -343,9 +343,9 @@ async def _execute_pipeline_by_slot( class ClusterManagementCommands(ManagementCommands): """ - A class for Redis Cluster management commands + A class for Valkey Cluster management commands - The class inherits from Redis's core ManagementCommands class and do the + The class inherits from Valkey's core ManagementCommands class and do the required adjustments to work with cluster mode """ @@ -353,25 +353,25 @@ def slaveof(self, *args, **kwargs) -> NoReturn: """ Make the server a replica of another instance, or promote it as master. - For more information see https://redis.io/commands/slaveof + For more information see https://valkey.io/commands/slaveof """ - raise RedisClusterException("SLAVEOF is not supported in cluster mode") + raise ValkeyClusterException("SLAVEOF is not supported in cluster mode") def replicaof(self, *args, **kwargs) -> NoReturn: """ Make the server a replica of another instance, or promote it as master. 
- For more information see https://redis.io/commands/replicaof + For more information see https://valkey.io/commands/replicaof """ - raise RedisClusterException("REPLICAOF is not supported in cluster mode") + raise ValkeyClusterException("REPLICAOF is not supported in cluster mode") def swapdb(self, *args, **kwargs) -> NoReturn: """ - Swaps two Redis databases. + Swaps two Valkey databases. - For more information see https://redis.io/commands/swapdb + For more information see https://valkey.io/commands/swapdb """ - raise RedisClusterException("SWAPDB is not supported in cluster mode") + raise ValkeyClusterException("SWAPDB is not supported in cluster mode") def cluster_myid(self, target_node: "TargetNodesT") -> ResponseT: """ @@ -380,7 +380,7 @@ def cluster_myid(self, target_node: "TargetNodesT") -> ResponseT: :target_node: 'ClusterNode' The node to execute the command on - For more information check https://redis.io/commands/cluster-myid/ + For more information check https://valkey.io/commands/cluster-myid/ """ return self.execute_command("CLUSTER MYID", target_nodes=target_node) @@ -393,7 +393,7 @@ def cluster_addslots( :target_node: 'ClusterNode' The node to execute the command on - For more information see https://redis.io/commands/cluster-addslots + For more information see https://valkey.io/commands/cluster-addslots """ return self.execute_command( "CLUSTER ADDSLOTS", *slots, target_nodes=target_node @@ -411,7 +411,7 @@ def cluster_addslotsrange( :target_node: 'ClusterNode' The node to execute the command on - For more information see https://redis.io/commands/cluster-addslotsrange + For more information see https://valkey.io/commands/cluster-addslotsrange """ return self.execute_command( "CLUSTER ADDSLOTSRANGE", *slots, target_nodes=target_node @@ -422,7 +422,7 @@ def cluster_countkeysinslot(self, slot_id: int) -> ResponseT: Return the number of local keys in the specified hash slot Send to node based on specified slot_id - For more information see 
https://redis.io/commands/cluster-countkeysinslot + For more information see https://valkey.io/commands/cluster-countkeysinslot """ return self.execute_command("CLUSTER COUNTKEYSINSLOT", slot_id) @@ -431,7 +431,8 @@ def cluster_count_failure_report(self, node_id: str) -> ResponseT: Return the number of failure reports active for a given node Sends to a random node - For more information see https://redis.io/commands/cluster-count-failure-reports + For more information see + https://valkey.io/commands/cluster-count-failure-reports """ return self.execute_command("CLUSTER COUNT-FAILURE-REPORTS", node_id) @@ -442,7 +443,7 @@ def cluster_delslots(self, *slots: EncodableT) -> List[bool]: Returns a list of the results for each processed slot. - For more information see https://redis.io/commands/cluster-delslots + For more information see https://valkey.io/commands/cluster-delslots """ return [self.execute_command("CLUSTER DELSLOTS", slot) for slot in slots] @@ -453,7 +454,7 @@ def cluster_delslotsrange(self, *slots: EncodableT) -> ResponseT: from the node, while CLUSTER DELSLOTSRANGE takes a list of slot ranges to remove from the node. - For more information see https://redis.io/commands/cluster-delslotsrange + For more information see https://valkey.io/commands/cluster-delslotsrange """ return self.execute_command("CLUSTER DELSLOTSRANGE", *slots) @@ -467,11 +468,11 @@ def cluster_failover( :target_node: 'ClusterNode' The node to execute the command on - For more information see https://redis.io/commands/cluster-failover + For more information see https://valkey.io/commands/cluster-failover """ if option: if option.upper() not in ["FORCE", "TAKEOVER"]: - raise RedisError( + raise ValkeyError( f"Invalid option for CLUSTER FAILOVER command: {option}" ) else: @@ -483,11 +484,11 @@ def cluster_failover( def cluster_info(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT: """ - Provides info about Redis Cluster node state. 
+ Provides info about Valkey Cluster node state. The command will be sent to a random node in the cluster if no target node is specified. - For more information see https://redis.io/commands/cluster-info + For more information see https://valkey.io/commands/cluster-info """ return self.execute_command("CLUSTER INFO", target_nodes=target_nodes) @@ -496,7 +497,7 @@ def cluster_keyslot(self, key: str) -> ResponseT: Returns the hash slot of the specified key Sends to random node in the cluster - For more information see https://redis.io/commands/cluster-keyslot + For more information see https://valkey.io/commands/cluster-keyslot """ return self.execute_command("CLUSTER KEYSLOT", key) @@ -507,7 +508,7 @@ def cluster_meet( Force a node cluster to handshake with another node. Sends to specified node. - For more information see https://redis.io/commands/cluster-meet + For more information see https://valkey.io/commands/cluster-meet """ return self.execute_command( "CLUSTER MEET", host, port, target_nodes=target_nodes @@ -518,7 +519,7 @@ def cluster_nodes(self) -> ResponseT: Get Cluster config for the node. 
Sends to random node in the cluster - For more information see https://redis.io/commands/cluster-nodes + For more information see https://valkey.io/commands/cluster-nodes """ return self.execute_command("CLUSTER NODES") @@ -528,7 +529,7 @@ def cluster_replicate( """ Reconfigure a node as a slave of the specified master node - For more information see https://redis.io/commands/cluster-replicate + For more information see https://valkey.io/commands/cluster-replicate """ return self.execute_command( "CLUSTER REPLICATE", node_id, target_nodes=target_nodes @@ -538,12 +539,12 @@ def cluster_reset( self, soft: bool = True, target_nodes: Optional["TargetNodesT"] = None ) -> ResponseT: """ - Reset a Redis Cluster node + Reset a Valkey Cluster node If 'soft' is True then it will send 'SOFT' argument If 'soft' is False then it will send 'HARD' argument - For more information see https://redis.io/commands/cluster-reset + For more information see https://valkey.io/commands/cluster-reset """ return self.execute_command( "CLUSTER RESET", b"SOFT" if soft else b"HARD", target_nodes=target_nodes @@ -555,7 +556,7 @@ def cluster_save_config( """ Forces the node to save cluster state on disk - For more information see https://redis.io/commands/cluster-saveconfig + For more information see https://valkey.io/commands/cluster-saveconfig """ return self.execute_command("CLUSTER SAVECONFIG", target_nodes=target_nodes) @@ -563,7 +564,7 @@ def cluster_get_keys_in_slot(self, slot: int, num_keys: int) -> ResponseT: """ Returns the number of keys in the specified cluster slot - For more information see https://redis.io/commands/cluster-getkeysinslot + For more information see https://valkey.io/commands/cluster-getkeysinslot """ return self.execute_command("CLUSTER GETKEYSINSLOT", slot, num_keys) @@ -573,7 +574,7 @@ def cluster_set_config_epoch( """ Set the configuration epoch in a new node - For more information see https://redis.io/commands/cluster-set-config-epoch + For more information see 
https://valkey.io/commands/cluster-set-config-epoch """ return self.execute_command( "CLUSTER SET-CONFIG-EPOCH", epoch, target_nodes=target_nodes @@ -588,23 +589,23 @@ def cluster_setslot( :target_node: 'ClusterNode' The node to execute the command on - For more information see https://redis.io/commands/cluster-setslot + For more information see https://valkey.io/commands/cluster-setslot """ if state.upper() in ("IMPORTING", "NODE", "MIGRATING"): return self.execute_command( "CLUSTER SETSLOT", slot_id, state, node_id, target_nodes=target_node ) elif state.upper() == "STABLE": - raise RedisError('For "stable" state please use ' "cluster_setslot_stable") + raise ValkeyError('For "stable" state please use ' "cluster_setslot_stable") else: - raise RedisError(f"Invalid slot state: {state}") + raise ValkeyError(f"Invalid slot state: {state}") def cluster_setslot_stable(self, slot_id: int) -> ResponseT: """ Clears migrating / importing state from the slot. It determines by it self what node the slot is in and sends it there. - For more information see https://redis.io/commands/cluster-setslot + For more information see https://valkey.io/commands/cluster-setslot """ return self.execute_command("CLUSTER SETSLOT", slot_id, "STABLE") @@ -615,7 +616,7 @@ def cluster_replicas( Provides a list of replica nodes replicating from the specified primary target node. 
- For more information see https://redis.io/commands/cluster-replicas + For more information see https://valkey.io/commands/cluster-replicas """ return self.execute_command( "CLUSTER REPLICAS", node_id, target_nodes=target_nodes @@ -625,7 +626,7 @@ def cluster_slots(self, target_nodes: Optional["TargetNodesT"] = None) -> Respon """ Get array of Cluster slot to node mappings - For more information see https://redis.io/commands/cluster-slots + For more information see https://valkey.io/commands/cluster-slots """ return self.execute_command("CLUSTER SLOTS", target_nodes=target_nodes) @@ -633,7 +634,7 @@ def cluster_shards(self, target_nodes=None): """ Returns details about the shards of the cluster. - For more information see https://redis.io/commands/cluster-shards + For more information see https://valkey.io/commands/cluster-shards """ return self.execute_command("CLUSTER SHARDS", target_nodes=target_nodes) @@ -641,19 +642,19 @@ def cluster_myshardid(self, target_nodes=None): """ Returns the shard ID of the node. - For more information see https://redis.io/commands/cluster-myshardid/ + For more information see https://valkey.io/commands/cluster-myshardid/ """ return self.execute_command("CLUSTER MYSHARDID", target_nodes=target_nodes) def cluster_links(self, target_node: "TargetNodesT") -> ResponseT: """ - Each node in a Redis Cluster maintains a pair of long-lived TCP link with each + Each node in a Valkey Cluster maintains a pair of long-lived TCP link with each peer in the cluster: One for sending outbound messages towards the peer and one for receiving inbound messages from the peer. This command outputs information of all such peer links as an array. 
- For more information see https://redis.io/commands/cluster-links + For more information see https://valkey.io/commands/cluster-links """ return self.execute_command("CLUSTER LINKS", target_nodes=target_node) @@ -673,7 +674,7 @@ def readonly(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT: The command will be sent to the default cluster node if target_nodes is not specified. - For more information see https://redis.io/commands/readonly + For more information see https://valkey.io/commands/readonly """ if target_nodes == "replicas" or target_nodes == "all": # read_from_replicas will only be enabled if the READONLY command @@ -687,7 +688,7 @@ def readwrite(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT: The command will be sent to the default cluster node if target_nodes is not specified. - For more information see https://redis.io/commands/readwrite + For more information see https://valkey.io/commands/readwrite """ # Reset read from replicas flag self.read_from_replicas = False @@ -704,9 +705,9 @@ class AsyncClusterManagementCommands( ClusterManagementCommands, AsyncManagementCommands ): """ - A class for Redis Cluster management commands + A class for Valkey Cluster management commands - The class inherits from Redis's core ManagementCommands class and do the + The class inherits from Valkey's core ManagementCommands class and do the required adjustments to work with cluster mode """ @@ -717,7 +718,7 @@ async def cluster_delslots(self, *slots: EncodableT) -> List[bool]: Returns a list of the results for each processed slot. 
- For more information see https://redis.io/commands/cluster-delslots + For more information see https://valkey.io/commands/cluster-delslots """ return await asyncio.gather( *( @@ -729,9 +730,9 @@ async def cluster_delslots(self, *slots: EncodableT) -> List[bool]: class ClusterDataAccessCommands(DataAccessCommands): """ - A class for Redis Cluster Data Access Commands + A class for Valkey Cluster Data Access Commands - The class inherits from Redis's core DataAccessCommand class and do the + The class inherits from Valkey's core DataAccessCommand class and do the required adjustments to work with cluster mode """ @@ -764,7 +765,7 @@ def stralgo( ``withmatchlen`` Returns the matches with the len of the match. Can be provided only when ``idx`` set to True. - For more information see https://redis.io/commands/stralgo + For more information see https://valkey.io/commands/stralgo """ target_nodes = kwargs.pop("target_nodes", None) if specific_argument == "strings" and target_nodes is None: @@ -822,9 +823,9 @@ class AsyncClusterDataAccessCommands( ClusterDataAccessCommands, AsyncDataAccessCommands ): """ - A class for Redis Cluster Data Access Commands + A class for Valkey Cluster Data Access Commands - The class inherits from Redis's core DataAccessCommand class and do the + The class inherits from Valkey's core DataAccessCommand class and do the required adjustments to work with cluster mode """ @@ -866,7 +867,7 @@ async def scan_iter( } -class RedisClusterCommands( +class ValkeyClusterCommands( ClusterMultiKeyCommands, ClusterManagementCommands, ACLCommands, @@ -876,10 +877,10 @@ class RedisClusterCommands( FunctionCommands, GearsCommands, ModuleCommands, - RedisModuleCommands, + ValkeyModuleCommands, ): """ - A class for all Redis Cluster commands + A class for all Valkey Cluster commands For key-based commands, the target node(s) will be internally determined by the keys' hash slot. 
@@ -894,11 +895,11 @@ class RedisClusterCommands( - 'dict(any:clusterNodes)' for example: - r.cluster_info(target_nodes=RedisCluster.ALL_NODES) + r.cluster_info(target_nodes=ValkeyCluster.ALL_NODES) """ -class AsyncRedisClusterCommands( +class AsyncValkeyClusterCommands( AsyncClusterMultiKeyCommands, AsyncClusterManagementCommands, AsyncACLCommands, @@ -907,10 +908,10 @@ class AsyncRedisClusterCommands( AsyncFunctionCommands, AsyncGearsCommands, AsyncModuleCommands, - AsyncRedisModuleCommands, + AsyncValkeyModuleCommands, ): """ - A class for all Redis Cluster commands + A class for all Valkey Cluster commands For key-based commands, the target node(s) will be internally determined by the keys' hash slot. @@ -925,5 +926,5 @@ class AsyncRedisClusterCommands( - 'dict(any:clusterNodes)' for example: - r.cluster_info(target_nodes=RedisCluster.ALL_NODES) + r.cluster_info(target_nodes=ValkeyCluster.ALL_NODES) """ diff --git a/redis/commands/core.py b/valkey/commands/core.py similarity index 87% rename from redis/commands/core.py rename to valkey/commands/core.py index 0656fd81..deb1aab7 100644 --- a/redis/commands/core.py +++ b/valkey/commands/core.py @@ -21,8 +21,8 @@ Union, ) -from redis.exceptions import ConnectionError, DataError, NoScriptError, RedisError -from redis.typing import ( +from valkey.exceptions import ConnectionError, DataError, NoScriptError, ValkeyError +from valkey.typing import ( AbsExpiryT, AnyKeyT, BitfieldOffsetT, @@ -46,14 +46,14 @@ from .helpers import list_or_args if TYPE_CHECKING: - from redis.asyncio.client import Redis as AsyncRedis - from redis.client import Redis + from valkey.asyncio.client import Valkey as AsyncValkey + from valkey.client import Valkey class ACLCommands(CommandsProtocol): """ - Redis Access Control List (ACL) commands. - see: https://redis.io/topics/acl + Valkey Access Control List (ACL) commands. 
+ see: https://valkey.io/topics/acl """ def acl_cat(self, category: Union[str, None] = None, **kwargs) -> ResponseT: @@ -64,7 +64,7 @@ def acl_cat(self, category: Union[str, None] = None, **kwargs) -> ResponseT: If ``category`` is supplied, returns a list of all commands within that category. - For more information see https://redis.io/commands/acl-cat + For more information see https://valkey.io/commands/acl-cat """ pieces: list[EncodableT] = [category] if category else [] return self.execute_command("ACL CAT", *pieces, **kwargs) @@ -73,7 +73,7 @@ def acl_dryrun(self, username, *args, **kwargs): """ Simulate the execution of a given command by a given ``username``. - For more information see https://redis.io/commands/acl-dryrun + For more information see https://valkey.io/commands/acl-dryrun """ return self.execute_command("ACL DRYRUN", username, *args, **kwargs) @@ -81,7 +81,7 @@ def acl_deluser(self, *username: str, **kwargs) -> ResponseT: """ Delete the ACL for the specified ``username``s - For more information see https://redis.io/commands/acl-deluser + For more information see https://valkey.io/commands/acl-deluser """ return self.execute_command("ACL DELUSER", *username, **kwargs) @@ -89,7 +89,7 @@ def acl_genpass(self, bits: Union[int, None] = None, **kwargs) -> ResponseT: """Generate a random password value. If ``bits`` is supplied then use this number of bits, rounded to the next multiple of 4. 
- See: https://redis.io/commands/acl-genpass + See: https://valkey.io/commands/acl-genpass """ pieces = [] if bits is not None: @@ -110,7 +110,7 @@ def acl_getuser(self, username: str, **kwargs) -> ResponseT: If ``username`` does not exist, return None - For more information see https://redis.io/commands/acl-getuser + For more information see https://valkey.io/commands/acl-getuser """ return self.execute_command("ACL GETUSER", username, **kwargs) @@ -118,7 +118,7 @@ def acl_help(self, **kwargs) -> ResponseT: """The ACL HELP command returns helpful text describing the different subcommands. - For more information see https://redis.io/commands/acl-help + For more information see https://valkey.io/commands/acl-help """ return self.execute_command("ACL HELP", **kwargs) @@ -126,7 +126,7 @@ def acl_list(self, **kwargs) -> ResponseT: """ Return a list of all ACLs on the server - For more information see https://redis.io/commands/acl-list + For more information see https://valkey.io/commands/acl-list """ return self.execute_command("ACL LIST", **kwargs) @@ -136,7 +136,7 @@ def acl_log(self, count: Union[int, None] = None, **kwargs) -> ResponseT: :param int count: Get logs[0:count]. :rtype: List. - For more information see https://redis.io/commands/acl-log + For more information see https://valkey.io/commands/acl-log """ args = [] if count is not None: @@ -151,7 +151,7 @@ def acl_log_reset(self, **kwargs) -> ResponseT: Reset ACL logs. :rtype: Boolean. - For more information see https://redis.io/commands/acl-log + For more information see https://valkey.io/commands/acl-log """ args = [b"RESET"] return self.execute_command("ACL LOG", *args, **kwargs) @@ -163,7 +163,7 @@ def acl_load(self, **kwargs) -> ResponseT: Note that the server must be configured with the ``aclfile`` directive to be able to load ACL rules from an aclfile. 
- For more information see https://redis.io/commands/acl-load + For more information see https://valkey.io/commands/acl-load """ return self.execute_command("ACL LOAD", **kwargs) @@ -174,7 +174,7 @@ def acl_save(self, **kwargs) -> ResponseT: Note that the server must be configured with the ``aclfile`` directive to be able to save ACL rules to an aclfile. - For more information see https://redis.io/commands/acl-save + For more information see https://valkey.io/commands/acl-save """ return self.execute_command("ACL SAVE", **kwargs) @@ -203,7 +203,7 @@ def acl_setuser( the existing ACL is completely overwritten and replaced with the specified values. - For more information, see https://redis.io/commands/acl-setuser + For more information, see https://valkey.io/commands/acl-setuser Args: username: The name of the user whose ACL is to be created or updated. @@ -371,14 +371,14 @@ def acl_setuser( def acl_users(self, **kwargs) -> ResponseT: """Returns a list of all registered users on the server. - For more information see https://redis.io/commands/acl-users + For more information see https://valkey.io/commands/acl-users """ return self.execute_command("ACL USERS", **kwargs) def acl_whoami(self, **kwargs) -> ResponseT: """Get the username for the current connection - For more information see https://redis.io/commands/acl-whoami + For more information see https://valkey.io/commands/acl-whoami """ return self.execute_command("ACL WHOAMI", **kwargs) @@ -388,15 +388,15 @@ def acl_whoami(self, **kwargs) -> ResponseT: class ManagementCommands(CommandsProtocol): """ - Redis management commands + Valkey management commands """ def auth(self, password: str, username: Optional[str] = None, **kwargs): """ - Authenticates the user. If you do not pass username, Redis will try to + Authenticates the user. If you do not pass username, Valkey will try to authenticate for the "default" user. If you do pass username, it will authenticate for the given user. 
- For more information see https://redis.io/commands/auth + For more information see https://valkey.io/commands/auth """ pieces = [] if username is not None: @@ -405,18 +405,18 @@ def auth(self, password: str, username: Optional[str] = None, **kwargs): return self.execute_command("AUTH", *pieces, **kwargs) def bgrewriteaof(self, **kwargs): - """Tell the Redis server to rewrite the AOF file from data in memory. + """Tell the Valkey server to rewrite the AOF file from data in memory. - For more information see https://redis.io/commands/bgrewriteaof + For more information see https://valkey.io/commands/bgrewriteaof """ return self.execute_command("BGREWRITEAOF", **kwargs) def bgsave(self, schedule: bool = True, **kwargs) -> ResponseT: """ - Tell the Redis server to save its data to disk. Unlike save(), + Tell the Valkey server to save its data to disk. Unlike save(), this method is asynchronous and returns immediately. - For more information see https://redis.io/commands/bgsave + For more information see https://valkey.io/commands/bgsave """ pieces = [] if schedule: @@ -425,18 +425,18 @@ def bgsave(self, schedule: bool = True, **kwargs) -> ResponseT: def role(self) -> ResponseT: """ - Provide information on the role of a Redis instance in + Provide information on the role of a Valkey instance in the context of replication, by returning if the instance is currently a master, slave, or sentinel. 
- For more information see https://redis.io/commands/role + For more information see https://valkey.io/commands/role """ return self.execute_command("ROLE") def client_kill(self, address: str, **kwargs) -> ResponseT: """Disconnects the client at ``address`` (ip:port) - For more information see https://redis.io/commands/client-kill + For more information see https://valkey.io/commands/client-kill """ return self.execute_command("CLIENT KILL", address, **kwargs) @@ -499,7 +499,7 @@ def client_info(self, **kwargs) -> ResponseT: Returns information and statistics about the current client connection. - For more information see https://redis.io/commands/client-info + For more information see https://valkey.io/commands/client-info """ return self.execute_command("CLIENT INFO", **kwargs) @@ -514,7 +514,7 @@ def client_list( replica, pubsub) :param client_id: optional. a list of client ids - For more information see https://redis.io/commands/client-list + For more information see https://valkey.io/commands/client-list """ args = [] if _type is not None: @@ -534,7 +534,7 @@ def client_getname(self, **kwargs) -> ResponseT: """ Returns the current connection name - For more information see https://redis.io/commands/client-getname + For more information see https://valkey.io/commands/client-getname """ return self.execute_command("CLIENT GETNAME", **kwargs) @@ -543,7 +543,7 @@ def client_getredir(self, **kwargs) -> ResponseT: Returns the ID (an integer) of the client to whom we are redirecting tracking notifications. - see: https://redis.io/commands/client-getredir + see: https://valkey.io/commands/client-getredir """ return self.execute_command("CLIENT GETREDIR", **kwargs) @@ -551,7 +551,7 @@ def client_reply( self, reply: Union[Literal["ON"], Literal["OFF"], Literal["SKIP"]], **kwargs ) -> ResponseT: """ - Enable and disable redis server replies. + Enable and disable valkey server replies. 
``reply`` Must be ON OFF or SKIP, ON - The default most with server replies to commands @@ -564,7 +564,7 @@ def client_reply( The test_client_reply unit test illustrates this, and conftest.py has a client with a timeout. - See https://redis.io/commands/client-reply + See https://valkey.io/commands/client-reply """ replies = ["ON", "OFF", "SKIP"] if reply not in replies: @@ -575,7 +575,7 @@ def client_id(self, **kwargs) -> ResponseT: """ Returns the current connection id - For more information see https://redis.io/commands/client-id + For more information see https://valkey.io/commands/client-id """ return self.execute_command("CLIENT ID", **kwargs) @@ -592,7 +592,7 @@ def client_tracking_on( Turn on the tracking mode. For more information about the options look at client_tracking func. - See https://redis.io/commands/client-tracking + See https://valkey.io/commands/client-tracking """ return self.client_tracking( True, clientid, prefix, bcast, optin, optout, noloop @@ -611,7 +611,7 @@ def client_tracking_off( Turn off the tracking mode. For more information about the options look at client_tracking func. - See https://redis.io/commands/client-tracking + See https://valkey.io/commands/client-tracking """ return self.client_tracking( False, clientid, prefix, bcast, optin, optout, noloop @@ -629,7 +629,7 @@ def client_tracking( **kwargs, ) -> ResponseT: """ - Enables the tracking feature of the Redis server, that is used + Enables the tracking feature of the Valkey server, that is used for server assisted client side caching. ``on`` indicate for tracking on or tracking off. The dafualt is on. @@ -655,7 +655,7 @@ def client_tracking( ``prefix`` for broadcasting, register a given key prefix, so that notifications will be provided only for keys starting with this string. 
- See https://redis.io/commands/client-tracking + See https://valkey.io/commands/client-tracking """ if len(prefix) != 0 and bcast is False: @@ -682,7 +682,7 @@ def client_trackinginfo(self, **kwargs) -> ResponseT: Returns the information about the current client connection's use of the server assisted client side cache. - See https://redis.io/commands/client-trackinginfo + See https://valkey.io/commands/client-trackinginfo """ return self.execute_command("CLIENT TRACKINGINFO", **kwargs) @@ -690,7 +690,7 @@ def client_setname(self, name: str, **kwargs) -> ResponseT: """ Sets the current connection name - For more information see https://redis.io/commands/client-setname + For more information see https://valkey.io/commands/client-setname .. note:: This method sets client name only for **current** connection. @@ -703,7 +703,7 @@ def client_setname(self, name: str, **kwargs) -> ResponseT: def client_setinfo(self, attr: str, value: str, **kwargs) -> ResponseT: """ Sets the current connection library name or version - For mor information see https://redis.io/commands/client-setinfo + For mor information see https://valkey.io/commands/client-setinfo """ return self.execute_command("CLIENT SETINFO", attr, value, **kwargs) @@ -716,7 +716,7 @@ def client_unblock( If ``error`` is False (default), the client is unblocked using the regular timeout mechanism. - For more information see https://redis.io/commands/client-unblock + For more information see https://valkey.io/commands/client-unblock """ args = ["CLIENT UNBLOCK", int(client_id)] if error: @@ -725,10 +725,10 @@ def client_unblock( def client_pause(self, timeout: int, all: bool = True, **kwargs) -> ResponseT: """ - Suspend all the Redis clients for the specified amount of time. + Suspend all the Valkey clients for the specified amount of time. 
- For more information see https://redis.io/commands/client-pause + For more information see https://valkey.io/commands/client-pause :param timeout: milliseconds to pause clients :param all: If true (default) all client commands are blocked. @@ -750,9 +750,9 @@ def client_pause(self, timeout: int, all: bool = True, **kwargs) -> ResponseT: def client_unpause(self, **kwargs) -> ResponseT: """ - Unpause all redis clients + Unpause all valkey clients - For more information see https://redis.io/commands/client-unpause + For more information see https://valkey.io/commands/client-unpause """ return self.execute_command("CLIENT UNPAUSE", **kwargs) @@ -760,7 +760,7 @@ def client_no_evict(self, mode: str) -> Union[Awaitable[str], str]: """ Sets the client eviction mode for the current connection. - For more information see https://redis.io/commands/client-no-evict + For more information see https://valkey.io/commands/client-no-evict """ return self.execute_command("CLIENT NO-EVICT", mode) @@ -771,15 +771,15 @@ def client_no_touch(self, mode: str) -> Union[Awaitable[str], str]: # When turned on, the current client will not change LFU/LRU stats, # unless it sends the TOUCH command. - For more information see https://redis.io/commands/client-no-touch + For more information see https://valkey.io/commands/client-no-touch """ return self.execute_command("CLIENT NO-TOUCH", mode) def command(self, **kwargs): """ - Returns dict reply of details about all Redis commands. + Returns dict reply of details about all Valkey commands. 
- For more information see https://redis.io/commands/command + For more information see https://valkey.io/commands/command """ return self.execute_command("COMMAND", **kwargs) @@ -804,7 +804,7 @@ def command_list( ``category``: get the commands in the ACL category ``pattern``: get the commands that match the given pattern - For more information see https://redis.io/commands/command-list/ + For more information see https://valkey.io/commands/command-list/ """ pieces = [] if module is not None: @@ -821,9 +821,9 @@ def command_list( def command_getkeysandflags(self, *args: List[str]) -> List[Union[str, List[str]]]: """ - Returns array of keys from a full Redis command and their usage flags. + Returns array of keys from a full Valkey command and their usage flags. - For more information see https://redis.io/commands/command-getkeysandflags + For more information see https://valkey.io/commands/command-getkeysandflags """ return self.execute_command("COMMAND GETKEYSANDFLAGS", *args) @@ -842,7 +842,7 @@ def config_get( """ Return a dictionary of configuration based on the ``pattern`` - For more information see https://redis.io/commands/config-get + For more information see https://valkey.io/commands/config-get """ return self.execute_command("CONFIG GET", pattern, *args, **kwargs) @@ -855,7 +855,7 @@ def config_set( ) -> ResponseT: """Set config item ``name`` with ``value`` - For more information see https://redis.io/commands/config-set + For more information see https://valkey.io/commands/config-set """ return self.execute_command("CONFIG SET", name, value, *args, **kwargs) @@ -863,7 +863,7 @@ def config_resetstat(self, **kwargs) -> ResponseT: """ Reset runtime statistics - For more information see https://redis.io/commands/config-resetstat + For more information see https://valkey.io/commands/config-resetstat """ return self.execute_command("CONFIG RESETSTAT", **kwargs) @@ -871,7 +871,7 @@ def config_rewrite(self, **kwargs) -> ResponseT: """ Rewrite config file with the 
minimal change to reflect running config. - For more information see https://redis.io/commands/config-rewrite + For more information see https://valkey.io/commands/config-rewrite """ return self.execute_command("CONFIG REWRITE", **kwargs) @@ -879,7 +879,7 @@ def dbsize(self, **kwargs) -> ResponseT: """ Returns the number of keys in the current database - For more information see https://redis.io/commands/dbsize + For more information see https://valkey.io/commands/dbsize """ return self.execute_command("DBSIZE", **kwargs) @@ -887,7 +887,7 @@ def debug_object(self, key: KeyT, **kwargs) -> ResponseT: """ Returns version specific meta information about a given key - For more information see https://redis.io/commands/debug-object + For more information see https://valkey.io/commands/debug-object """ return self.execute_command("DEBUG OBJECT", key, **kwargs) @@ -896,7 +896,7 @@ def debug_segfault(self, **kwargs) -> None: """ DEBUG SEGFAULT is intentionally not implemented in the client. - For more information see https://redis.io/commands/debug-segfault + For more information see https://valkey.io/commands/debug-segfault """ ) @@ -904,7 +904,7 @@ def echo(self, value: EncodableT, **kwargs) -> ResponseT: """ Echo the string back from the server - For more information see https://redis.io/commands/echo + For more information see https://valkey.io/commands/echo """ return self.execute_command("ECHO", value, **kwargs) @@ -915,7 +915,7 @@ def flushall(self, asynchronous: bool = False, **kwargs) -> ResponseT: ``asynchronous`` indicates whether the operation is executed asynchronously by the server. - For more information see https://redis.io/commands/flushall + For more information see https://valkey.io/commands/flushall """ args = [] if asynchronous: @@ -929,7 +929,7 @@ def flushdb(self, asynchronous: bool = False, **kwargs) -> ResponseT: ``asynchronous`` indicates whether the operation is executed asynchronously by the server. 
- For more information see https://redis.io/commands/flushdb + For more information see https://valkey.io/commands/flushdb """ args = [] if asynchronous: @@ -940,9 +940,9 @@ def sync(self) -> ResponseT: """ Initiates a replication stream from the master. - For more information see https://redis.io/commands/sync + For more information see https://valkey.io/commands/sync """ - from redis.client import NEVER_DECODE + from valkey.client import NEVER_DECODE options = {} options[NEVER_DECODE] = [] @@ -953,9 +953,9 @@ def psync(self, replicationid: str, offset: int): Initiates a replication stream from the master. Newer version for `sync`. - For more information see https://redis.io/commands/sync + For more information see https://valkey.io/commands/sync """ - from redis.client import NEVER_DECODE + from valkey.client import NEVER_DECODE options = {} options[NEVER_DECODE] = [] @@ -965,14 +965,14 @@ def swapdb(self, first: int, second: int, **kwargs) -> ResponseT: """ Swap two databases - For more information see https://redis.io/commands/swapdb + For more information see https://valkey.io/commands/swapdb """ return self.execute_command("SWAPDB", first, second, **kwargs) def select(self, index: int, **kwargs) -> ResponseT: - """Select the Redis logical database at index. + """Select the Valkey logical database at index. 
- See: https://redis.io/commands/select + See: https://valkey.io/commands/select """ return self.execute_command("SELECT", index, **kwargs) @@ -980,15 +980,15 @@ def info( self, section: Union[str, None] = None, *args: List[str], **kwargs ) -> ResponseT: """ - Returns a dictionary containing information about the Redis server + Returns a dictionary containing information about the Valkey server The ``section`` option can be used to select a specific section of information - The section option is not supported by older versions of Redis Server, + The section option is not supported by older versions of Valkey Server, and will generate ResponseError - For more information see https://redis.io/commands/info + For more information see https://valkey.io/commands/info """ if section is None: return self.execute_command("INFO", **kwargs) @@ -998,45 +998,45 @@ def info( def lastsave(self, **kwargs) -> ResponseT: """ Return a Python datetime object representing the last time the - Redis database was saved to disk + Valkey database was saved to disk - For more information see https://redis.io/commands/lastsave + For more information see https://valkey.io/commands/lastsave """ return self.execute_command("LASTSAVE", **kwargs) def latency_doctor(self): """Raise a NotImplementedError, as the client will not support LATENCY DOCTOR. - This funcion is best used within the redis-cli. + This funcion is best used within the valkey-cli. - For more information see https://redis.io/commands/latency-doctor + For more information see https://valkey.io/commands/latency-doctor """ raise NotImplementedError( """ LATENCY DOCTOR is intentionally not implemented in the client. - For more information see https://redis.io/commands/latency-doctor + For more information see https://valkey.io/commands/latency-doctor """ ) def latency_graph(self): """Raise a NotImplementedError, as the client will not support LATENCY GRAPH. - This funcion is best used within the redis-cli. 
+ This funcion is best used within the valkey-cli. - For more information see https://redis.io/commands/latency-graph. + For more information see https://valkey.io/commands/latency-graph. """ raise NotImplementedError( """ LATENCY GRAPH is intentionally not implemented in the client. - For more information see https://redis.io/commands/latency-graph + For more information see https://valkey.io/commands/latency-graph """ ) def lolwut(self, *version_numbers: Union[str, float], **kwargs) -> ResponseT: """ - Get the Redis version and a piece of generative computer art + Get the Valkey version and a piece of generative computer art - See: https://redis.io/commands/lolwut + See: https://valkey.io/commands/lolwut """ if version_numbers: return self.execute_command("LOLWUT VERSION", *version_numbers, **kwargs) @@ -1046,7 +1046,7 @@ def lolwut(self, *version_numbers: Union[str, float], **kwargs) -> ResponseT: def reset(self) -> ResponseT: """Perform a full reset on the connection's server side contenxt. - See: https://redis.io/commands/reset + See: https://valkey.io/commands/reset """ return self.execute_command("RESET") @@ -1063,7 +1063,7 @@ def migrate( **kwargs, ) -> ResponseT: """ - Migrate 1 or more keys from the current Redis server to a different + Migrate 1 or more keys from the current Valkey server to a different server specified by the ``host``, ``port`` and ``destination_db``. The ``timeout``, specified in milliseconds, indicates the maximum @@ -1079,7 +1079,7 @@ def migrate( If ``auth`` is specified, authenticate to the destination server with the password provided. - For more information see https://redis.io/commands/migrate + For more information see https://valkey.io/commands/migrate """ keys = list_or_args(keys, []) if not keys: @@ -1111,7 +1111,7 @@ def memory_doctor(self, **kwargs) -> None: """ MEMORY DOCTOR is intentionally not implemented in the client. 
- For more information see https://redis.io/commands/memory-doctor + For more information see https://valkey.io/commands/memory-doctor """ ) @@ -1120,7 +1120,7 @@ def memory_help(self, **kwargs) -> None: """ MEMORY HELP is intentionally not implemented in the client. - For more information see https://redis.io/commands/memory-help + For more information see https://valkey.io/commands/memory-help """ ) @@ -1128,7 +1128,7 @@ def memory_stats(self, **kwargs) -> ResponseT: """ Return a dictionary of memory stats - For more information see https://redis.io/commands/memory-stats + For more information see https://valkey.io/commands/memory-stats """ return self.execute_command("MEMORY STATS", **kwargs) @@ -1136,7 +1136,7 @@ def memory_malloc_stats(self, **kwargs) -> ResponseT: """ Return an internal statistics report from the memory allocator. - See: https://redis.io/commands/memory-malloc-stats + See: https://valkey.io/commands/memory-malloc-stats """ return self.execute_command("MEMORY MALLOC-STATS", **kwargs) @@ -1151,7 +1151,7 @@ def memory_usage( sample. If left unspecified, the server's default is 5. Use 0 to sample all elements. - For more information see https://redis.io/commands/memory-usage + For more information see https://valkey.io/commands/memory-usage """ args = [] if isinstance(samples, int): @@ -1162,7 +1162,7 @@ def memory_purge(self, **kwargs) -> ResponseT: """ Attempts to purge dirty pages for reclamation by allocator - For more information see https://redis.io/commands/memory-purge + For more information see https://valkey.io/commands/memory-purge """ return self.execute_command("MEMORY PURGE", **kwargs) @@ -1179,7 +1179,7 @@ def latency_history(self, event: str) -> ResponseT: """ Returns the raw data of the ``event``'s latency spikes time series. 
- For more information see https://redis.io/commands/latency-history + For more information see https://valkey.io/commands/latency-history """ return self.execute_command("LATENCY HISTORY", event) @@ -1187,7 +1187,7 @@ def latency_latest(self) -> ResponseT: """ Reports the latest latency events logged. - For more information see https://redis.io/commands/latency-latest + For more information see https://valkey.io/commands/latency-latest """ return self.execute_command("LATENCY LATEST") @@ -1195,15 +1195,15 @@ def latency_reset(self, *events: str) -> ResponseT: """ Resets the latency spikes time series of all, or only some, events. - For more information see https://redis.io/commands/latency-reset + For more information see https://valkey.io/commands/latency-reset """ return self.execute_command("LATENCY RESET", *events) def ping(self, **kwargs) -> ResponseT: """ - Ping the Redis server + Ping the Valkey server - For more information see https://redis.io/commands/ping + For more information see https://valkey.io/commands/ping """ return self.execute_command("PING", **kwargs) @@ -1211,29 +1211,29 @@ def quit(self, **kwargs) -> ResponseT: """ Ask the server to close the connection. - For more information see https://redis.io/commands/quit + For more information see https://valkey.io/commands/quit """ return self.execute_command("QUIT", **kwargs) def replicaof(self, *args, **kwargs) -> ResponseT: """ - Update the replication settings of a redis replica, on the fly. + Update the replication settings of a valkey replica, on the fly. 
Examples of valid arguments include: NO ONE (set no replication) - host port (set to the host and port of a redis server) + host port (set to the host and port of a valkey server) - For more information see https://redis.io/commands/replicaof + For more information see https://valkey.io/commands/replicaof """ return self.execute_command("REPLICAOF", *args, **kwargs) def save(self, **kwargs) -> ResponseT: """ - Tell the Redis server to save its data to disk, + Tell the Valkey server to save its data to disk, blocking until the save is complete - For more information see https://redis.io/commands/save + For more information see https://valkey.io/commands/save """ return self.execute_command("SAVE", **kwargs) @@ -1246,7 +1246,7 @@ def shutdown( abort: bool = False, **kwargs, ) -> None: - """Shutdown the Redis server. If Redis has persistence configured, + """Shutdown the Valkey server. If Valkey has persistence configured, data will be flushed before shutdown. It is possible to specify modifiers to alter the behavior of the command: ``save`` will force a DB saving operation even if no save points are configured. @@ -1257,7 +1257,7 @@ def shutdown( ``force`` ignores any errors that would normally prevent the server from exiting ``abort`` cancels an ongoing shutdown and cannot be combined with other flags. - For more information see https://redis.io/commands/shutdown + For more information see https://valkey.io/commands/shutdown """ if save and nosave: raise DataError("SHUTDOWN save and nosave cannot both be set") @@ -1277,7 +1277,7 @@ def shutdown( except ConnectionError: # a ConnectionError here is expected return - raise RedisError("SHUTDOWN seems to have failed.") + raise ValkeyError("SHUTDOWN seems to have failed.") def slaveof( self, host: Union[str, None] = None, port: Union[int, None] = None, **kwargs @@ -1287,7 +1287,7 @@ def slaveof( by the ``host`` and ``port``. If called without arguments, the instance is promoted to a master instead. 
- For more information see https://redis.io/commands/slaveof + For more information see https://valkey.io/commands/slaveof """ if host is None and port is None: return self.execute_command("SLAVEOF", b"NO", b"ONE", **kwargs) @@ -1298,9 +1298,9 @@ def slowlog_get(self, num: Union[int, None] = None, **kwargs) -> ResponseT: Get the entries from the slowlog. If ``num`` is specified, get the most recent ``num`` items. - For more information see https://redis.io/commands/slowlog-get + For more information see https://valkey.io/commands/slowlog-get """ - from redis.client import NEVER_DECODE + from valkey.client import NEVER_DECODE args = ["SLOWLOG GET"] if num is not None: @@ -1314,7 +1314,7 @@ def slowlog_len(self, **kwargs) -> ResponseT: """ Get the number of items in the slowlog - For more information see https://redis.io/commands/slowlog-len + For more information see https://valkey.io/commands/slowlog-len """ return self.execute_command("SLOWLOG LEN", **kwargs) @@ -1322,7 +1322,7 @@ def slowlog_reset(self, **kwargs) -> ResponseT: """ Remove all items in the slowlog - For more information see https://redis.io/commands/slowlog-reset + For more information see https://valkey.io/commands/slowlog-reset """ return self.execute_command("SLOWLOG RESET", **kwargs) @@ -1331,18 +1331,18 @@ def time(self, **kwargs) -> ResponseT: Returns the server time as a 2-item tuple of ints: (seconds since epoch, microseconds into this second). - For more information see https://redis.io/commands/time + For more information see https://valkey.io/commands/time """ return self.execute_command("TIME", **kwargs) def wait(self, num_replicas: int, timeout: int, **kwargs) -> ResponseT: """ - Redis synchronous replication + Valkey synchronous replication That returns the number of replicas that processed the query when we finally have at least ``num_replicas``, or when the ``timeout`` was reached. 
- For more information see https://redis.io/commands/wait + For more information see https://valkey.io/commands/wait """ return self.execute_command("WAIT", num_replicas, timeout, **kwargs) @@ -1352,10 +1352,10 @@ def waitaof( """ This command blocks the current client until all previous write commands by that client are acknowledged as having been fsynced - to the AOF of the local Redis and/or at least the specified number + to the AOF of the local Valkey and/or at least the specified number of replicas. - For more information see https://redis.io/commands/waitaof + For more information see https://valkey.io/commands/waitaof """ return self.execute_command( "WAITAOF", num_local, num_replicas, timeout, **kwargs @@ -1405,13 +1405,13 @@ async def shutdown( abort: bool = False, **kwargs, ) -> None: - """Shutdown the Redis server. If Redis has persistence configured, + """Shutdown the Valkey server. If Valkey has persistence configured, data will be flushed before shutdown. If the "save" option is set, a data flush will be attempted even if there is no persistence configured. If the "nosave" option is set, no data flush will be attempted. The "save" and "nosave" options cannot both be set. 
- For more information see https://redis.io/commands/shutdown + For more information see https://valkey.io/commands/shutdown """ if save and nosave: raise DataError("SHUTDOWN save and nosave cannot both be set") @@ -1431,7 +1431,7 @@ async def shutdown( except ConnectionError: # a ConnectionError here is expected return - raise RedisError("SHUTDOWN seems to have failed.") + raise ValkeyError("SHUTDOWN seems to have failed.") class BitFieldOperation: @@ -1441,7 +1441,7 @@ class BitFieldOperation: def __init__( self, - client: Union["Redis", "AsyncRedis"], + client: Union["Valkey", "AsyncValkey"], key: str, default_overflow: Union[str, None] = None, ): @@ -1465,7 +1465,7 @@ def overflow(self, overflow: str): """ Update the overflow algorithm of successive INCRBY operations :param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the - Redis docs for descriptions of these algorithmsself. + Valkey docs for descriptions of these algorithmsself. :returns: a :py:class:`BitFieldOperation` instance. """ overflow = overflow.upper() @@ -1490,7 +1490,7 @@ def incrby( fmt='u8', offset='#2', the offset will be 16. :param int increment: value to increment the bitfield by. :param str overflow: overflow algorithm. Defaults to WRAP, but other - acceptable values are SAT and FAIL. See the Redis docs for + acceptable values are SAT and FAIL. See the Valkey docs for descriptions of these algorithms. :returns: a :py:class:`BitFieldOperation` instance. """ @@ -1548,7 +1548,7 @@ def execute(self) -> ResponseT: class BasicKeyCommands(CommandsProtocol): """ - Redis basic key-based commands + Valkey basic key-based commands """ def append(self, key: KeyT, value: EncodableT) -> ResponseT: @@ -1557,7 +1557,7 @@ def append(self, key: KeyT, value: EncodableT) -> ResponseT: doesn't already exist, create it with a value of ``value``. Returns the new length of the value at ``key``. 
- For more information see https://redis.io/commands/append + For more information see https://valkey.io/commands/append """ return self.execute_command("APPEND", key, value) @@ -1572,7 +1572,7 @@ def bitcount( Returns the count of set bits in the value of ``key``. Optional ``start`` and ``end`` parameters indicate which bytes to consider - For more information see https://redis.io/commands/bitcount + For more information see https://valkey.io/commands/bitcount """ params = [key] if start is not None and end is not None: @@ -1585,7 +1585,7 @@ def bitcount( return self.execute_command("BITCOUNT", *params, keys=[key]) def bitfield( - self: Union["Redis", "AsyncRedis"], + self: Union["Valkey", "AsyncValkey"], key: KeyT, default_overflow: Union[str, None] = None, ) -> BitFieldOperation: @@ -1593,12 +1593,12 @@ def bitfield( Return a BitFieldOperation instance to conveniently construct one or more bitfield operations on ``key``. - For more information see https://redis.io/commands/bitfield + For more information see https://valkey.io/commands/bitfield """ return BitFieldOperation(self, key, default_overflow=default_overflow) def bitfield_ro( - self: Union["Redis", "AsyncRedis"], + self: Union["Valkey", "AsyncValkey"], key: KeyT, encoding: str, offset: BitfieldOffsetT, @@ -1611,7 +1611,7 @@ def bitfield_ro( encoding/offset pairs in optional list ``items`` Read-only variant of the BITFIELD command. - For more information see https://redis.io/commands/bitfield_ro + For more information see https://valkey.io/commands/bitfield_ro """ params = [key, "GET", encoding, offset] @@ -1625,7 +1625,7 @@ def bitop(self, operation: str, dest: KeyT, *keys: KeyT) -> ResponseT: Perform a bitwise operation using ``operation`` between ``keys`` and store the result in ``dest``. 
- For more information see https://redis.io/commands/bitop + For more information see https://valkey.io/commands/bitop """ return self.execute_command("BITOP", operation, dest, *keys) @@ -1643,7 +1643,7 @@ def bitpos( as a range of bytes and not a range of bits, so start=0 and end=2 means to look at the first three bytes. - For more information see https://redis.io/commands/bitpos + For more information see https://valkey.io/commands/bitpos """ if bit not in (0, 1): raise DataError("bit must be 0 or 1") @@ -1671,13 +1671,13 @@ def copy( Copy the value stored in the ``source`` key to the ``destination`` key. ``destination_db`` an alternative destination database. By default, - the ``destination`` key is created in the source Redis database. + the ``destination`` key is created in the source Valkey database. ``replace`` whether the ``destination`` key should be removed before copying the value to it. By default, the value is not copied if the ``destination`` key already exists. - For more information see https://redis.io/commands/copy + For more information see https://valkey.io/commands/copy """ params = [source, destination] if destination_db is not None: @@ -1691,7 +1691,7 @@ def decrby(self, name: KeyT, amount: int = 1) -> ResponseT: Decrements the value of ``key`` by ``amount``. If no key exists, the value will be initialized as 0 - ``amount`` - For more information see https://redis.io/commands/decrby + For more information see https://valkey.io/commands/decrby """ return self.execute_command("DECRBY", name, amount) @@ -1711,9 +1711,9 @@ def dump(self, name: KeyT) -> ResponseT: Return a serialized version of the value stored at the specified key. If key does not exist a nil bulk reply is returned. 
- For more information see https://redis.io/commands/dump + For more information see https://valkey.io/commands/dump """ - from redis.client import NEVER_DECODE + from valkey.client import NEVER_DECODE options = {} options[NEVER_DECODE] = [] @@ -1723,7 +1723,7 @@ def exists(self, *names: KeyT) -> ResponseT: """ Returns the number of ``names`` that exist - For more information see https://redis.io/commands/exists + For more information see https://valkey.io/commands/exists """ return self.execute_command("EXISTS", *names, keys=names) @@ -1749,7 +1749,7 @@ def expire( GT -> Set expiry only when the new expiry is greater than current one LT -> Set expiry only when the new expiry is less than current one - For more information see https://redis.io/commands/expire + For more information see https://valkey.io/commands/expire """ if isinstance(time, datetime.timedelta): time = int(time.total_seconds()) @@ -1786,7 +1786,7 @@ def expireat( -> GT -- Set expiry only when the new expiry is greater than current one -> LT -- Set expiry only when the new expiry is less than current one - For more information see https://redis.io/commands/expireat + For more information see https://valkey.io/commands/expireat """ if isinstance(when, datetime.datetime): when = int(when.timestamp()) @@ -1808,7 +1808,7 @@ def expiretime(self, key: str) -> int: Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which the given key will expire. 
- For more information see https://redis.io/commands/expiretime + For more information see https://valkey.io/commands/expiretime """ return self.execute_command("EXPIRETIME", key) @@ -1816,7 +1816,7 @@ def get(self, name: KeyT) -> ResponseT: """ Return the value at key ``name``, or None if the key doesn't exist - For more information see https://redis.io/commands/get + For more information see https://valkey.io/commands/get """ return self.execute_command("GET", name, keys=[name]) @@ -1827,7 +1827,7 @@ def getdel(self, name: KeyT) -> ResponseT: the key on success (if and only if the key's value type is a string). - For more information see https://redis.io/commands/getdel + For more information see https://valkey.io/commands/getdel """ return self.execute_command("GETDEL", name) @@ -1858,7 +1858,7 @@ def getex( ``persist`` remove the time to live associated with ``name``. - For more information see https://redis.io/commands/getex + For more information see https://valkey.io/commands/getex """ opset = {ex, px, exat, pxat} @@ -1910,7 +1910,7 @@ def getbit(self, name: KeyT, offset: int) -> ResponseT: """ Returns an integer indicating the value of ``offset`` in ``name`` - For more information see https://redis.io/commands/getbit + For more information see https://valkey.io/commands/getbit """ return self.execute_command("GETBIT", name, offset, keys=[name]) @@ -1919,7 +1919,7 @@ def getrange(self, key: KeyT, start: int, end: int) -> ResponseT: Returns the substring of the string value stored at ``key``, determined by the offsets ``start`` and ``end`` (both are inclusive) - For more information see https://redis.io/commands/getrange + For more information see https://valkey.io/commands/getrange """ return self.execute_command("GETRANGE", key, start, end, keys=[key]) @@ -1928,10 +1928,10 @@ def getset(self, name: KeyT, value: EncodableT) -> ResponseT: Sets the value at key ``name`` to ``value`` and returns the old value at key ``name`` atomically. 
- As per Redis 6.2, GETSET is considered deprecated. + As per Valkey 6.2, GETSET is considered deprecated. Please use SET with GET parameter in new code. - For more information see https://redis.io/commands/getset + For more information see https://valkey.io/commands/getset """ return self.execute_command("GETSET", name, value) @@ -1940,7 +1940,7 @@ def incrby(self, name: KeyT, amount: int = 1) -> ResponseT: Increments the value of ``key`` by ``amount``. If no key exists, the value will be initialized as ``amount`` - For more information see https://redis.io/commands/incrby + For more information see https://valkey.io/commands/incrby """ return self.execute_command("INCRBY", name, amount) @@ -1951,7 +1951,7 @@ def incrbyfloat(self, name: KeyT, amount: float = 1.0) -> ResponseT: Increments the value at key ``name`` by floating ``amount``. If no key exists, the value will be initialized as ``amount`` - For more information see https://redis.io/commands/incrbyfloat + For more information see https://valkey.io/commands/incrbyfloat """ return self.execute_command("INCRBYFLOAT", name, amount) @@ -1959,7 +1959,7 @@ def keys(self, pattern: PatternT = "*", **kwargs) -> ResponseT: """ Returns a list of keys matching ``pattern`` - For more information see https://redis.io/commands/keys + For more information see https://valkey.io/commands/keys """ return self.execute_command("KEYS", pattern, **kwargs) @@ -1971,7 +1971,7 @@ def lmove( pushing it as the first/last element on the destination list. Returns the element being popped and pushed. - For more information see https://redis.io/commands/lmove + For more information see https://valkey.io/commands/lmove """ params = [first_list, second_list, src, dest] return self.execute_command("LMOVE", *params) @@ -1987,7 +1987,7 @@ def blmove( """ Blocking version of lmove. 
- For more information see https://redis.io/commands/blmove + For more information see https://valkey.io/commands/blmove """ params = [first_list, second_list, src, dest, timeout] return self.execute_command("BLMOVE", *params) @@ -1996,9 +1996,9 @@ def mget(self, keys: KeysT, *args: EncodableT) -> ResponseT: """ Returns a list of values ordered identically to ``keys`` - For more information see https://redis.io/commands/mget + For more information see https://valkey.io/commands/mget """ - from redis.client import EMPTY_RESPONSE + from valkey.client import EMPTY_RESPONSE args = list_or_args(keys, args) options = {} @@ -2013,7 +2013,7 @@ def mset(self, mapping: Mapping[AnyKeyT, EncodableT]) -> ResponseT: key/value pairs. Both keys and values should be strings or types that can be cast to a string via str(). - For more information see https://redis.io/commands/mset + For more information see https://valkey.io/commands/mset """ items = [] for pair in mapping.items(): @@ -2027,7 +2027,7 @@ def msetnx(self, mapping: Mapping[AnyKeyT, EncodableT]) -> ResponseT: should be strings or types that can be cast to a string via str(). Returns a boolean indicating if the operation was successful. 
- For more information see https://redis.io/commands/msetnx + For more information see https://valkey.io/commands/msetnx """ items = [] for pair in mapping.items(): @@ -2036,9 +2036,9 @@ def msetnx(self, mapping: Mapping[AnyKeyT, EncodableT]) -> ResponseT: def move(self, name: KeyT, db: int) -> ResponseT: """ - Moves the key ``name`` to a different Redis database ``db`` + Moves the key ``name`` to a different Valkey database ``db`` - For more information see https://redis.io/commands/move + For more information see https://valkey.io/commands/move """ return self.execute_command("MOVE", name, db) @@ -2046,7 +2046,7 @@ def persist(self, name: KeyT) -> ResponseT: """ Removes an expiration on ``name`` - For more information see https://redis.io/commands/persist + For more information see https://valkey.io/commands/persist """ return self.execute_command("PERSIST", name) @@ -2070,7 +2070,7 @@ def pexpire( GT -> Set expiry only when the new expiry is greater than current one LT -> Set expiry only when the new expiry is less than current one - For more information see https://redis.io/commands/pexpire + For more information see https://valkey.io/commands/pexpire """ if isinstance(time, datetime.timedelta): time = int(time.total_seconds() * 1000) @@ -2106,7 +2106,7 @@ def pexpireat( GT -> Set expiry only when the new expiry is greater than current one LT -> Set expiry only when the new expiry is less than current one - For more information see https://redis.io/commands/pexpireat + For more information see https://valkey.io/commands/pexpireat """ if isinstance(when, datetime.datetime): when = int(when.timestamp() * 1000) @@ -2126,7 +2126,7 @@ def pexpiretime(self, key: str) -> int: Returns the absolute Unix timestamp (since January 1, 1970) in milliseconds at which the given key will expire. 
- For more information see https://redis.io/commands/pexpiretime + For more information see https://valkey.io/commands/pexpiretime """ return self.execute_command("PEXPIRETIME", key) @@ -2136,7 +2136,7 @@ def psetex(self, name: KeyT, time_ms: ExpiryT, value: EncodableT): milliseconds. ``time_ms`` can be represented by an integer or a Python timedelta object - For more information see https://redis.io/commands/psetex + For more information see https://valkey.io/commands/psetex """ if isinstance(time_ms, datetime.timedelta): time_ms = int(time_ms.total_seconds() * 1000) @@ -2146,7 +2146,7 @@ def pttl(self, name: KeyT) -> ResponseT: """ Returns the number of milliseconds until the key ``name`` will expire - For more information see https://redis.io/commands/pttl + For more information see https://valkey.io/commands/pttl """ return self.execute_command("PTTL", name) @@ -2164,7 +2164,7 @@ def hrandfield( withvalues: The optional WITHVALUES modifier changes the reply so it includes the respective values of the randomly selected hash fields. 
- For more information see https://redis.io/commands/hrandfield + For more information see https://valkey.io/commands/hrandfield """ params = [] if count is not None: @@ -2178,7 +2178,7 @@ def randomkey(self, **kwargs) -> ResponseT: """ Returns the name of a random key - For more information see https://redis.io/commands/randomkey + For more information see https://valkey.io/commands/randomkey """ return self.execute_command("RANDOMKEY", **kwargs) @@ -2186,7 +2186,7 @@ def rename(self, src: KeyT, dst: KeyT) -> ResponseT: """ Rename key ``src`` to ``dst`` - For more information see https://redis.io/commands/rename + For more information see https://valkey.io/commands/rename """ return self.execute_command("RENAME", src, dst) @@ -2194,7 +2194,7 @@ def renamenx(self, src: KeyT, dst: KeyT): """ Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist - For more information see https://redis.io/commands/renamenx + For more information see https://valkey.io/commands/renamenx """ return self.execute_command("RENAMENX", src, dst) @@ -2216,7 +2216,7 @@ def restore( it's not specified an error is raised on collision. ``absttl`` if True, specified ``ttl`` should represent an absolute Unix - timestamp in milliseconds in which the key will expire. (Redis 5.0 or + timestamp in milliseconds in which the key will expire. (Valkey 5.0 or greater). ``idletime`` Used for eviction, this is the number of seconds the @@ -2225,7 +2225,7 @@ def restore( ``frequency`` Used for eviction, this is the frequency counter of the object stored at the key, prior to execution. - For more information see https://redis.io/commands/restore + For more information see https://valkey.io/commands/restore """ params = [name, ttl, value] if replace: @@ -2275,11 +2275,11 @@ def set( if it already exists. ``keepttl`` if True, retain the time to live associated with the key. 
- (Available since Redis 6.0) + (Available since Valkey 6.0) ``get`` if True, set the value at key ``name`` to ``value`` and return the old value stored at key, or None if the key did not exist. - (Available since Redis 6.2) + (Available since Valkey 6.2) ``exat`` sets an expire flag on key ``name`` for ``ex`` seconds, specified in unix time. @@ -2287,7 +2287,7 @@ def set( ``pxat`` sets an expire flag on key ``name`` for ``ex`` milliseconds, specified in unix time. - For more information see https://redis.io/commands/set + For more information see https://valkey.io/commands/set """ pieces: list[EncodableT] = [name, value] options = {} @@ -2341,7 +2341,7 @@ def setbit(self, name: KeyT, offset: int, value: int) -> ResponseT: Flag the ``offset`` in ``name`` as ``value``. Returns an integer indicating the previous value of ``offset``. - For more information see https://redis.io/commands/setbit + For more information see https://valkey.io/commands/setbit """ value = value and 1 or 0 return self.execute_command("SETBIT", name, offset, value) @@ -2352,7 +2352,7 @@ def setex(self, name: KeyT, time: ExpiryT, value: EncodableT) -> ResponseT: seconds. ``time`` can be represented by an integer or a Python timedelta object. - For more information see https://redis.io/commands/setex + For more information see https://valkey.io/commands/setex """ if isinstance(time, datetime.timedelta): time = int(time.total_seconds()) @@ -2362,7 +2362,7 @@ def setnx(self, name: KeyT, value: EncodableT) -> ResponseT: """ Set the value of key ``name`` to ``value`` if key doesn't exist - For more information see https://redis.io/commands/setnx + For more information see https://valkey.io/commands/setnx """ return self.execute_command("SETNX", name, value) @@ -2377,7 +2377,7 @@ def setrange(self, name: KeyT, offset: int, value: EncodableT) -> ResponseT: Returns the length of the new string. 
- For more information see https://redis.io/commands/setrange + For more information see https://valkey.io/commands/setrange """ return self.execute_command("SETRANGE", name, offset, value) @@ -2410,7 +2410,7 @@ def stralgo( ``withmatchlen`` Returns the matches with the len of the match. Can be provided only when ``idx`` set to True. - For more information see https://redis.io/commands/stralgo + For more information see https://valkey.io/commands/stralgo """ # check validity supported_algo = ["LCS"] @@ -2449,7 +2449,7 @@ def strlen(self, name: KeyT) -> ResponseT: """ Return the number of bytes stored in the value of ``name`` - For more information see https://redis.io/commands/strlen + For more information see https://valkey.io/commands/strlen """ return self.execute_command("STRLEN", name, keys=[name]) @@ -2465,7 +2465,7 @@ def touch(self, *args: KeyT) -> ResponseT: Alters the last access time of a key(s) ``*args``. A key is ignored if it does not exist. - For more information see https://redis.io/commands/touch + For more information see https://valkey.io/commands/touch """ return self.execute_command("TOUCH", *args) @@ -2473,7 +2473,7 @@ def ttl(self, name: KeyT) -> ResponseT: """ Returns the number of seconds until the key ``name`` will expire - For more information see https://redis.io/commands/ttl + For more information see https://valkey.io/commands/ttl """ return self.execute_command("TTL", name) @@ -2481,7 +2481,7 @@ def type(self, name: KeyT) -> ResponseT: """ Returns the type of key ``name`` - For more information see https://redis.io/commands/type + For more information see https://valkey.io/commands/type """ return self.execute_command("TYPE", name, keys=[name]) @@ -2489,7 +2489,7 @@ def watch(self, *names: KeyT) -> None: """ Watches the values at keys ``names``, or None if the key doesn't exist - For more information see https://redis.io/commands/watch + For more information see https://valkey.io/commands/watch """ 
warnings.warn(DeprecationWarning("Call WATCH from a Pipeline object")) @@ -2497,7 +2497,7 @@ def unwatch(self) -> None: """ Unwatches the value at key ``name``, or None of the key doesn't exist - For more information see https://redis.io/commands/unwatch + For more information see https://valkey.io/commands/unwatch """ warnings.warn(DeprecationWarning("Call UNWATCH from a Pipeline object")) @@ -2505,7 +2505,7 @@ def unlink(self, *names: KeyT) -> ResponseT: """ Unlink one or more keys specified by ``names`` - For more information see https://redis.io/commands/unlink + For more information see https://valkey.io/commands/unlink """ return self.execute_command("UNLINK", *names) @@ -2525,7 +2525,7 @@ def lcs( ``minmatchlen`` restrict the list of matches to the ones of the given ``minmatchlen``. If ``withmatchlen`` the length of the match also will be returned. - For more information see https://redis.io/commands/lcs + For more information see https://valkey.io/commands/lcs """ pieces = [key1, key2] if len: @@ -2541,16 +2541,16 @@ def lcs( class AsyncBasicKeyCommands(BasicKeyCommands): def __delitem__(self, name: KeyT): - raise TypeError("Async Redis client does not support class deletion") + raise TypeError("Async Valkey client does not support class deletion") def __contains__(self, name: KeyT): - raise TypeError("Async Redis client does not support class inclusion") + raise TypeError("Async Valkey client does not support class inclusion") def __getitem__(self, name: KeyT): - raise TypeError("Async Redis client does not support class retrieval") + raise TypeError("Async Valkey client does not support class retrieval") def __setitem__(self, name: KeyT, value: EncodableT): - raise TypeError("Async Redis client does not support class assignment") + raise TypeError("Async Valkey client does not support class assignment") async def watch(self, *names: KeyT) -> None: return super().watch(*names) @@ -2561,8 +2561,8 @@ async def unwatch(self) -> None: class 
ListCommands(CommandsProtocol): """ - Redis commands for List data type. - see: https://redis.io/topics/data-types#lists + Valkey commands for List data type. + see: https://valkey.io/topics/data-types#lists """ def blpop( @@ -2578,7 +2578,7 @@ def blpop( If timeout is 0, then block indefinitely. - For more information see https://redis.io/commands/blpop + For more information see https://valkey.io/commands/blpop """ if timeout is None: timeout = 0 @@ -2599,7 +2599,7 @@ def brpop( If timeout is 0, then block indefinitely. - For more information see https://redis.io/commands/brpop + For more information see https://valkey.io/commands/brpop """ if timeout is None: timeout = 0 @@ -2618,7 +2618,7 @@ def brpoplpush( seconds elapse, whichever is first. A ``timeout`` value of 0 blocks forever. - For more information see https://redis.io/commands/brpoplpush + For more information see https://valkey.io/commands/brpoplpush """ if timeout is None: timeout = 0 @@ -2639,7 +2639,7 @@ def blmpop( When all lists are empty this command blocks the connection until another client pushes to it or until the timeout, timeout of 0 blocks indefinitely - For more information see https://redis.io/commands/blmpop + For more information see https://valkey.io/commands/blmpop """ args = [timeout, numkeys, *args, direction, "COUNT", count] @@ -2656,7 +2656,7 @@ def lmpop( Pop ``count`` values (default 1) first non-empty list key from the list of args provided key names. 
- For more information see https://redis.io/commands/lmpop + For more information see https://valkey.io/commands/lmpop """ args = [num_keys] + list(args) + [direction] if count != 1: @@ -2673,7 +2673,7 @@ def lindex( Negative indexes are supported and will return an item at the end of the list - For more information see https://redis.io/commands/lindex + For more information see https://valkey.io/commands/lindex """ return self.execute_command("LINDEX", name, index, keys=[name]) @@ -2687,7 +2687,7 @@ def linsert( Returns the new length of the list on success or -1 if ``refvalue`` is not in the list. - For more information see https://redis.io/commands/linsert + For more information see https://valkey.io/commands/linsert """ return self.execute_command("LINSERT", name, where, refvalue, value) @@ -2695,7 +2695,7 @@ def llen(self, name: str) -> Union[Awaitable[int], int]: """ Return the length of the list ``name`` - For more information see https://redis.io/commands/llen + For more information see https://valkey.io/commands/llen """ return self.execute_command("LLEN", name, keys=[name]) @@ -2711,7 +2711,7 @@ def lpop( the list. When provided with the optional ``count`` argument, the reply will consist of up to count elements, depending on the list's length. 
- For more information see https://redis.io/commands/lpop + For more information see https://valkey.io/commands/lpop """ if count is not None: return self.execute_command("LPOP", name, count) @@ -2722,7 +2722,7 @@ def lpush(self, name: str, *values: FieldT) -> Union[Awaitable[int], int]: """ Push ``values`` onto the head of the list ``name`` - For more information see https://redis.io/commands/lpush + For more information see https://valkey.io/commands/lpush """ return self.execute_command("LPUSH", name, *values) @@ -2730,7 +2730,7 @@ def lpushx(self, name: str, *values: FieldT) -> Union[Awaitable[int], int]: """ Push ``value`` onto the head of the list ``name`` if ``name`` exists - For more information see https://redis.io/commands/lpushx + For more information see https://valkey.io/commands/lpushx """ return self.execute_command("LPUSHX", name, *values) @@ -2742,7 +2742,7 @@ def lrange(self, name: str, start: int, end: int) -> Union[Awaitable[list], list ``start`` and ``end`` can be negative numbers just like Python slicing notation - For more information see https://redis.io/commands/lrange + For more information see https://valkey.io/commands/lrange """ return self.execute_command("LRANGE", name, start, end, keys=[name]) @@ -2756,7 +2756,7 @@ def lrem(self, name: str, count: int, value: str) -> Union[Awaitable[int], int]: count < 0: Remove elements equal to value moving from tail to head. count = 0: Remove all elements equal to value. 
- For more information see https://redis.io/commands/lrem + For more information see https://valkey.io/commands/lrem """ return self.execute_command("LREM", name, count, value) @@ -2764,7 +2764,7 @@ def lset(self, name: str, index: int, value: str) -> Union[Awaitable[str], str]: """ Set element at ``index`` of list ``name`` to ``value`` - For more information see https://redis.io/commands/lset + For more information see https://valkey.io/commands/lset """ return self.execute_command("LSET", name, index, value) @@ -2776,7 +2776,7 @@ def ltrim(self, name: str, start: int, end: int) -> Union[Awaitable[str], str]: ``start`` and ``end`` can be negative numbers just like Python slicing notation - For more information see https://redis.io/commands/ltrim + For more information see https://valkey.io/commands/ltrim """ return self.execute_command("LTRIM", name, start, end) @@ -2792,7 +2792,7 @@ def rpop( When provided with the optional ``count`` argument, the reply will consist of up to count elements, depending on the list's length. - For more information see https://redis.io/commands/rpop + For more information see https://valkey.io/commands/rpop """ if count is not None: return self.execute_command("RPOP", name, count) @@ -2804,7 +2804,7 @@ def rpoplpush(self, src: str, dst: str) -> Union[Awaitable[str], str]: RPOP a value off of the ``src`` list and atomically LPUSH it on to the ``dst`` list. Returns the value. 
- For more information see https://redis.io/commands/rpoplpush + For more information see https://valkey.io/commands/rpoplpush """ return self.execute_command("RPOPLPUSH", src, dst) @@ -2812,7 +2812,7 @@ def rpush(self, name: str, *values: FieldT) -> Union[Awaitable[int], int]: """ Push ``values`` onto the tail of the list ``name`` - For more information see https://redis.io/commands/rpush + For more information see https://valkey.io/commands/rpush """ return self.execute_command("RPUSH", name, *values) @@ -2820,7 +2820,7 @@ def rpushx(self, name: str, *values: str) -> Union[Awaitable[int], int]: """ Push ``value`` onto the tail of the list ``name`` if ``name`` exists - For more information see https://redis.io/commands/rpushx + For more information see https://valkey.io/commands/rpushx """ return self.execute_command("RPUSHX", name, *values) @@ -2855,7 +2855,7 @@ def lpos( position(s) of items within the first 1000 entries in the list. A ``maxlen`` of 0 (the default) will scan the entire list. - For more information see https://redis.io/commands/lpos + For more information see https://valkey.io/commands/lpos """ pieces: list[EncodableT] = [name, value] if rank is not None: @@ -2904,7 +2904,7 @@ def sort( elements, sort will return a list of tuples, each containing the values fetched from the arguments to ``get``. 
- For more information see https://redis.io/commands/sort + For more information see https://valkey.io/commands/sort """ if (start is not None and num is None) or (num is not None and start is None): raise DataError("``start`` and ``num`` must both be specified") @@ -2969,7 +2969,7 @@ def sort_ro( ``alpha`` allows for sorting lexicographically rather than numerically - For more information see https://redis.io/commands/sort_ro + For more information see https://valkey.io/commands/sort_ro """ return self.sort( key, start=start, num=num, by=by, get=get, desc=desc, alpha=alpha @@ -2981,8 +2981,8 @@ def sort_ro( class ScanCommands(CommandsProtocol): """ - Redis SCAN commands. - see: https://redis.io/commands/scan + Valkey SCAN commands. + see: https://valkey.io/commands/scan """ def scan( @@ -2999,15 +2999,15 @@ def scan( ``match`` allows for filtering the keys by pattern - ``count`` provides a hint to Redis about the number of keys to + ``count`` provides a hint to Valkey about the number of keys to return per batch. - ``_type`` filters the returned values by a particular Redis type. - Stock Redis instances allow for the following types: + ``_type`` filters the returned values by a particular Valkey type. + Stock Valkey instances allow for the following types: HASH, LIST, SET, STREAM, STRING, ZSET - Additionally, Redis modules can expose other types as well. + Additionally, Valkey modules can expose other types as well. - For more information see https://redis.io/commands/scan + For more information see https://valkey.io/commands/scan """ pieces: list[EncodableT] = [cursor] if match is not None: @@ -3031,13 +3031,13 @@ def scan_iter( ``match`` allows for filtering the keys by pattern - ``count`` provides a hint to Redis about the number of keys to + ``count`` provides a hint to Valkey about the number of keys to return per batch. - ``_type`` filters the returned values by a particular Redis type. 
- Stock Redis instances allow for the following types: + ``_type`` filters the returned values by a particular Valkey type. + Stock Valkey instances allow for the following types: HASH, LIST, SET, STREAM, STRING, ZSET - Additionally, Redis modules can expose other types as well. + Additionally, Valkey modules can expose other types as well. """ cursor = "0" while cursor != 0: @@ -3061,7 +3061,7 @@ def sscan( ``count`` allows for hint the minimum number of returns - For more information see https://redis.io/commands/sscan + For more information see https://valkey.io/commands/sscan """ pieces: list[EncodableT] = [name, cursor] if match is not None: @@ -3107,7 +3107,7 @@ def hscan( ``no_values`` indicates to return only the keys, without values. - For more information see https://redis.io/commands/hscan + For more information see https://valkey.io/commands/hscan """ pieces: list[EncodableT] = [name, cursor] if match is not None: @@ -3163,7 +3163,7 @@ def zscan( ``score_cast_func`` a callable used to cast the score return value - For more information see https://redis.io/commands/zscan + For more information see https://valkey.io/commands/zscan """ pieces = [name, cursor] if match is not None: @@ -3216,13 +3216,13 @@ async def scan_iter( ``match`` allows for filtering the keys by pattern - ``count`` provides a hint to Redis about the number of keys to + ``count`` provides a hint to Valkey about the number of keys to return per batch. - ``_type`` filters the returned values by a particular Redis type. - Stock Redis instances allow for the following types: + ``_type`` filters the returned values by a particular Valkey type. + Stock Valkey instances allow for the following types: HASH, LIST, SET, STREAM, STRING, ZSET - Additionally, Redis modules can expose other types as well. + Additionally, Valkey modules can expose other types as well. 
""" cursor = "0" while cursor != 0: @@ -3315,15 +3315,15 @@ async def zscan_iter( class SetCommands(CommandsProtocol): """ - Redis commands for Set data type. - see: https://redis.io/topics/data-types#sets + Valkey commands for Set data type. + see: https://valkey.io/topics/data-types#sets """ def sadd(self, name: str, *values: FieldT) -> Union[Awaitable[int], int]: """ Add ``value(s)`` to set ``name`` - For more information see https://redis.io/commands/sadd + For more information see https://valkey.io/commands/sadd """ return self.execute_command("SADD", name, *values) @@ -3331,7 +3331,7 @@ def scard(self, name: str) -> Union[Awaitable[int], int]: """ Return the number of elements in set ``name`` - For more information see https://redis.io/commands/scard + For more information see https://valkey.io/commands/scard """ return self.execute_command("SCARD", name, keys=[name]) @@ -3339,7 +3339,7 @@ def sdiff(self, keys: List, *args: List) -> Union[Awaitable[list], list]: """ Return the difference of sets specified by ``keys`` - For more information see https://redis.io/commands/sdiff + For more information see https://valkey.io/commands/sdiff """ args = list_or_args(keys, args) return self.execute_command("SDIFF", *args, keys=args) @@ -3351,7 +3351,7 @@ def sdiffstore( Store the difference of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. 
- For more information see https://redis.io/commands/sdiffstore + For more information see https://valkey.io/commands/sdiffstore """ args = list_or_args(keys, args) return self.execute_command("SDIFFSTORE", dest, *args) @@ -3360,7 +3360,7 @@ def sinter(self, keys: List, *args: List) -> Union[Awaitable[list], list]: """ Return the intersection of sets specified by ``keys`` - For more information see https://redis.io/commands/sinter + For more information see https://valkey.io/commands/sinter """ args = list_or_args(keys, args) return self.execute_command("SINTER", *args, keys=args) @@ -3375,7 +3375,7 @@ def sintercard( cardinality reaches limit partway through the computation, the algorithm will exit and yield limit as the cardinality - For more information see https://redis.io/commands/sintercard + For more information see https://valkey.io/commands/sintercard """ args = [numkeys, *keys, "LIMIT", limit] return self.execute_command("SINTERCARD", *args, keys=keys) @@ -3387,7 +3387,7 @@ def sinterstore( Store the intersection of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. - For more information see https://redis.io/commands/sinterstore + For more information see https://valkey.io/commands/sinterstore """ args = list_or_args(keys, args) return self.execute_command("SINTERSTORE", dest, *args) @@ -3400,7 +3400,7 @@ def sismember( - 1 if the value is a member of the set. - 0 if the value is not a member of the set or if key does not exist. 
- For more information see https://redis.io/commands/sismember + For more information see https://valkey.io/commands/sismember """ return self.execute_command("SISMEMBER", name, value, keys=[name]) @@ -3408,7 +3408,7 @@ def smembers(self, name: str) -> Union[Awaitable[Set], Set]: """ Return all members of the set ``name`` - For more information see https://redis.io/commands/smembers + For more information see https://valkey.io/commands/smembers """ return self.execute_command("SMEMBERS", name, keys=[name]) @@ -3422,7 +3422,7 @@ def smismember(self, name: str, values: List, *args: List) -> Union[ - 1 if the value is a member of the set. - 0 if the value is not a member of the set or if key does not exist. - For more information see https://redis.io/commands/smismember + For more information see https://valkey.io/commands/smismember """ args = list_or_args(values, args) return self.execute_command("SMISMEMBER", name, *args, keys=[name]) @@ -3431,7 +3431,7 @@ def smove(self, src: str, dst: str, value: str) -> Union[Awaitable[bool], bool]: """ Move ``value`` from set ``src`` to set ``dst`` atomically - For more information see https://redis.io/commands/smove + For more information see https://valkey.io/commands/smove """ return self.execute_command("SMOVE", src, dst, value) @@ -3439,7 +3439,7 @@ def spop(self, name: str, count: Optional[int] = None) -> Union[str, List, None] """ Remove and return a random member of set ``name`` - For more information see https://redis.io/commands/spop + For more information see https://valkey.io/commands/spop """ args = (count is not None) and [count] or [] return self.execute_command("SPOP", name, *args) @@ -3452,9 +3452,9 @@ def srandmember( If ``number`` is supplied, returns a list of ``number`` random members of set ``name``. Note this is only available when running - Redis 2.6+. + Valkey 2.6+. 
- For more information see https://redis.io/commands/srandmember + For more information see https://valkey.io/commands/srandmember """ args = (number is not None) and [number] or [] return self.execute_command("SRANDMEMBER", name, *args) @@ -3463,7 +3463,7 @@ def srem(self, name: str, *values: FieldT) -> Union[Awaitable[int], int]: """ Remove ``values`` from set ``name`` - For more information see https://redis.io/commands/srem + For more information see https://valkey.io/commands/srem """ return self.execute_command("SREM", name, *values) @@ -3471,7 +3471,7 @@ def sunion(self, keys: List, *args: List) -> Union[Awaitable[List], List]: """ Return the union of sets specified by ``keys`` - For more information see https://redis.io/commands/sunion + For more information see https://valkey.io/commands/sunion """ args = list_or_args(keys, args) return self.execute_command("SUNION", *args, keys=args) @@ -3483,7 +3483,7 @@ def sunionstore( Store the union of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. - For more information see https://redis.io/commands/sunionstore + For more information see https://valkey.io/commands/sunionstore """ args = list_or_args(keys, args) return self.execute_command("SUNIONSTORE", dest, *args) @@ -3494,8 +3494,8 @@ def sunionstore( class StreamCommands(CommandsProtocol): """ - Redis commands for Stream data type. - see: https://redis.io/topics/streams-intro + Valkey commands for Stream data type. + see: https://valkey.io/topics/streams-intro """ def xack(self, name: KeyT, groupname: GroupT, *ids: StreamIdT) -> ResponseT: @@ -3505,7 +3505,7 @@ def xack(self, name: KeyT, groupname: GroupT, *ids: StreamIdT) -> ResponseT: groupname: name of the consumer group. *ids: message ids to acknowledge. 
- For more information see https://redis.io/commands/xack + For more information see https://valkey.io/commands/xack """ return self.execute_command("XACK", name, groupname, *ids) @@ -3533,7 +3533,7 @@ def xadd( Can't be specified with maxlen. limit: specifies the maximum number of entries to retrieve - For more information see https://redis.io/commands/xadd + For more information see https://valkey.io/commands/xadd """ pieces: list[EncodableT] = [] if maxlen is not None and minid is not None: @@ -3588,7 +3588,7 @@ def xautoclaim( justid: optional boolean, false by default. Return just an array of IDs of messages successfully claimed, without returning the actual message - For more information see https://redis.io/commands/xautoclaim + For more information see https://valkey.io/commands/xautoclaim """ try: if int(min_idle_time) < 0: @@ -3658,7 +3658,7 @@ def xclaim( justid: optional boolean, false by default. Return just an array of IDs of messages successfully claimed, without returning the actual message - For more information see https://redis.io/commands/xclaim + For more information see https://valkey.io/commands/xclaim """ if not isinstance(min_idle_time, int) or min_idle_time < 0: raise DataError("XCLAIM min_idle_time must be a non negative integer") @@ -3702,7 +3702,7 @@ def xdel(self, name: KeyT, *ids: StreamIdT) -> ResponseT: name: name of the stream. *ids: message ids to delete. - For more information see https://redis.io/commands/xdel + For more information see https://valkey.io/commands/xdel """ return self.execute_command("XDEL", name, *ids) @@ -3720,7 +3720,7 @@ def xgroup_create( groupname: name of the consumer group. id: ID of the last item in the stream to consider already delivered. 
- For more information see https://redis.io/commands/xgroup-create + For more information see https://valkey.io/commands/xgroup-create """ pieces: list[EncodableT] = ["XGROUP CREATE", name, groupname, id] if mkstream: @@ -3741,7 +3741,7 @@ def xgroup_delconsumer( groupname: name of the consumer group. consumername: name of consumer to delete - For more information see https://redis.io/commands/xgroup-delconsumer + For more information see https://valkey.io/commands/xgroup-delconsumer """ return self.execute_command("XGROUP DELCONSUMER", name, groupname, consumername) @@ -3751,7 +3751,7 @@ def xgroup_destroy(self, name: KeyT, groupname: GroupT) -> ResponseT: name: name of the stream. groupname: name of the consumer group. - For more information see https://redis.io/commands/xgroup-destroy + For more information see https://valkey.io/commands/xgroup-destroy """ return self.execute_command("XGROUP DESTROY", name, groupname) @@ -3766,7 +3766,7 @@ def xgroup_createconsumer( groupname: name of the consumer group. consumername: name of consumer to create. - See: https://redis.io/commands/xgroup-createconsumer + See: https://valkey.io/commands/xgroup-createconsumer """ return self.execute_command( "XGROUP CREATECONSUMER", name, groupname, consumername @@ -3785,7 +3785,7 @@ def xgroup_setid( groupname: name of the consumer group. id: ID of the last item in the stream to consider already delivered. - For more information see https://redis.io/commands/xgroup-setid + For more information see https://valkey.io/commands/xgroup-setid """ pieces = [name, groupname, id] if entries_read is not None: @@ -3798,7 +3798,7 @@ def xinfo_consumers(self, name: KeyT, groupname: GroupT) -> ResponseT: name: name of the stream. groupname: name of the consumer group. 
- For more information see https://redis.io/commands/xinfo-consumers + For more information see https://valkey.io/commands/xinfo-consumers """ return self.execute_command("XINFO CONSUMERS", name, groupname) @@ -3807,7 +3807,7 @@ def xinfo_groups(self, name: KeyT) -> ResponseT: Returns general information about the consumer groups of the stream. name: name of the stream. - For more information see https://redis.io/commands/xinfo-groups + For more information see https://valkey.io/commands/xinfo-groups """ return self.execute_command("XINFO GROUPS", name) @@ -3817,7 +3817,7 @@ def xinfo_stream(self, name: KeyT, full: bool = False) -> ResponseT: name: name of the stream. full: optional boolean, false by default. Return full summary - For more information see https://redis.io/commands/xinfo-stream + For more information see https://valkey.io/commands/xinfo-stream """ pieces = [name] options = {} @@ -3830,7 +3830,7 @@ def xlen(self, name: KeyT) -> ResponseT: """ Returns the number of elements in a given stream. - For more information see https://redis.io/commands/xlen + For more information see https://valkey.io/commands/xlen """ return self.execute_command("XLEN", name, keys=[name]) @@ -3840,7 +3840,7 @@ def xpending(self, name: KeyT, groupname: GroupT) -> ResponseT: name: name of the stream. groupname: name of the consumer group. - For more information see https://redis.io/commands/xpending + For more information see https://valkey.io/commands/xpending """ return self.execute_command("XPENDING", name, groupname, keys=[name]) @@ -3922,7 +3922,7 @@ def xrange( count: if set, only return this many items, beginning with the earliest available. - For more information see https://redis.io/commands/xrange + For more information see https://valkey.io/commands/xrange """ pieces = [min, max] if count is not None: @@ -3950,7 +3950,7 @@ def xread( block: number of milliseconds to wait, if nothing already present. 
- For more information see https://redis.io/commands/xread + For more information see https://valkey.io/commands/xread """ pieces = [] if block is not None: @@ -3996,7 +3996,7 @@ def xreadgroup( block: number of milliseconds to wait, if nothing already present. noack: do not add messages to the PEL - For more information see https://redis.io/commands/xreadgroup + For more information see https://valkey.io/commands/xreadgroup """ pieces: list[EncodableT] = [b"GROUP", groupname, consumername] if count is not None: @@ -4039,7 +4039,7 @@ def xrevrange( count: if set, only return this many items, beginning with the latest available. - For more information see https://redis.io/commands/xrevrange + For more information see https://valkey.io/commands/xrevrange """ pieces: list[EncodableT] = [max, min] if count is not None: @@ -4068,7 +4068,7 @@ def xtrim( Can't be specified with maxlen. limit: specifies the maximum number of entries to retrieve - For more information see https://redis.io/commands/xtrim + For more information see https://valkey.io/commands/xtrim """ pieces: list[EncodableT] = [] if maxlen is not None and minid is not None: @@ -4099,8 +4099,8 @@ def xtrim( class SortedSetCommands(CommandsProtocol): """ - Redis commands for Sorted Sets data type. - see: https://redis.io/topics/data-types-intro#redis-sorted-sets + Valkey commands for Sorted Sets data type. + see: https://valkey.io/topics/data-types-intro#valkey-sorted-sets """ def zadd( @@ -4145,7 +4145,7 @@ def zadd( ``NX``, ``LT``, and ``GT`` are mutually exclusive options. 
- See: https://redis.io/commands/ZADD + See: https://valkey.io/commands/zadd """ if not mapping: raise DataError("ZADD requires at least one element/score pair") @@ -4185,7 +4185,7 @@ def zcard(self, name: KeyT) -> ResponseT: """ Return the number of elements in the sorted set ``name`` - For more information see https://redis.io/commands/zcard + For more information see https://valkey.io/commands/zcard """ return self.execute_command("ZCARD", name, keys=[name]) @@ -4194,7 +4194,7 @@ def zcount(self, name: KeyT, min: ZScoreBoundT, max: ZScoreBoundT) -> ResponseT: Returns the number of elements in the sorted set at key ``name`` with a score between ``min`` and ``max``. - For more information see https://redis.io/commands/zcount + For more information see https://valkey.io/commands/zcount """ return self.execute_command("ZCOUNT", name, min, max, keys=[name]) @@ -4203,7 +4203,7 @@ def zdiff(self, keys: KeysT, withscores: bool = False) -> ResponseT: Returns the difference between the first and all successive input sorted sets provided in ``keys``. - For more information see https://redis.io/commands/zdiff + For more information see https://valkey.io/commands/zdiff """ pieces = [len(keys), *keys] if withscores: @@ -4215,7 +4215,7 @@ def zdiffstore(self, dest: KeyT, keys: KeysT) -> ResponseT: Computes the difference between the first and all successive input sorted sets provided in ``keys`` and stores the result in ``dest``. 
- For more information see https://redis.io/commands/zdiffstore + For more information see https://valkey.io/commands/zdiffstore """ pieces = [len(keys), *keys] return self.execute_command("ZDIFFSTORE", dest, *pieces) @@ -4224,7 +4224,7 @@ def zincrby(self, name: KeyT, amount: float, value: EncodableT) -> ResponseT: """ Increment the score of ``value`` in sorted set ``name`` by ``amount`` - For more information see https://redis.io/commands/zincrby + For more information see https://valkey.io/commands/zincrby """ return self.execute_command("ZINCRBY", name, amount, value) @@ -4240,7 +4240,7 @@ def zinter( set will contain the minimum or maximum score of an element across the inputs where it exists. - For more information see https://redis.io/commands/zinter + For more information see https://valkey.io/commands/zinter """ return self._zaggregate("ZINTER", None, keys, aggregate, withscores=withscores) @@ -4259,7 +4259,7 @@ def zinterstore( contain the minimum or maximum score of an element across the inputs where it exists. - For more information see https://redis.io/commands/zinterstore + For more information see https://valkey.io/commands/zinterstore """ return self._zaggregate("ZINTERSTORE", dest, keys, aggregate) @@ -4273,7 +4273,7 @@ def zintercard( cardinality reaches limit partway through the computation, the algorithm will exit and yield limit as the cardinality - For more information see https://redis.io/commands/zintercard + For more information see https://valkey.io/commands/zintercard """ args = [numkeys, *keys, "LIMIT", limit] return self.execute_command("ZINTERCARD", *args, keys=keys) @@ -4283,7 +4283,7 @@ def zlexcount(self, name, min, max): Return the number of items in the sorted set ``name`` between the lexicographical range ``min`` and ``max``. 
- For more information see https://redis.io/commands/zlexcount + For more information see https://valkey.io/commands/zlexcount """ return self.execute_command("ZLEXCOUNT", name, min, max, keys=[name]) @@ -4292,7 +4292,7 @@ def zpopmax(self, name: KeyT, count: Union[int, None] = None) -> ResponseT: Remove and return up to ``count`` members with the highest scores from the sorted set ``name``. - For more information see https://redis.io/commands/zpopmax + For more information see https://valkey.io/commands/zpopmax """ args = (count is not None) and [count] or [] options = {"withscores": True} @@ -4303,7 +4303,7 @@ def zpopmin(self, name: KeyT, count: Union[int, None] = None) -> ResponseT: Remove and return up to ``count`` members with the lowest scores from the sorted set ``name``. - For more information see https://redis.io/commands/zpopmin + For more information see https://valkey.io/commands/zpopmin """ args = (count is not None) and [count] or [] options = {"withscores": True} @@ -4325,7 +4325,7 @@ def zrandmember( includes the respective scores of the randomly selected elements from the sorted set. - For more information see https://redis.io/commands/zrandmember + For more information see https://valkey.io/commands/zrandmember """ params = [] if count is not None: @@ -4346,7 +4346,7 @@ def bzpopmax(self, keys: KeysT, timeout: TimeoutSecT = 0) -> ResponseT: If timeout is 0, then block indefinitely. - For more information see https://redis.io/commands/bzpopmax + For more information see https://valkey.io/commands/bzpopmax """ if timeout is None: timeout = 0 @@ -4365,7 +4365,7 @@ def bzpopmin(self, keys: KeysT, timeout: TimeoutSecT = 0) -> ResponseT: If timeout is 0, then block indefinitely. 
- For more information see https://redis.io/commands/bzpopmin + For more information see https://valkey.io/commands/bzpopmin """ if timeout is None: timeout = 0 @@ -4384,7 +4384,7 @@ def zmpop( """ Pop ``count`` values (default 1) off of the first non-empty sorted set named in the ``keys`` list. - For more information see https://redis.io/commands/zmpop + For more information see https://valkey.io/commands/zmpop """ args = [num_keys] + keys if (min and max) or (not min and not max): @@ -4417,7 +4417,7 @@ def bzmpop( If timeout is 0, then block indefinitely. - For more information see https://redis.io/commands/bzmpop + For more information see https://valkey.io/commands/bzmpop """ args = [timeout, numkeys, *keys] if (min and max) or (not min and not max): @@ -4510,10 +4510,10 @@ def zrange( ``offset`` and ``num`` are specified, then return a slice of the range. Can't be provided when using ``bylex``. - For more information see https://redis.io/commands/zrange + For more information see https://valkey.io/commands/zrange """ - # Need to support ``desc`` also when using old redis version - # because it was supported in 3.5.3 (of redis-py) + # Need to support ``desc`` also when using old valkey version + # because it was supported in 3.5.3 (of valkey-py) if not byscore and not bylex and (offset is None and num is None) and desc: return self.zrevrange(name, start, end, withscores, score_cast_func) @@ -4551,7 +4551,7 @@ def zrevrange( ``score_cast_func`` a callable used to cast the score return value - For more information see https://redis.io/commands/zrevrange + For more information see https://valkey.io/commands/zrevrange """ pieces = ["ZREVRANGE", name, start, end] if withscores: @@ -4593,7 +4593,7 @@ def zrangestore( ``offset`` and ``num`` are specified, then return a slice of the range. Can't be provided when using ``bylex``. 
- For more information see https://redis.io/commands/zrangestore + For more information see https://valkey.io/commands/zrangestore """ return self._zrange( "ZRANGESTORE", @@ -4625,7 +4625,7 @@ def zrangebylex( If ``start`` and ``num`` are specified, then return a slice of the range. - For more information see https://redis.io/commands/zrangebylex + For more information see https://valkey.io/commands/zrangebylex """ if (start is not None and num is None) or (num is not None and start is None): raise DataError("``start`` and ``num`` must both be specified") @@ -4649,7 +4649,7 @@ def zrevrangebylex( If ``start`` and ``num`` are specified, then return a slice of the range. - For more information see https://redis.io/commands/zrevrangebylex + For more information see https://valkey.io/commands/zrevrangebylex """ if (start is not None and num is None) or (num is not None and start is None): raise DataError("``start`` and ``num`` must both be specified") @@ -4680,7 +4680,7 @@ def zrangebyscore( `score_cast_func`` a callable used to cast the score return value - For more information see https://redis.io/commands/zrangebyscore + For more information see https://valkey.io/commands/zrangebyscore """ if (start is not None and num is None) or (num is not None and start is None): raise DataError("``start`` and ``num`` must both be specified") @@ -4715,7 +4715,7 @@ def zrevrangebyscore( ``score_cast_func`` a callable used to cast the score return value - For more information see https://redis.io/commands/zrevrangebyscore + For more information see https://valkey.io/commands/zrevrangebyscore """ if (start is not None and num is None) or (num is not None and start is None): raise DataError("``start`` and ``num`` must both be specified") @@ -4740,7 +4740,7 @@ def zrank( The optional WITHSCORE argument supplements the command's reply with the score of the element returned. 
- For more information see https://redis.io/commands/zrank + For more information see https://valkey.io/commands/zrank """ if withscore: return self.execute_command("ZRANK", name, value, "WITHSCORE", keys=[name]) @@ -4750,7 +4750,7 @@ def zrem(self, name: KeyT, *values: FieldT) -> ResponseT: """ Remove member ``values`` from sorted set ``name`` - For more information see https://redis.io/commands/zrem + For more information see https://valkey.io/commands/zrem """ return self.execute_command("ZREM", name, *values) @@ -4761,7 +4761,7 @@ def zremrangebylex(self, name: KeyT, min: EncodableT, max: EncodableT) -> Respon Returns the number of elements removed. - For more information see https://redis.io/commands/zremrangebylex + For more information see https://valkey.io/commands/zremrangebylex """ return self.execute_command("ZREMRANGEBYLEX", name, min, max) @@ -4772,7 +4772,7 @@ def zremrangebyrank(self, name: KeyT, min: int, max: int) -> ResponseT: to largest. Values can be negative indicating the highest scores. Returns the number of elements removed - For more information see https://redis.io/commands/zremrangebyrank + For more information see https://valkey.io/commands/zremrangebyrank """ return self.execute_command("ZREMRANGEBYRANK", name, min, max) @@ -4783,7 +4783,7 @@ def zremrangebyscore( Remove all elements in the sorted set ``name`` with scores between ``min`` and ``max``. Returns the number of elements removed. - For more information see https://redis.io/commands/zremrangebyscore + For more information see https://valkey.io/commands/zremrangebyscore """ return self.execute_command("ZREMRANGEBYSCORE", name, min, max) @@ -4799,7 +4799,7 @@ def zrevrank( The optional ``withscore`` argument supplements the command's reply with the score of the element returned. 
- For more information see https://redis.io/commands/zrevrank + For more information see https://valkey.io/commands/zrevrank """ if withscore: return self.execute_command( @@ -4811,7 +4811,7 @@ def zscore(self, name: KeyT, value: EncodableT) -> ResponseT: """ Return the score of element ``value`` in sorted set ``name`` - For more information see https://redis.io/commands/zscore + For more information see https://valkey.io/commands/zscore """ return self.execute_command("ZSCORE", name, value, keys=[name]) @@ -4827,7 +4827,7 @@ def zunion( Scores will be aggregated based on the ``aggregate``, or SUM if none is provided. - For more information see https://redis.io/commands/zunion + For more information see https://valkey.io/commands/zunion """ return self._zaggregate("ZUNION", None, keys, aggregate, withscores=withscores) @@ -4842,7 +4842,7 @@ def zunionstore( a new sorted set, ``dest``. Scores in the destination will be aggregated based on the ``aggregate``, or SUM if none is provided. - For more information see https://redis.io/commands/zunionstore + For more information see https://valkey.io/commands/zunionstore """ return self._zaggregate("ZUNIONSTORE", dest, keys, aggregate) @@ -4855,7 +4855,7 @@ def zmscore(self, key: KeyT, members: List[str]) -> ResponseT: If the member does not exist, a None will be returned in corresponding position. - For more information see https://redis.io/commands/zmscore + For more information see https://valkey.io/commands/zmscore """ if not members: raise DataError("ZMSCORE members must be a non-empty list") @@ -4899,15 +4899,15 @@ def _zaggregate( class HyperlogCommands(CommandsProtocol): """ - Redis commands of HyperLogLogs data type. - see: https://redis.io/topics/data-types-intro#hyperloglogs + Valkey commands of HyperLogLogs data type. + see: https://valkey.io/topics/data-types-intro#hyperloglogs """ def pfadd(self, name: KeyT, *values: FieldT) -> ResponseT: """ Adds the specified elements to the specified HyperLogLog. 
- For more information see https://redis.io/commands/pfadd + For more information see https://valkey.io/commands/pfadd """ return self.execute_command("PFADD", name, *values) @@ -4916,7 +4916,7 @@ def pfcount(self, *sources: KeyT) -> ResponseT: Return the approximated cardinality of the set observed by the HyperLogLog at key(s). - For more information see https://redis.io/commands/pfcount + For more information see https://valkey.io/commands/pfcount """ return self.execute_command("PFCOUNT", *sources) @@ -4924,7 +4924,7 @@ def pfmerge(self, dest: KeyT, *sources: KeyT) -> ResponseT: """ Merge N different HyperLogLogs into a single one. - For more information see https://redis.io/commands/pfmerge + For more information see https://valkey.io/commands/pfmerge """ return self.execute_command("PFMERGE", dest, *sources) @@ -4934,15 +4934,15 @@ def pfmerge(self, dest: KeyT, *sources: KeyT) -> ResponseT: class HashCommands(CommandsProtocol): """ - Redis commands for Hash data type. - see: https://redis.io/topics/data-types-intro#redis-hashes + Valkey commands for Hash data type. 
+ see: https://valkey.io/topics/data-types-intro#valkey-hashes """ def hdel(self, name: str, *keys: str) -> Union[Awaitable[int], int]: """ Delete ``keys`` from hash ``name`` - For more information see https://redis.io/commands/hdel + For more information see https://valkey.io/commands/hdel """ return self.execute_command("HDEL", name, *keys) @@ -4950,7 +4950,7 @@ def hexists(self, name: str, key: str) -> Union[Awaitable[bool], bool]: """ Returns a boolean indicating if ``key`` exists within hash ``name`` - For more information see https://redis.io/commands/hexists + For more information see https://valkey.io/commands/hexists """ return self.execute_command("HEXISTS", name, key, keys=[name]) @@ -4960,7 +4960,7 @@ def hget( """ Return the value of ``key`` within the hash ``name`` - For more information see https://redis.io/commands/hget + For more information see https://valkey.io/commands/hget """ return self.execute_command("HGET", name, key, keys=[name]) @@ -4968,7 +4968,7 @@ def hgetall(self, name: str) -> Union[Awaitable[dict], dict]: """ Return a Python dict of the hash's name/value pairs - For more information see https://redis.io/commands/hgetall + For more information see https://valkey.io/commands/hgetall """ return self.execute_command("HGETALL", name, keys=[name]) @@ -4978,7 +4978,7 @@ def hincrby( """ Increment the value of ``key`` in hash ``name`` by ``amount`` - For more information see https://redis.io/commands/hincrby + For more information see https://valkey.io/commands/hincrby """ return self.execute_command("HINCRBY", name, key, amount) @@ -4988,7 +4988,7 @@ def hincrbyfloat( """ Increment the value of ``key`` in hash ``name`` by floating ``amount`` - For more information see https://redis.io/commands/hincrbyfloat + For more information see https://valkey.io/commands/hincrbyfloat """ return self.execute_command("HINCRBYFLOAT", name, key, amount) @@ -4996,7 +4996,7 @@ def hkeys(self, name: str) -> Union[Awaitable[List], List]: """ Return the list 
of keys within hash ``name`` - For more information see https://redis.io/commands/hkeys + For more information see https://valkey.io/commands/hkeys """ return self.execute_command("HKEYS", name, keys=[name]) @@ -5004,7 +5004,7 @@ def hlen(self, name: str) -> Union[Awaitable[int], int]: """ Return the number of elements in hash ``name`` - For more information see https://redis.io/commands/hlen + For more information see https://valkey.io/commands/hlen """ return self.execute_command("HLEN", name, keys=[name]) @@ -5024,7 +5024,7 @@ def hset( added to hash ``name``. Returns the number of fields that were added. - For more information see https://redis.io/commands/hset + For more information see https://valkey.io/commands/hset """ if key is None and not mapping and not items: raise DataError("'hset' with no key value pairs") @@ -5044,7 +5044,7 @@ def hsetnx(self, name: str, key: str, value: str) -> Union[Awaitable[bool], bool Set ``key`` to ``value`` within hash ``name`` if ``key`` does not exist. Returns 1 if HSETNX created a field, otherwise 0. - For more information see https://redis.io/commands/hsetnx + For more information see https://valkey.io/commands/hsetnx """ return self.execute_command("HSETNX", name, key, value) @@ -5053,7 +5053,7 @@ def hmset(self, name: str, mapping: dict) -> Union[Awaitable[str], str]: Set key to value within hash ``name`` for each corresponding key and value from the ``mapping`` dict. - For more information see https://redis.io/commands/hmset + For more information see https://valkey.io/commands/hmset """ warnings.warn( f"{self.__class__.__name__}.hmset() is deprecated. 
" @@ -5072,7 +5072,7 @@ def hmget(self, name: str, keys: List, *args: List) -> Union[Awaitable[List], Li """ Returns a list of values ordered identically to ``keys`` - For more information see https://redis.io/commands/hmget + For more information see https://valkey.io/commands/hmget """ args = list_or_args(keys, args) return self.execute_command("HMGET", name, *args, keys=[name]) @@ -5081,7 +5081,7 @@ def hvals(self, name: str) -> Union[Awaitable[List], List]: """ Return the list of values within hash ``name`` - For more information see https://redis.io/commands/hvals + For more information see https://valkey.io/commands/hvals """ return self.execute_command("HVALS", name, keys=[name]) @@ -5090,7 +5090,7 @@ def hstrlen(self, name: str, key: str) -> Union[Awaitable[int], int]: Return the number of bytes stored in the value of ``key`` within hash ``name`` - For more information see https://redis.io/commands/hstrlen + For more information see https://valkey.io/commands/hstrlen """ return self.execute_command("HSTRLEN", name, key, keys=[name]) @@ -5103,7 +5103,7 @@ class Script: An executable Lua script object returned by ``register_script`` """ - def __init__(self, registered_client: "Redis", script: ScriptTextT): + def __init__(self, registered_client: "Valkey", script: ScriptTextT): self.registered_client = registered_client self.script = script # Precalculate and store the SHA1 hex digest of the script. 
@@ -5123,7 +5123,7 @@ def __call__( self, keys: Union[Sequence[KeyT], None] = None, args: Union[Iterable[EncodableT], None] = None, - client: Union["Redis", None] = None, + client: Union["Valkey", None] = None, ): """Execute the script, passing any required ``args``""" keys = keys or [] @@ -5131,8 +5131,8 @@ def __call__( if client is None: client = self.registered_client args = tuple(keys) + tuple(args) - # make sure the Redis server knows about the script - from redis.client import Pipeline + # make sure the Valkey server knows about the script + from valkey.client import Pipeline if isinstance(client, Pipeline): # Make sure the pipeline can register the script before executing. @@ -5152,7 +5152,7 @@ class AsyncScript: An executable Lua script object returned by ``register_script`` """ - def __init__(self, registered_client: "AsyncRedis", script: ScriptTextT): + def __init__(self, registered_client: "AsyncValkey", script: ScriptTextT): self.registered_client = registered_client self.script = script # Precalculate and store the SHA1 hex digest of the script. @@ -5172,7 +5172,7 @@ async def __call__( self, keys: Union[Sequence[KeyT], None] = None, args: Union[Iterable[EncodableT], None] = None, - client: Union["AsyncRedis", None] = None, + client: Union["AsyncValkey", None] = None, ): """Execute the script, passing any required ``args``""" keys = keys or [] @@ -5180,8 +5180,8 @@ async def __call__( if client is None: client = self.registered_client args = tuple(keys) + tuple(args) - # make sure the Redis server knows about the script - from redis.asyncio.client import Pipeline + # make sure the Valkey server knows about the script + from valkey.asyncio.client import Pipeline if isinstance(client, Pipeline): # Make sure the pipeline can register the script before executing. @@ -5198,8 +5198,8 @@ async def __call__( class PubSubCommands(CommandsProtocol): """ - Redis PubSub commands. - see https://redis.io/topics/pubsub + Valkey PubSub commands. 
+ see https://valkey.io/topics/pubsub """ def publish(self, channel: ChannelT, message: EncodableT, **kwargs) -> ResponseT: @@ -5207,7 +5207,7 @@ def publish(self, channel: ChannelT, message: EncodableT, **kwargs) -> ResponseT Publish ``message`` on ``channel``. Returns the number of subscribers the message was delivered to. - For more information see https://redis.io/commands/publish + For more information see https://valkey.io/commands/publish """ return self.execute_command("PUBLISH", channel, message, **kwargs) @@ -5216,7 +5216,7 @@ def spublish(self, shard_channel: ChannelT, message: EncodableT) -> ResponseT: Posts a message to the given shard channel. Returns the number of clients that received the message - For more information see https://redis.io/commands/spublish + For more information see https://valkey.io/commands/spublish """ return self.execute_command("SPUBLISH", shard_channel, message) @@ -5224,7 +5224,7 @@ def pubsub_channels(self, pattern: PatternT = "*", **kwargs) -> ResponseT: """ Return a list of channels that have at least one subscriber - For more information see https://redis.io/commands/pubsub-channels + For more information see https://valkey.io/commands/pubsub-channels """ return self.execute_command("PUBSUB CHANNELS", pattern, **kwargs) @@ -5232,7 +5232,7 @@ def pubsub_shardchannels(self, pattern: PatternT = "*", **kwargs) -> ResponseT: """ Return a list of shard_channels that have at least one subscriber - For more information see https://redis.io/commands/pubsub-shardchannels + For more information see https://valkey.io/commands/pubsub-shardchannels """ return self.execute_command("PUBSUB SHARDCHANNELS", pattern, **kwargs) @@ -5240,7 +5240,7 @@ def pubsub_numpat(self, **kwargs) -> ResponseT: """ Returns the number of subscriptions to patterns - For more information see https://redis.io/commands/pubsub-numpat + For more information see https://valkey.io/commands/pubsub-numpat """ return self.execute_command("PUBSUB NUMPAT", **kwargs) @@ 
-5249,7 +5249,7 @@ def pubsub_numsub(self, *args: ChannelT, **kwargs) -> ResponseT: Return a list of (channel, number of subscribers) tuples for each channel given in ``*args`` - For more information see https://redis.io/commands/pubsub-numsub + For more information see https://valkey.io/commands/pubsub-numsub """ return self.execute_command("PUBSUB NUMSUB", *args, **kwargs) @@ -5258,7 +5258,7 @@ def pubsub_shardnumsub(self, *args: ChannelT, **kwargs) -> ResponseT: Return a list of (shard_channel, number of subscribers) tuples for each channel given in ``*args`` - For more information see https://redis.io/commands/pubsub-shardnumsub + For more information see https://valkey.io/commands/pubsub-shardnumsub """ return self.execute_command("PUBSUB SHARDNUMSUB", *args, **kwargs) @@ -5268,8 +5268,8 @@ def pubsub_shardnumsub(self, *args: ChannelT, **kwargs) -> ResponseT: class ScriptCommands(CommandsProtocol): """ - Redis Lua script commands. see: - https://redis.com/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/ + Valkey Lua script commands. see: + https://redis.com/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/ """ def _eval( @@ -5286,9 +5286,9 @@ def eval( Returns the result of the script. In practice, use the object returned by ``register_script``. This - function exists purely for Redis API completion. + function exists purely for Valkey API completion. - For more information see https://redis.io/commands/eval + For more information see https://valkey.io/commands/eval """ return self._eval("EVAL", script, numkeys, *keys_and_args) @@ -5302,7 +5302,7 @@ def eval_ro( will touch and the key names and argument values in ``keys_and_args``. Returns the result of the script. - For more information see https://redis.io/commands/eval_ro + For more information see https://valkey.io/commands/eval_ro """ return self._eval("EVAL_RO", script, numkeys, *keys_and_args) @@ -5321,9 +5321,9 @@ def evalsha( of the script. 
In practice, use the object returned by ``register_script``. This - function exists purely for Redis API completion. + function exists purely for Valkey API completion. - For more information see https://redis.io/commands/evalsha + For more information see https://valkey.io/commands/evalsha """ return self._evalsha("EVALSHA", sha, numkeys, *keys_and_args) @@ -5338,7 +5338,7 @@ def evalsha_ro( key names and argument values in ``keys_and_args``. Returns the result of the script. - For more information see https://redis.io/commands/evalsha_ro + For more information see https://valkey.io/commands/evalsha_ro """ return self._evalsha("EVALSHA_RO", sha, numkeys, *keys_and_args) @@ -5348,7 +5348,7 @@ def script_exists(self, *args: str) -> ResponseT: each script as ``args``. Returns a list of boolean values indicating if if each already script exists in the cache. - For more information see https://redis.io/commands/script-exists + For more information see https://valkey.io/commands/script-exists """ return self.execute_command("SCRIPT EXISTS", *args) @@ -5365,15 +5365,15 @@ def script_flush( ``sync_type`` is by default SYNC (synchronous) but it can also be ASYNC. - For more information see https://redis.io/commands/script-flush + For more information see https://valkey.io/commands/script-flush """ - # Redis pre 6 had no sync_type. + # Valkey pre 6 had no sync_type. if sync_type not in ["SYNC", "ASYNC", None]: raise DataError( - "SCRIPT FLUSH defaults to SYNC in redis > 6.2, or " + "SCRIPT FLUSH defaults to SYNC in valkey > 6.2, or " "accepts SYNC/ASYNC. For older versions, " - "of redis leave as None." + "of valkey leave as None." 
) if sync_type is None: pieces = [] @@ -5385,7 +5385,7 @@ def script_kill(self) -> ResponseT: """ Kill the currently executing Lua script - For more information see https://redis.io/commands/script-kill + For more information see https://valkey.io/commands/script-kill """ return self.execute_command("SCRIPT KILL") @@ -5393,11 +5393,11 @@ def script_load(self, script: ScriptTextT) -> ResponseT: """ Load a Lua ``script`` into the script cache. Returns the SHA. - For more information see https://redis.io/commands/script-load + For more information see https://valkey.io/commands/script-load """ return self.execute_command("SCRIPT LOAD", script) - def register_script(self: "Redis", script: ScriptTextT) -> Script: + def register_script(self: "Valkey", script: ScriptTextT) -> Script: """ Register a Lua ``script`` specifying the ``keys`` it will touch. Returns a Script object that is callable and hides the complexity of @@ -5411,7 +5411,7 @@ class AsyncScriptCommands(ScriptCommands): async def script_debug(self, *args) -> None: return super().script_debug() - def register_script(self: "AsyncRedis", script: ScriptTextT) -> AsyncScript: + def register_script(self: "AsyncValkey", script: ScriptTextT) -> AsyncScript: """ Register a Lua ``script`` specifying the ``keys`` it will touch. Returns a Script object that is callable and hides the complexity of @@ -5423,8 +5423,8 @@ def register_script(self: "AsyncRedis", script: ScriptTextT) -> AsyncScript: class GeoCommands(CommandsProtocol): """ - Redis Geospatial commands. - see: https://redis.com/redis-best-practices/indexing-patterns/geospatial/ + Valkey Geospatial commands. + see: https://redis.com/redis-best-practices/indexing-patterns/geospatial/ """ def geoadd( @@ -5453,7 +5453,7 @@ def geoadd( Changed elements include new elements that were added and elements whose scores changed. 
- For more information see https://redis.io/commands/geoadd + For more information see https://valkey.io/commands/geoadd """ if nx and xx: raise DataError("GEOADD allows either 'nx' or 'xx', not both") @@ -5478,7 +5478,7 @@ def geodist( The units must be one of the following : m, km mi, ft. By default meters are used. - For more information see https://redis.io/commands/geodist + For more information see https://valkey.io/commands/geodist """ pieces: list[EncodableT] = [name, place1, place2] if unit and unit not in ("m", "km", "mi", "ft"): @@ -5492,7 +5492,7 @@ def geohash(self, name: KeyT, *values: FieldT) -> ResponseT: Return the geo hash string for each item of ``values`` members of the specified key identified by the ``name`` argument. - For more information see https://redis.io/commands/geohash + For more information see https://valkey.io/commands/geohash """ return self.execute_command("GEOHASH", name, *values, keys=[name]) @@ -5502,7 +5502,7 @@ def geopos(self, name: KeyT, *values: FieldT) -> ResponseT: the specified key identified by the ``name`` argument. Each position is represented by the pairs lon and lat. - For more information see https://redis.io/commands/geopos + For more information see https://valkey.io/commands/geopos """ return self.execute_command("GEOPOS", name, *values, keys=[name]) @@ -5550,7 +5550,7 @@ def georadius( named with a specific key, instead of ``store`` the sorted set destination score is set with the distance. - For more information see https://redis.io/commands/georadius + For more information see https://valkey.io/commands/georadius """ return self._georadiusgeneric( "GEORADIUS", @@ -5590,7 +5590,7 @@ def georadiusbymember( and latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. 
- For more information see https://redis.io/commands/georadiusbymember + For more information see https://valkey.io/commands/georadiusbymember """ return self._georadiusgeneric( "GEORADIUSBYMEMBER", @@ -5712,7 +5712,7 @@ def geosearch( ``withhash`` indicates to return the geohash string of each place. - For more information see https://redis.io/commands/geosearch + For more information see https://valkey.io/commands/geosearch """ return self._geosearchgeneric( @@ -5749,7 +5749,7 @@ def geosearchstore( sort: Union[str, None] = None, count: Union[int, None] = None, any: bool = False, - storedist: bool = False, + storedist: bool = False, ) -> ResponseT: """ This command is like GEOSEARCH, but stores the result in @@ -5759,7 +5759,7 @@ def geosearchstore( items in a sorted set populated with their distance from the center of the circle or box, as a floating-point number. - For more information see https://redis.io/commands/geosearchstore + For more information see https://valkey.io/commands/geosearchstore """ return self._geosearchgeneric( "GEOSEARCHSTORE", @@ -5779,7 +5779,7 @@ def geosearchstore( withdist=None, withhash=None, store=None, - store_dist=storedist, + store_dist=storedist, ) def _geosearchgeneric( @@ -5854,8 +5854,8 @@ def _geosearchgeneric( class ModuleCommands(CommandsProtocol): """ - Redis Module commands. - see: https://redis.io/topics/modules-intro + Valkey Module commands. + see: https://valkey.io/topics/modules-intro """ def module_load(self, path, *args) -> ResponseT: @@ -5864,7 +5864,7 @@ def module_load(self, path, *args) -> ResponseT: Passes all ``*args`` to the module, during loading. Raises ``ModuleError`` if a module is not found at ``path``. 
- For more information see https://redis.io/commands/module-load + For more information see https://valkey.io/commands/module-load """ return self.execute_command("MODULE LOAD", path, *args) @@ -5877,7 +5877,7 @@ def module_loadex( """ Loads a module from a dynamic library at runtime with configuration directives. - For more information see https://redis.io/commands/module-loadex + For more information see https://valkey.io/commands/module-loadex """ pieces = [] if options is not None: @@ -5894,7 +5894,7 @@ def module_unload(self, name) -> ResponseT: Unloads the module ``name``. Raises ``ModuleError`` if ``name`` is not in loaded modules. - For more information see https://redis.io/commands/module-unload + For more information see https://valkey.io/commands/module-unload """ return self.execute_command("MODULE UNLOAD", name) @@ -5903,7 +5903,7 @@ def module_list(self) -> ResponseT: Returns a list of dictionaries containing the name and version of all loaded modules. - For more information see https://redis.io/commands/module-list + For more information see https://valkey.io/commands/module-list """ return self.execute_command("MODULE LIST") @@ -5944,8 +5944,8 @@ def __call__(self, keys=[], args=[], client=None): if client is None: client = self.registered_client args = tuple(keys) + tuple(args) - # make sure the Redis server knows about the script - from redis.client import Pipeline + # make sure the Valkey server knows about the script + from valkey.client import Pipeline if isinstance(client, Pipeline): # Make sure the pipeline can register the script before executing. @@ -5967,7 +5967,7 @@ def get_encoder(self): # DEPRECATED # In version <=4.1.2, this was the code we used to get the encoder. # However, after 4.1.2 we added support for scripting in clustered - # redis. ClusteredRedis doesn't have a `.connection_pool` attribute + # valkey. 
ClusteredValkey doesn't have a `.connection_pool` attribute # so we changed the Script class to use # `self.registered_client.get_encoder` (see above). # However, that is technically a breaking change, as consumers who @@ -5985,7 +5985,7 @@ async def command_info(self) -> None: class ClusterCommands(CommandsProtocol): """ - Class for Redis Cluster commands + Class for Valkey Cluster commands """ def cluster(self, cluster_arg, *args, **kwargs) -> ResponseT: @@ -5993,17 +5993,17 @@ def cluster(self, cluster_arg, *args, **kwargs) -> ResponseT: def readwrite(self, **kwargs) -> ResponseT: """ - Disables read queries for a connection to a Redis Cluster slave node. + Disables read queries for a connection to a Valkey Cluster slave node. - For more information see https://redis.io/commands/readwrite + For more information see https://valkey.io/commands/readwrite """ return self.execute_command("READWRITE", **kwargs) def readonly(self, **kwargs) -> ResponseT: """ - Enables read queries for a connection to a Redis Cluster replica node. + Enables read queries for a connection to a Valkey Cluster replica node. - For more information see https://redis.io/commands/readonly + For more information see https://valkey.io/commands/readonly """ return self.execute_command("READONLY", **kwargs) @@ -6013,21 +6013,21 @@ def readonly(self, **kwargs) -> ResponseT: class FunctionCommands: """ - Redis Function commands + Valkey Function commands """ def function_load( self, code: str, replace: Optional[bool] = False ) -> Union[Awaitable[str], str]: """ - Load a library to Redis. + Load a library to Valkey. :param code: the source code (must start with Shebang statement that provides a metadata about the library) :param replace: changes the behavior to overwrite the existing library with the new contents. Return the library name that was loaded. 
- For more information see https://redis.io/commands/function-load + For more information see https://valkey.io/commands/function-load """ pieces = ["REPLACE"] if replace else [] pieces.append(code) @@ -6037,7 +6037,7 @@ def function_delete(self, library: str) -> Union[Awaitable[str], str]: """ Delete the library called ``library`` and all its functions. - For more information see https://redis.io/commands/function-delete + For more information see https://valkey.io/commands/function-delete """ return self.execute_command("FUNCTION DELETE", library) @@ -6045,7 +6045,7 @@ def function_flush(self, mode: str = "SYNC") -> Union[Awaitable[str], str]: """ Deletes all the libraries. - For more information see https://redis.io/commands/function-flush + For more information see https://valkey.io/commands/function-flush """ return self.execute_command("FUNCTION FLUSH", mode) @@ -6074,7 +6074,7 @@ def fcall( """ Invoke a function. - For more information see https://redis.io/commands/fcall + For more information see https://valkey.io/commands/fcall """ return self._fcall("FCALL", function, numkeys, *keys_and_args) @@ -6085,7 +6085,7 @@ def fcall_ro( This is a read-only variant of the FCALL command that cannot execute commands that modify data. - For more information see https://redis.io/commands/fcal_ro + For more information see https://valkey.io/commands/fcall_ro """ return self._fcall("FCALL_RO", function, numkeys, *keys_and_args) @@ -6093,9 +6093,9 @@ def function_dump(self) -> Union[Awaitable[str], str]: """ Return the serialized payload of loaded libraries. - For more information see https://redis.io/commands/function-dump + For more information see https://valkey.io/commands/function-dump """ - from redis.client import NEVER_DECODE + from valkey.client import NEVER_DECODE options = {} options[NEVER_DECODE] = [] @@ -6110,7 +6110,7 @@ def function_restore( You can use the optional policy argument to provide a policy for handling existing libraries.
- For more information see https://redis.io/commands/function-restore + For more information see https://valkey.io/commands/function-restore """ return self.execute_command("FUNCTION RESTORE", payload, policy) @@ -6118,7 +6118,7 @@ def function_kill(self) -> Union[Awaitable[str], str]: """ Kill a function that is currently executing. - For more information see https://redis.io/commands/function-kill + For more information see https://valkey.io/commands/function-kill """ return self.execute_command("FUNCTION KILL") @@ -6127,7 +6127,7 @@ def function_stats(self) -> Union[Awaitable[List], List]: Return information about the function that's currently running and information about the available execution engines. - For more information see https://redis.io/commands/function-stats + For more information see https://valkey.io/commands/function-stats """ return self.execute_command("FUNCTION STATS") @@ -6150,7 +6150,7 @@ def tfunction_load( ``replace`` - an optional argument, instructs RedisGears to replace the function if its already exists - For more information see https://redis.io/commands/tfunction-load/ + For more information see https://valkey.io/commands/tfunction-load/ """ pieces = [] if replace: @@ -6166,7 +6166,7 @@ def tfunction_delete(self, lib_name: str) -> ResponseT: ``lib_name`` the library name to delete. 
- For more information see https://redis.io/commands/tfunction-delete/ + For more information see https://valkey.io/commands/tfunction-delete/ """ return self.execute_command("TFUNCTION DELETE", lib_name) @@ -6183,7 +6183,7 @@ def tfunction_list( ``verbose`` output verbosity level, higher number will increase verbosity level ``lib_name`` specifying a library name (can be used multiple times to show multiple libraries in a single command) # noqa - For more information see https://redis.io/commands/tfunction-list/ + For more information see https://valkey.io/commands/tfunction-list/ """ pieces = [] if with_code: @@ -6233,7 +6233,7 @@ def tfcall( ``keys`` - the keys that will be touched by the function. ``args`` - Additional argument to pass to the function. - For more information see https://redis.io/commands/tfcall/ + For more information see https://valkey.io/commands/tfcall/ """ return self._tfcall(lib_name, func_name, keys, False, *args) @@ -6252,7 +6252,7 @@ def tfcall_async( ``keys`` - the keys that will be touched by the function. ``args`` - Additional argument to pass to the function. - For more information see https://redis.io/commands/tfcall/ + For more information see https://valkey.io/commands/tfcall/ """ return self._tfcall(lib_name, func_name, keys, True, *args) @@ -6272,8 +6272,8 @@ class DataAccessCommands( SortedSetCommands, ): """ - A class containing all of the implemented data access redis commands. - This class is to be used as a mixin for synchronous Redis clients. + A class containing all of the implemented data access valkey commands. + This class is to be used as a mixin for synchronous Valkey clients. """ @@ -6289,8 +6289,8 @@ class AsyncDataAccessCommands( AsyncSortedSetCommands, ): """ - A class containing all of the implemented data access redis commands. - This class is to be used as a mixin for asynchronous Redis clients. + A class containing all of the implemented data access valkey commands. 
+ This class is to be used as a mixin for asynchronous Valkey clients. """ @@ -6306,8 +6306,8 @@ class CoreCommands( GearsCommands, ): """ - A class containing all of the implemented redis commands. This class is - to be used as a mixin for synchronous Redis clients. + A class containing all of the implemented valkey commands. This class is + to be used as a mixin for synchronous Valkey clients. """ @@ -6323,6 +6323,6 @@ class AsyncCoreCommands( AsyncGearsCommands, ): """ - A class containing all of the implemented redis commands. This class is - to be used as a mixin for asynchronous Redis clients. + A class containing all of the implemented valkey commands. This class is + to be used as a mixin for asynchronous Valkey clients. """ diff --git a/redis/commands/graph/__init__.py b/valkey/commands/graph/__init__.py similarity index 98% rename from redis/commands/graph/__init__.py rename to valkey/commands/graph/__init__.py index ffaf1fb4..d41a7df4 100644 --- a/redis/commands/graph/__init__.py +++ b/valkey/commands/graph/__init__.py @@ -22,8 +22,8 @@ def __init__(self, client, name=random_string()): """ warnings.warn( DeprecationWarning( - "RedisGraph support is deprecated as of Redis Stack 7.2 \ - (https://redis.com/blog/redisgraph-eol/)" + "RedisGraph support is deprecated as of Redis Stack 7.2 \ + (https://redis.com/blog/redisgraph-eol/)" ) ) self.NAME = name # Graph key diff --git a/redis/commands/graph/commands.py b/valkey/commands/graph/commands.py similarity index 90% rename from redis/commands/graph/commands.py rename to valkey/commands/graph/commands.py index 762ab42e..d50f5950 100644 --- a/redis/commands/graph/commands.py +++ b/valkey/commands/graph/commands.py @@ -1,5 +1,5 @@ -from redis import DataError -from redis.exceptions import ResponseError +from valkey import DataError +from valkey.exceptions import ResponseError from .exceptions import VersionMismatchException from .execution_plan import ExecutionPlan @@ -40,7 +40,7 @@ def commit(self): def
query(self, q, params=None, timeout=None, read_only=False, profile=False): """ Executes a query against the graph. - For more information see `GRAPH.QUERY `_. # noqa + For more information see `GRAPH.QUERY `_. # noqa Args: @@ -107,12 +107,12 @@ def merge(self, pattern): def delete(self): """ Deletes graph. - For more information see `DELETE `_. # noqa + For more information see `DELETE `_. # noqa """ self._clear_schema() return self.execute_command(DELETE_CMD, self.name) - # declared here, to override the built in redis.db.flush() + # declared here, to override the built in valkey.db.flush() def flush(self): """ Commit the graph and reset the edges and the nodes to zero length. @@ -125,7 +125,7 @@ def bulk(self, **kwargs): """Internal only. Not supported.""" raise NotImplementedError( "GRAPH.BULK is internal only. " - "Use https://github.com/redisgraph/redisgraph-bulk-loader." + "Use https://github.com/redisgraph/redisgraph-bulk-loader." ) def profile(self, query): @@ -134,7 +134,7 @@ def profile(self, query): for each operation's execution. Return a string representation of a query execution plan, with details on results produced by and time spent in each operation. - For more information see `GRAPH.PROFILE `_. # noqa + For more information see `GRAPH.PROFILE `_. # noqa """ return self.query(query, profile=True) @@ -142,7 +142,7 @@ def slowlog(self): """ Get a list containing up to 10 of the slowest queries issued against the given graph ID. - For more information see `GRAPH.SLOWLOG `_. # noqa + For more information see `GRAPH.SLOWLOG `_. # noqa Each item in the list has the following structure: 1. A unix timestamp at which the log entry was processed. @@ -155,7 +155,7 @@ def slowlog(self): def config(self, name, value=None, set=False): """ Retrieve or update a RedisGraph configuration. - For more information see `https://redis.io/commands/graph.config-get/>`_. # noqa + For more information see `https://valkey.io/commands/graph.config-get/>`_.
# noqa Args: @@ -179,7 +179,7 @@ def config(self, name, value=None, set=False): def list_keys(self): """ Lists all graph keys in the keyspace. - For more information see `GRAPH.LIST `_. # noqa + For more information see `GRAPH.LIST `_. # noqa """ return self.execute_command(LIST_CMD) @@ -203,7 +203,7 @@ def explain(self, query, params=None): """ Get the execution plan for given query, GRAPH.EXPLAIN returns ExecutionPlan object. - For more information see `GRAPH.EXPLAIN `_. # noqa + For more information see `GRAPH.EXPLAIN `_. # noqa Args: query: the query that will be executed @@ -219,7 +219,7 @@ class AsyncGraphCommands(GraphCommands): async def query(self, q, params=None, timeout=None, read_only=False, profile=False): """ Executes a query against the graph. - For more information see `GRAPH.QUERY `_. # noqa + For more information see `GRAPH.QUERY `_. # noqa Args: diff --git a/redis/commands/graph/edge.py b/valkey/commands/graph/edge.py similarity index 100% rename from redis/commands/graph/edge.py rename to valkey/commands/graph/edge.py diff --git a/redis/commands/graph/exceptions.py b/valkey/commands/graph/exceptions.py similarity index 100% rename from redis/commands/graph/exceptions.py rename to valkey/commands/graph/exceptions.py diff --git a/redis/commands/graph/execution_plan.py b/valkey/commands/graph/execution_plan.py similarity index 100% rename from redis/commands/graph/execution_plan.py rename to valkey/commands/graph/execution_plan.py diff --git a/redis/commands/graph/node.py b/valkey/commands/graph/node.py similarity index 100% rename from redis/commands/graph/node.py rename to valkey/commands/graph/node.py diff --git a/redis/commands/graph/path.py b/valkey/commands/graph/path.py similarity index 100% rename from redis/commands/graph/path.py rename to valkey/commands/graph/path.py diff --git a/redis/commands/graph/query_result.py b/valkey/commands/graph/query_result.py similarity index 99% rename from redis/commands/graph/query_result.py rename to 
valkey/commands/graph/query_result.py index 7c7f58b9..07f4f728 100644 --- a/redis/commands/graph/query_result.py +++ b/valkey/commands/graph/query_result.py @@ -3,7 +3,7 @@ from distutils.util import strtobool # from prettytable import PrettyTable -from redis import ResponseError +from valkey import ResponseError from .edge import Edge from .exceptions import VersionMismatchException diff --git a/redis/commands/helpers.py b/valkey/commands/helpers.py similarity index 95% rename from redis/commands/helpers.py rename to valkey/commands/helpers.py index 127141f6..5e8d028f 100644 --- a/redis/commands/helpers.py +++ b/valkey/commands/helpers.py @@ -3,8 +3,8 @@ import string from typing import List, Tuple -import redis -from redis.typing import KeysT, KeyT +import valkey +from valkey.typing import KeysT, KeyT def list_or_args(keys: KeysT, args: Tuple[KeyT, ...]) -> List[KeyT]: @@ -165,7 +165,7 @@ def stringify_param_value(value): def get_protocol_version(client): - if isinstance(client, redis.Redis) or isinstance(client, redis.asyncio.Redis): + if isinstance(client, valkey.Valkey) or isinstance(client, valkey.asyncio.Valkey): return client.connection_pool.connection_kwargs.get("protocol") - elif isinstance(client, redis.cluster.AbstractRedisCluster): + elif isinstance(client, valkey.cluster.AbstractValkeyCluster): return client.nodes_manager.connection_kwargs.get("protocol") diff --git a/redis/commands/json/__init__.py b/valkey/commands/json/__init__.py similarity index 95% rename from redis/commands/json/__init__.py rename to valkey/commands/json/__init__.py index 01077e6b..7a39fca6 100644 --- a/redis/commands/json/__init__.py +++ b/valkey/commands/json/__init__.py @@ -1,6 +1,6 @@ from json import JSONDecodeError, JSONDecoder, JSONEncoder -import redis +import valkey from ..helpers import get_protocol_version, nativestr from .commands import JSONCommands @@ -107,13 +107,13 @@ def pipeline(self, transaction=True, shard_hint=None): Usage example: - r = redis.Redis() + r = 
valkey.Valkey() pipe = r.json().pipeline() pipe.jsonset('foo', '.', {'hello!': 'world'}) pipe.jsonget('foo') pipe.jsonget('notakey') """ - if isinstance(self.client, redis.RedisCluster): + if isinstance(self.client, valkey.ValkeyCluster): p = ClusterPipeline( nodes_manager=self.client.nodes_manager, commands_parser=self.client.commands_parser, @@ -139,9 +139,9 @@ def pipeline(self, transaction=True, shard_hint=None): return p -class ClusterPipeline(JSONCommands, redis.cluster.ClusterPipeline): +class ClusterPipeline(JSONCommands, valkey.cluster.ClusterPipeline): """Cluster pipeline for the module.""" -class Pipeline(JSONCommands, redis.client.Pipeline): +class Pipeline(JSONCommands, valkey.client.Pipeline): """Pipeline for the module.""" diff --git a/redis/commands/json/_util.py b/valkey/commands/json/_util.py similarity index 100% rename from redis/commands/json/_util.py rename to valkey/commands/json/_util.py diff --git a/redis/commands/json/commands.py b/valkey/commands/json/commands.py similarity index 86% rename from redis/commands/json/commands.py rename to valkey/commands/json/commands.py index b37dc376..3b7ee09a 100644 --- a/redis/commands/json/commands.py +++ b/valkey/commands/json/commands.py @@ -2,8 +2,8 @@ from json import JSONDecodeError, loads from typing import Dict, List, Optional, Tuple, Union -from redis.exceptions import DataError -from redis.utils import deprecated_function +from valkey.exceptions import DataError +from valkey.utils import deprecated_function from ._util import JsonType from .decoders import decode_dict_keys @@ -19,7 +19,7 @@ def arrappend( """Append the objects ``args`` to the array under the ``path` in key ``name``. - For more information see `JSON.ARRAPPEND `_.. + For more information see `JSON.ARRAPPEND `_.. """ # noqa pieces = [name, str(path)] for o in args: @@ -41,7 +41,7 @@ def arrindex( The search can be limited using the optional inclusive ``start`` and exclusive ``stop`` indices. 
- For more information see `JSON.ARRINDEX `_. + For more information see `JSON.ARRINDEX `_. """ # noqa pieces = [name, str(path), self._encode(scalar)] if start is not None: @@ -57,7 +57,7 @@ def arrinsert( """Insert the objects ``args`` to the array at index ``index`` under the ``path` in key ``name``. - For more information see `JSON.ARRINSERT `_. + For more information see `JSON.ARRINSERT `_. """ # noqa pieces = [name, str(path), index] for o in args: @@ -70,7 +70,7 @@ def arrlen( """Return the length of the array JSON value under ``path`` at key``name``. - For more information see `JSON.ARRLEN `_. + For more information see `JSON.ARRLEN `_. """ # noqa return self.execute_command("JSON.ARRLEN", name, str(path), keys=[name]) @@ -83,7 +83,7 @@ def arrpop( """Pop the element at ``index`` in the array JSON value under ``path`` at key ``name``. - For more information see `JSON.ARRPOP `_. + For more information see `JSON.ARRPOP `_. """ # noqa return self.execute_command("JSON.ARRPOP", name, str(path), index) @@ -93,21 +93,21 @@ def arrtrim( """Trim the array JSON value under ``path`` at key ``name`` to the inclusive range given by ``start`` and ``stop``. - For more information see `JSON.ARRTRIM `_. + For more information see `JSON.ARRTRIM `_. """ # noqa return self.execute_command("JSON.ARRTRIM", name, str(path), start, stop) def type(self, name: str, path: Optional[str] = Path.root_path()) -> List[str]: """Get the type of the JSON value under ``path`` from key ``name``. - For more information see `JSON.TYPE `_. + For more information see `JSON.TYPE `_. """ # noqa return self.execute_command("JSON.TYPE", name, str(path), keys=[name]) def resp(self, name: str, path: Optional[str] = Path.root_path()) -> List: """Return the JSON value under ``path`` at key ``name``. - For more information see `JSON.RESP `_. + For more information see `JSON.RESP `_. 
""" # noqa return self.execute_command("JSON.RESP", name, str(path), keys=[name]) @@ -117,7 +117,7 @@ def objkeys( """Return the key names in the dictionary JSON value under ``path`` at key ``name``. - For more information see `JSON.OBJKEYS `_. + For more information see `JSON.OBJKEYS `_. """ # noqa return self.execute_command("JSON.OBJKEYS", name, str(path), keys=[name]) @@ -127,7 +127,7 @@ def objlen( """Return the length of the dictionary JSON value under ``path`` at key ``name``. - For more information see `JSON.OBJLEN `_. + For more information see `JSON.OBJLEN `_. """ # noqa return self.execute_command("JSON.OBJLEN", name, str(path), keys=[name]) @@ -135,7 +135,7 @@ def numincrby(self, name: str, path: str, number: int) -> str: """Increment the numeric (integer or floating point) JSON value under ``path`` at key ``name`` by the provided ``number``. - For more information see `JSON.NUMINCRBY `_. + For more information see `JSON.NUMINCRBY `_. """ # noqa return self.execute_command( "JSON.NUMINCRBY", name, str(path), self._encode(number) @@ -146,7 +146,7 @@ def nummultby(self, name: str, path: str, number: int) -> str: """Multiply the numeric (integer or floating point) JSON value under ``path`` at key ``name`` with the provided ``number``. - For more information see `JSON.NUMMULTBY `_. + For more information see `JSON.NUMMULTBY `_. """ # noqa return self.execute_command( "JSON.NUMMULTBY", name, str(path), self._encode(number) @@ -159,14 +159,14 @@ def clear(self, name: str, path: Optional[str] = Path.root_path()) -> int: Return the count of cleared paths (ignoring non-array and non-objects paths). - For more information see `JSON.CLEAR `_. + For more information see `JSON.CLEAR `_. """ # noqa return self.execute_command("JSON.CLEAR", name, str(path)) def delete(self, key: str, path: Optional[str] = Path.root_path()) -> int: """Delete the JSON value stored at key ``key`` under ``path``. - For more information see `JSON.DEL `_. 
+ For more information see `JSON.DEL `_. """ return self.execute_command("JSON.DEL", key, str(path)) @@ -183,7 +183,7 @@ def get( ```no_escape`` is a boolean flag to add no_escape option to get non-ascii characters - For more information see `JSON.GET `_. + For more information see `JSON.GET `_. """ # noqa pieces = [name] if no_escape: @@ -208,7 +208,7 @@ def mget(self, keys: List[str], path: str) -> List[JsonType]: Get the objects stored as a JSON values under ``path``. ``keys`` is a list of one or more keys. - For more information see `JSON.MGET `_. + For more information see `JSON.MGET `_. """ # noqa pieces = [] pieces += keys @@ -235,7 +235,7 @@ def set( For the purpose of using this within a pipeline, this command is also aliased to JSON.SET. - For more information see `JSON.SET `_. + For more information see `JSON.SET `_. """ if decode_keys: obj = decode_dict_keys(obj) @@ -264,7 +264,7 @@ def mset(self, triplets: List[Tuple[str, str, JsonType]]) -> Optional[str]: For the purpose of using this within a pipeline, this command is also aliased to JSON.MSET. - For more information see `JSON.MSET `_. + For more information see `JSON.MSET `_. """ pieces = [] for triplet in triplets: @@ -285,7 +285,7 @@ def merge( ``decode_keys`` If set to True, the keys of ``obj`` will be decoded with utf-8. - For more information see `JSON.MERGE `_. + For more information see `JSON.MERGE `_. """ if decode_keys: obj = decode_dict_keys(obj) @@ -361,7 +361,7 @@ def strlen(self, name: str, path: Optional[str] = None) -> List[Union[int, None] """Return the length of the string JSON value under ``path`` at key ``name``. - For more information see `JSON.STRLEN `_. + For more information see `JSON.STRLEN `_. """ # noqa pieces = [name] if path is not None: @@ -374,7 +374,7 @@ def toggle( """Toggle boolean value under ``path`` at key ``name``. returning the new value. - For more information see `JSON.TOGGLE `_. + For more information see `JSON.TOGGLE `_. 
""" # noqa return self.execute_command("JSON.TOGGLE", name, str(path)) @@ -385,7 +385,7 @@ def strappend( the key name, the path is determined to be the first. If a single option is passed, then the root_path (i.e Path.root_path()) is used. - For more information see `JSON.STRAPPEND `_. + For more information see `JSON.STRAPPEND `_. """ # noqa pieces = [name, str(path), self._encode(value)] return self.execute_command("JSON.STRAPPEND", *pieces) @@ -399,7 +399,7 @@ def debug( """Return the memory usage in bytes of a value under ``path`` from key ``name``. - For more information see `JSON.DEBUG `_. + For more information see `JSON.DEBUG `_. """ # noqa valid_subcommands = ["MEMORY", "HELP"] if subcommand not in valid_subcommands: diff --git a/redis/commands/json/decoders.py b/valkey/commands/json/decoders.py similarity index 100% rename from redis/commands/json/decoders.py rename to valkey/commands/json/decoders.py diff --git a/redis/commands/json/path.py b/valkey/commands/json/path.py similarity index 100% rename from redis/commands/json/path.py rename to valkey/commands/json/path.py diff --git a/redis/commands/search/__init__.py b/valkey/commands/search/__init__.py similarity index 97% rename from redis/commands/search/__init__.py rename to valkey/commands/search/__init__.py index a2bb23b7..205a36f1 100644 --- a/redis/commands/search/__init__.py +++ b/valkey/commands/search/__init__.py @@ -1,4 +1,4 @@ -import redis +import valkey from ...asyncio.client import Pipeline as AsyncioPipeline from .commands import ( @@ -92,7 +92,7 @@ def __init__(self, client, index_name="idx"): Create a new Client for the given index_name. 
The default name is `idx` - If conn is not None, we employ an already existing redis connection + If conn is not None, we employ an already existing valkey connection """ self._MODULE_CALLBACKS = {} self.client = client @@ -181,7 +181,7 @@ def pipeline(self, transaction=True, shard_hint=None): return p -class Pipeline(SearchCommands, redis.client.Pipeline): +class Pipeline(SearchCommands, valkey.client.Pipeline): """Pipeline for the module.""" diff --git a/redis/commands/search/_util.py b/valkey/commands/search/_util.py similarity index 100% rename from redis/commands/search/_util.py rename to valkey/commands/search/_util.py diff --git a/redis/commands/search/aggregation.py b/valkey/commands/search/aggregation.py similarity index 99% rename from redis/commands/search/aggregation.py rename to valkey/commands/search/aggregation.py index 50d18f47..45172380 100644 --- a/redis/commands/search/aggregation.py +++ b/valkey/commands/search/aggregation.py @@ -19,7 +19,7 @@ class Reducer: """ Base reducer object for all reducers. - See the `redisearch.reducers` module for the actual reducers. + See the `valkey.commands.search.reducers` module for the actual reducers. """ NAME = None diff --git a/redis/commands/search/commands.py b/valkey/commands/search/commands.py similarity index 90% rename from redis/commands/search/commands.py rename to valkey/commands/search/commands.py index 2df2b5a7..f90edaf6 100644 --- a/redis/commands/search/commands.py +++ b/valkey/commands/search/commands.py @@ -2,8 +2,8 @@ import time from typing import Dict, List, Optional, Union -from redis.client import Pipeline -from redis.utils import deprecated_function +from valkey.client import Pipeline +from valkey.utils import deprecated_function from ..helpers import get_protocol_version, parse_to_dict from ._util import to_string @@ -186,7 +186,7 @@ def create_index( in the index. - **skip_initial_scan**: If true, we do not scan and index. - For more information see `FT.CREATE `_.
+ For more information see `FT.CREATE `_. """ # noqa args = [CREATE_CMD, self.index_name] @@ -214,9 +214,9 @@ def create_index( args.append("SCHEMA") try: - args += list(itertools.chain(*(f.redis_args() for f in fields))) + args += list(itertools.chain(*(f.valkey_args() for f in fields))) except TypeError: - args += fields.redis_args() + args += fields.valkey_args() return self.execute_command(*args) @@ -229,14 +229,14 @@ def alter_schema_add(self, fields: List[str]): - **fields**: a list of Field objects to add for the index - For more information see `FT.ALTER `_. + For more information see `FT.ALTER `_. """ # noqa args = [ALTER_CMD, self.index_name, "SCHEMA", "ADD"] try: - args += list(itertools.chain(*(f.redis_args() for f in fields))) + args += list(itertools.chain(*(f.valkey_args() for f in fields))) except TypeError: - args += fields.redis_args() + args += fields.valkey_args() return self.execute_command(*args) @@ -250,7 +250,7 @@ def dropindex(self, delete_documents: bool = False): - **delete_documents**: If `True`, all documents will be deleted. - For more information see `FT.DROPINDEX `_. + For more information see `FT.DROPINDEX `_. 
""" # noqa delete_str = "DD" if delete_documents else "" return self.execute_command(DROPINDEX_CMD, self.index_name, delete_str) @@ -318,7 +318,7 @@ def _add_document_hash( return self.execute_command(*args) @deprecated_function( - version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead" + version="2.0.0", reason="deprecated since valkeyearch 2.0, call hset instead" ) def add_document( self, @@ -374,7 +374,7 @@ def add_document( ) @deprecated_function( - version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead" + version="2.0.0", reason="deprecated since valkeyearch 2.0, call hset instead" ) def add_document_hash(self, doc_id, score=1.0, language=None, replace=False): """ @@ -383,7 +383,7 @@ def add_document_hash(self, doc_id, score=1.0, language=None, replace=False): ### Parameters - **doc_id**: the document's id. This has to be an existing HASH key - in Redis that will hold the fields the index needs. + in Valkey that will hold the fields the index needs. - **score**: the document ranking, between 0.0 and 1.0 - **replace**: if True, and the document already is in the index, we perform an update and reindex the document @@ -444,7 +444,7 @@ def info(self): Get info an stats about the the current index, including the number of documents, memory consumption, etc - For more information see `FT.INFO `_. + For more information see `FT.INFO `_. """ res = self.execute_command(INFO_CMD, self.index_name) @@ -494,7 +494,7 @@ def search( default parameters, or a Query object for complex queries. See RediSearch's documentation on query format - For more information see `FT.SEARCH `_. + For more information see `FT.SEARCH `_. """ # noqa args, query = self._mk_query_args(query, query_params=query_params) st = time.time() @@ -514,7 +514,7 @@ def explain( ): """Returns the execution plan for a complex query. - For more information see `FT.EXPLAIN `_. + For more information see `FT.EXPLAIN `_. 
""" # noqa args, query_text = self._mk_query_args(query, query_params=query_params) return self.execute_command(EXPLAIN_CMD, *args) @@ -537,7 +537,7 @@ def aggregate( An `AggregateResult` object is returned. You can access the rows from its `rows` property, which will always yield the rows of the result. - For more information see `FT.AGGREGATE `_. + For more information see `FT.AGGREGATE `_. """ # noqa if isinstance(query, AggregateRequest): has_cursor = bool(query._cursor) @@ -628,7 +628,7 @@ def spellcheck(self, query, distance=None, include=None, exclude=None): **include**: specifies an inclusion custom dictionary. **exclude**: specifies an exclusion custom dictionary. - For more information see `FT.SPELLCHECK `_. + For more information see `FT.SPELLCHECK `_. """ # noqa cmd = [SPELLCHECK_CMD, self.index_name, query] if distance: @@ -652,7 +652,7 @@ def dict_add(self, name: str, *terms: List[str]): - **name**: Dictionary name. - **terms**: List of items for adding to the dictionary. - For more information see `FT.DICTADD `_. + For more information see `FT.DICTADD `_. """ # noqa cmd = [DICT_ADD_CMD, name] cmd.extend(terms) @@ -666,7 +666,7 @@ def dict_del(self, name: str, *terms: List[str]): - **name**: Dictionary name. - **terms**: List of items for removing from the dictionary. - For more information see `FT.DICTDEL `_. + For more information see `FT.DICTDEL `_. """ # noqa cmd = [DICT_DEL_CMD, name] cmd.extend(terms) @@ -679,7 +679,7 @@ def dict_dump(self, name: str): - **name**: Dictionary name. - For more information see `FT.DICTDUMP `_. + For more information see `FT.DICTDUMP `_. """ # noqa cmd = [DICT_DUMP_CMD, name] return self.execute_command(*cmd) @@ -692,7 +692,7 @@ def config_set(self, option: str, value: str) -> bool: - **option**: the name of the configuration option. - **value**: a value for the configuration option. - For more information see `FT.CONFIG SET `_. + For more information see `FT.CONFIG SET `_. 
""" # noqa cmd = [CONFIG_CMD, "SET", option, value] raw = self.execute_command(*cmd) @@ -705,7 +705,7 @@ def config_get(self, option: str) -> str: - **option**: the name of the configuration option. - For more information see `FT.CONFIG GET `_. + For more information see `FT.CONFIG GET `_. """ # noqa cmd = [CONFIG_CMD, "GET", option] res = self.execute_command(*cmd) @@ -719,7 +719,7 @@ def tagvals(self, tagfield: str): - **tagfield**: Tag field name - For more information see `FT.TAGVALS `_. + For more information see `FT.TAGVALS `_. """ # noqa return self.execute_command(TAGVALS_CMD, self.index_name, tagfield) @@ -732,7 +732,7 @@ def aliasadd(self, alias: str): - **alias**: Name of the alias to create - For more information see `FT.ALIASADD `_. + For more information see `FT.ALIASADD `_. """ # noqa return self.execute_command(ALIAS_ADD_CMD, alias, self.index_name) @@ -745,7 +745,7 @@ def aliasupdate(self, alias: str): - **alias**: Name of the alias to create - For more information see `FT.ALIASUPDATE `_. + For more information see `FT.ALIASUPDATE `_. """ # noqa return self.execute_command(ALIAS_UPDATE_CMD, alias, self.index_name) @@ -758,7 +758,7 @@ def aliasdel(self, alias: str): - **alias**: Name of the alias to delete - For more information see `FT.ALIASDEL `_. + For more information see `FT.ALIASDEL `_. """ # noqa return self.execute_command(ALIAS_DEL_CMD, alias) @@ -769,7 +769,7 @@ def sugadd(self, key, *suggestions, **kwargs): If kwargs["increment"] is true and the terms are already in the server's dictionary, we increment their scores. - For more information see `FT.SUGADD `_. + For more information see `FT.SUGADD `_. """ # noqa # If Transaction is not False it will MULTI/EXEC which will error pipe = self.pipeline(transaction=False) @@ -789,7 +789,7 @@ def suglen(self, key: str) -> int: """ Return the number of entries in the AutoCompleter index. - For more information see `FT.SUGLEN `_. + For more information see `FT.SUGLEN `_. 
""" # noqa return self.execute_command(SUGLEN_COMMAND, key) @@ -798,7 +798,7 @@ def sugdel(self, key: str, string: str) -> int: Delete a string from the AutoCompleter index. Returns 1 if the string was found and deleted, 0 otherwise. - For more information see `FT.SUGDEL `_. + For more information see `FT.SUGDEL `_. """ # noqa return self.execute_command(SUGDEL_COMMAND, key, string) @@ -840,7 +840,7 @@ def sugget( A list of Suggestion objects. If with_scores was False, the score of all suggestions is 1. - For more information see `FT.SUGGET `_. + For more information see `FT.SUGGET `_. """ # noqa args = [SUGGET_COMMAND, key, prefix, "MAX", num] if fuzzy: @@ -874,7 +874,7 @@ def synupdate(self, groupid: str, skipinitial: bool = False, *terms: List[str]): terms : The terms. - For more information see `FT.SYNUPDATE `_. + For more information see `FT.SYNUPDATE `_. """ # noqa cmd = [SYNUPDATE_CMD, self.index_name, groupid] if skipinitial: @@ -889,7 +889,7 @@ def syndump(self): The command is used to dump the synonyms data structure. Returns a list of synonym terms and their synonym group ids. - For more information see `FT.SYNDUMP `_. + For more information see `FT.SYNDUMP `_. """ # noqa res = self.execute_command(SYNDUMP_CMD, self.index_name) return self._parse_results(SYNDUMP_CMD, res) @@ -901,7 +901,7 @@ async def info(self): Get info an stats about the the current index, including the number of documents, memory consumption, etc - For more information see `FT.INFO `_. + For more information see `FT.INFO `_. """ res = await self.execute_command(INFO_CMD, self.index_name) @@ -921,7 +921,7 @@ async def search( default parameters, or a Query object for complex queries. See RediSearch's documentation on query format - For more information see `FT.SEARCH `_. + For more information see `FT.SEARCH `_. 
""" # noqa args, query = self._mk_query_args(query, query_params=query_params) st = time.time() @@ -949,7 +949,7 @@ async def aggregate( An `AggregateResult` object is returned. You can access the rows from its `rows` property, which will always yield the rows of the result. - For more information see `FT.AGGREGATE `_. + For more information see `FT.AGGREGATE `_. """ # noqa if isinstance(query, AggregateRequest): has_cursor = bool(query._cursor) @@ -978,7 +978,7 @@ async def spellcheck(self, query, distance=None, include=None, exclude=None): **include**: specifies an inclusion custom dictionary. **exclude**: specifies an exclusion custom dictionary. - For more information see `FT.SPELLCHECK `_. + For more information see `FT.SPELLCHECK `_. """ # noqa cmd = [SPELLCHECK_CMD, self.index_name, query] if distance: @@ -1002,7 +1002,7 @@ async def config_set(self, option: str, value: str) -> bool: - **option**: the name of the configuration option. - **value**: a value for the configuration option. - For more information see `FT.CONFIG SET `_. + For more information see `FT.CONFIG SET `_. """ # noqa cmd = [CONFIG_CMD, "SET", option, value] raw = await self.execute_command(*cmd) @@ -1015,7 +1015,7 @@ async def config_get(self, option: str) -> str: - **option**: the name of the configuration option. - For more information see `FT.CONFIG GET `_. + For more information see `FT.CONFIG GET `_. """ # noqa cmd = [CONFIG_CMD, "GET", option] res = {} @@ -1044,7 +1044,7 @@ async def sugadd(self, key, *suggestions, **kwargs): If kwargs["increment"] is true and the terms are already in the server's dictionary, we increment their scores. - For more information see `FT.SUGADD `_. + For more information see `FT.SUGADD `_. """ # noqa # If Transaction is not False it will MULTI/EXEC which will error pipe = self.pipeline(transaction=False) @@ -1098,7 +1098,7 @@ async def sugget( A list of Suggestion objects. If with_scores was False, the score of all suggestions is 1. 
- For more information see `FT.SUGGET `_. + For more information see `FT.SUGGET `_. """ # noqa args = [SUGGET_COMMAND, key, prefix, "MAX", num] if fuzzy: diff --git a/redis/commands/search/document.py b/valkey/commands/search/document.py similarity index 100% rename from redis/commands/search/document.py rename to valkey/commands/search/document.py diff --git a/redis/commands/search/field.py b/valkey/commands/search/field.py similarity index 94% rename from redis/commands/search/field.py rename to valkey/commands/search/field.py index f316ed9f..72907ae4 100644 --- a/redis/commands/search/field.py +++ b/valkey/commands/search/field.py @@ -1,6 +1,6 @@ from typing import List -from redis import DataError +from valkey import DataError class Field: @@ -41,7 +41,7 @@ def __init__( def append_arg(self, value): self.args.append(value) - def redis_args(self): + def valkey_args(self): args = [self.name] if self.as_name: args += [self.AS, self.as_name] @@ -119,7 +119,7 @@ def __init__(self, name: str, **kwargs): class TagField(Field): """ TagField is a tag-indexing field with simpler compression and tokenization. - See http://redisearch.io/Tags/ + See http://redisearch.io/Tags/ """ SEPARATOR = "SEPARATOR" @@ -145,7 +145,7 @@ def __init__( class VectorField(Field): """ Allows vector similarity queries against the value in this attribute. - See https://oss.redis.com/redisearch/Vectors/#vector_fields. + See https://oss.redis.com/redisearch/Vectors/#vector_fields. """ def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs): @@ -159,7 +159,7 @@ def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs): ``attributes`` each algorithm can have specific attributes. Some of them are mandatory and some of them are optional. See - https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm + https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm for more information. 
""" sort = kwargs.get("sortable", False) diff --git a/redis/commands/search/indexDefinition.py b/valkey/commands/search/indexDefinition.py similarity index 100% rename from redis/commands/search/indexDefinition.py rename to valkey/commands/search/indexDefinition.py diff --git a/redis/commands/search/query.py b/valkey/commands/search/query.py similarity index 99% rename from redis/commands/search/query.py rename to valkey/commands/search/query.py index 113ddf9d..3bdac876 100644 --- a/redis/commands/search/query.py +++ b/valkey/commands/search/query.py @@ -169,7 +169,7 @@ def scorer(self, scorer: str) -> "Query": return self def get_args(self) -> List[str]: - """Format the redis arguments for this query and return them.""" + """Format the valkey arguments for this query and return them.""" args = [self._query_string] args += self._get_args_tags() args += self._summarize_fields + self._highlight_fields diff --git a/redis/commands/search/querystring.py b/valkey/commands/search/querystring.py similarity index 100% rename from redis/commands/search/querystring.py rename to valkey/commands/search/querystring.py diff --git a/redis/commands/search/reducers.py b/valkey/commands/search/reducers.py similarity index 98% rename from redis/commands/search/reducers.py rename to valkey/commands/search/reducers.py index 8b60f232..694558de 100644 --- a/redis/commands/search/reducers.py +++ b/valkey/commands/search/reducers.py @@ -4,7 +4,7 @@ class FieldOnlyReducer(Reducer): - """See https://redis.io/docs/interact/search-and-query/search/aggregations/""" + """See https://valkey.io/docs/interact/search-and-query/search/aggregations/""" def __init__(self, field: str) -> None: super().__init__(field) diff --git a/redis/commands/search/result.py b/valkey/commands/search/result.py similarity index 100% rename from redis/commands/search/result.py rename to valkey/commands/search/result.py diff --git a/redis/commands/search/suggestion.py b/valkey/commands/search/suggestion.py similarity 
index 100% rename from redis/commands/search/suggestion.py rename to valkey/commands/search/suggestion.py diff --git a/redis/commands/sentinel.py b/valkey/commands/sentinel.py similarity index 95% rename from redis/commands/sentinel.py rename to valkey/commands/sentinel.py index f7457579..87cb0994 100644 --- a/redis/commands/sentinel.py +++ b/valkey/commands/sentinel.py @@ -3,12 +3,12 @@ class SentinelCommands: """ - A class containing the commands specific to redis sentinel. This class is + A class containing the commands specific to valkey sentinel. This class is to be used as a mixin. """ def sentinel(self, *args): - """Redis Sentinel's SENTINEL command.""" + """Valkey Sentinel's SENTINEL command.""" warnings.warn(DeprecationWarning("Use the individual sentinel_* methods")) def sentinel_get_master_addr_by_name(self, service_name): @@ -95,5 +95,5 @@ def sentinel_flushconfig(self): class AsyncSentinelCommands(SentinelCommands): async def sentinel(self, *args) -> None: - """Redis Sentinel's SENTINEL command.""" + """Valkey Sentinel's SENTINEL command.""" super().sentinel(*args) diff --git a/redis/commands/timeseries/__init__.py b/valkey/commands/timeseries/__init__.py similarity index 89% rename from redis/commands/timeseries/__init__.py rename to valkey/commands/timeseries/__init__.py index 4188b93d..c56ba907 100644 --- a/redis/commands/timeseries/__init__.py +++ b/valkey/commands/timeseries/__init__.py @@ -1,5 +1,5 @@ -import redis -from redis._parsers.helpers import bool_ok +import valkey +from valkey._parsers.helpers import bool_ok from ..helpers import get_protocol_version, parse_to_list from .commands import ( @@ -24,7 +24,7 @@ class TimeSeries(TimeSeriesCommands): """ - This class subclasses redis-py's `Redis` and implements RedisTimeSeries's + This class subclasses valkey-py's `Valkey` and implements RedisTimeSeries's commands (prefixed with "ts"). The client allows to interact with RedisTimeSeries and use all of it's functionality. 
@@ -70,14 +70,14 @@ def pipeline(self, transaction=True, shard_hint=None): Usage example: - r = redis.Redis() + r = valkey.Valkey() pipe = r.ts().pipeline() for i in range(100): pipeline.add("with_pipeline", i, 1.1 * i) pipeline.execute() """ - if isinstance(self.client, redis.RedisCluster): + if isinstance(self.client, valkey.ValkeyCluster): p = ClusterPipeline( nodes_manager=self.client.nodes_manager, commands_parser=self.client.commands_parser, @@ -100,9 +100,9 @@ def pipeline(self, transaction=True, shard_hint=None): return p -class ClusterPipeline(TimeSeriesCommands, redis.cluster.ClusterPipeline): +class ClusterPipeline(TimeSeriesCommands, valkey.cluster.ClusterPipeline): """Cluster pipeline for the module.""" -class Pipeline(TimeSeriesCommands, redis.client.Pipeline): +class Pipeline(TimeSeriesCommands, valkey.client.Pipeline): """Pipeline for the module.""" diff --git a/redis/commands/timeseries/commands.py b/valkey/commands/timeseries/commands.py similarity index 96% rename from redis/commands/timeseries/commands.py rename to valkey/commands/timeseries/commands.py index 208ddfb0..4e55b1ad 100644 --- a/redis/commands/timeseries/commands.py +++ b/valkey/commands/timeseries/commands.py @@ -1,7 +1,7 @@ from typing import Dict, List, Optional, Tuple, Union -from redis.exceptions import DataError -from redis.typing import KeyT, Number +from valkey.exceptions import DataError +from valkey.typing import KeyT, Number ADD_CMD = "TS.ADD" ALTER_CMD = "TS.ALTER" @@ -63,7 +63,7 @@ def create( the updated value is equal to (previous + new). If no previous sample \ exists, set the updated value equal to the new value. - For more information: https://redis.io/commands/ts.create/ + For more information: https://valkey.io/commands/ts.create/ """ # noqa params = [key] self._append_retention(params, retention_msecs) @@ -110,7 +110,7 @@ def alter( the updated value is equal to (previous + new). If no previous sample \ exists, set the updated value equal to the new value. 
- For more information: https://redis.io/commands/ts.alter/ + For more information: https://valkey.io/commands/ts.alter/ """ # noqa params = [key] self._append_retention(params, retention_msecs) @@ -164,7 +164,7 @@ def add( the updated value is equal to (previous + new). If no previous sample \ exists, set the updated value equal to the new value. - For more information: https://redis.io/commands/ts.add/ + For more information: https://valkey.io/commands/ts.add/ """ # noqa params = [key, timestamp, value] self._append_retention(params, retention_msecs) @@ -182,7 +182,7 @@ def madd(self, ktv_tuples: List[Tuple[KeyT, Union[int, str], Number]]): Expects a list of `tuples` as (`key`,`timestamp`, `value`). Return value is an array with timestamps of insertions. - For more information: https://redis.io/commands/ts.madd/ + For more information: https://valkey.io/commands/ts.madd/ """ # noqa params = [] for ktv in ktv_tuples: @@ -222,7 +222,7 @@ def incrby( chunk_size: Memory size, in bytes, allocated for each data chunk. - For more information: https://redis.io/commands/ts.incrby/ + For more information: https://valkey.io/commands/ts.incrby/ """ # noqa params = [key, value] self._append_timestamp(params, timestamp) @@ -265,7 +265,7 @@ def decrby( chunk_size: Memory size, in bytes, allocated for each data chunk. - For more information: https://redis.io/commands/ts.decrby/ + For more information: https://valkey.io/commands/ts.decrby/ """ # noqa params = [key, value] self._append_timestamp(params, timestamp) @@ -289,7 +289,7 @@ def delete(self, key: KeyT, from_time: int, to_time: int): to_time: End timestamp for the range deletion. - For more information: https://redis.io/commands/ts.del/ + For more information: https://valkey.io/commands/ts.del/ """ # noqa return self.execute_command(DEL_CMD, key, from_time, to_time) @@ -320,7 +320,7 @@ def createrule( Assure that there is a bucket that starts at exactly align_timestamp and align all other buckets accordingly. 
- For more information: https://redis.io/commands/ts.createrule/ + For more information: https://valkey.io/commands/ts.createrule/ """ # noqa params = [source_key, dest_key] self._append_aggregation(params, aggregation_type, bucket_size_msec) @@ -333,7 +333,7 @@ def deleterule(self, source_key: KeyT, dest_key: KeyT): """ Delete a compaction rule from `source_key` to `dest_key`.. - For more information: https://redis.io/commands/ts.deleterule/ + For more information: https://valkey.io/commands/ts.deleterule/ """ # noqa return self.execute_command(DELETERULE_CMD, source_key, dest_key) @@ -417,7 +417,7 @@ def range( empty: Reports aggregations for empty buckets. - For more information: https://redis.io/commands/ts.range/ + For more information: https://valkey.io/commands/ts.range/ """ # noqa params = self.__range_params( key, @@ -489,7 +489,7 @@ def revrange( empty: Reports aggregations for empty buckets. - For more information: https://redis.io/commands/ts.revrange/ + For more information: https://valkey.io/commands/ts.revrange/ """ # noqa params = self.__range_params( key, @@ -608,7 +608,7 @@ def mrange( empty: Reports aggregations for empty buckets. - For more information: https://redis.io/commands/ts.mrange/ + For more information: https://valkey.io/commands/ts.mrange/ """ # noqa params = self.__mrange_params( aggregation_type, @@ -696,7 +696,7 @@ def mrevrange( empty: Reports aggregations for empty buckets. 
- For more information: https://redis.io/commands/ts.mrevrange/ + For more information: https://valkey.io/commands/ts.mrevrange/ """ # noqa params = self.__mrange_params( aggregation_type, @@ -726,7 +726,7 @@ def get(self, key: KeyT, latest: Optional[bool] = False): `latest` used when a time series is a compaction, reports the compacted value of the latest (possibly partial) bucket - For more information: https://redis.io/commands/ts.get/ + For more information: https://valkey.io/commands/ts.get/ """ # noqa params = [key] self._append_latest(params, latest) @@ -755,7 +755,7 @@ def mget( Used when a time series is a compaction, reports the compacted value of the latest possibly partial bucket - For more information: https://redis.io/commands/ts.mget/ + For more information: https://valkey.io/commands/ts.mget/ """ # noqa params = [] self._append_latest(params, latest) @@ -768,7 +768,7 @@ def info(self, key: KeyT): """# noqa Get information of `key`. - For more information: https://redis.io/commands/ts.info/ + For more information: https://valkey.io/commands/ts.info/ """ # noqa return self.execute_command(INFO_CMD, key, keys=[key]) @@ -776,7 +776,7 @@ def queryindex(self, filters: List[str]): """# noqa Get all time series keys matching the `filter` list. - For more information: https://redis.io/commands/ts.queryindex/ + For more information: https://valkey.io/commands/ts.queryindex/ """ # noq return self.execute_command(QUERYINDEX_CMD, *filters) diff --git a/redis/commands/timeseries/info.py b/valkey/commands/timeseries/info.py similarity index 95% rename from redis/commands/timeseries/info.py rename to valkey/commands/timeseries/info.py index 3a384dc0..afdf70ec 100644 --- a/redis/commands/timeseries/info.py +++ b/valkey/commands/timeseries/info.py @@ -6,7 +6,7 @@ class TSInfo: """ Hold information and statistics on the time-series. Can be created using ``tsinfo`` command - https://oss.redis.com/redistimeseries/commands/#tsinfo. 
+ https://oss.redis.com/redistimeseries/commands/#tsinfo. """ rules = [] @@ -57,7 +57,7 @@ def __init__(self, args): Policy that will define handling of duplicate samples. Can read more about on - https://oss.redis.com/redistimeseries/configuration/#duplicate_policy + https://oss.redis.com/redistimeseries/configuration/#duplicate_policy """ response = dict(zip(map(nativestr, args[::2]), args[1::2])) self.rules = response.get("rules") diff --git a/redis/commands/timeseries/utils.py b/valkey/commands/timeseries/utils.py similarity index 100% rename from redis/commands/timeseries/utils.py rename to valkey/commands/timeseries/utils.py diff --git a/redis/commands/redismodules.py b/valkey/commands/valkeymodules.py similarity index 83% rename from redis/commands/redismodules.py rename to valkey/commands/valkeymodules.py index 7e2045a7..6f747058 100644 --- a/redis/commands/redismodules.py +++ b/valkey/commands/valkeymodules.py @@ -1,13 +1,13 @@ from json import JSONDecoder, JSONEncoder -class RedisModuleCommands: - """This class contains the wrapper functions to bring supported redis +class ValkeyModuleCommands: + """This class contains the wrapper functions to bring supported valkey modules into the command namespace. """ def json(self, encoder=JSONEncoder(), decoder=JSONDecoder()): - """Access the json namespace, providing support for redis json.""" + """Access the json namespace, providing support for valkey json.""" from .json import JSON @@ -15,7 +15,7 @@ def json(self, encoder=JSONEncoder(), decoder=JSONDecoder()): return jj def ft(self, index_name="idx"): - """Access the search namespace, providing support for redis search.""" + """Access the search namespace, providing support for valkey search.""" from .search import Search @@ -24,7 +24,7 @@ def ft(self, index_name="idx"): def ts(self): """Access the timeseries namespace, providing support for - redis timeseries data. + valkey timeseries data. 
""" from .timeseries import TimeSeries @@ -74,7 +74,7 @@ def tdigest(self): def graph(self, index_name="idx"): """Access the graph namespace, providing support for - redis graph data. + valkey graph data. """ from .graph import Graph @@ -83,9 +83,9 @@ def graph(self, index_name="idx"): return g -class AsyncRedisModuleCommands(RedisModuleCommands): +class AsyncValkeyModuleCommands(ValkeyModuleCommands): def ft(self, index_name="idx"): - """Access the search namespace, providing support for redis search.""" + """Access the search namespace, providing support for valkey search.""" from .search import AsyncSearch @@ -94,7 +94,7 @@ def ft(self, index_name="idx"): def graph(self, index_name="idx"): """Access the graph namespace, providing support for - redis graph data. + valkey graph data. """ from .graph import AsyncGraph diff --git a/redis/connection.py b/valkey/connection.py similarity index 94% rename from redis/connection.py rename to valkey/connection.py index f745ecc1..01d305fe 100644 --- a/redis/connection.py +++ b/valkey/connection.py @@ -28,9 +28,9 @@ ChildDeadlockedError, ConnectionError, DataError, - RedisError, ResponseError, TimeoutError, + ValkeyError, ) from .retry import Retry from .typing import KeysT, ResponseT @@ -64,7 +64,7 @@ class HiredisRespSerializer: def pack(self, *args: List): - """Pack a series of arguments into the Redis protocol""" + """Pack a series of arguments into the Valkey protocol""" output = [] if isinstance(args[0], str): @@ -86,10 +86,10 @@ def __init__(self, buffer_cutoff, encode) -> None: self.encode = encode def pack(self, *args): - """Pack a series of arguments into the Redis protocol""" + """Pack a series of arguments into the Valkey protocol""" output = [] # the client might have included 1 or more literal arguments in - # the command name, e.g., 'CONFIG GET'. The Redis server expects these + # the command name, e.g., 'CONFIG GET'. 
The Valkey server expects these # arguments to be sent separately, so split the first argument # manually. These arguments should be bytestrings so that they are # not encoded. @@ -132,7 +132,7 @@ def pack(self, *args): class AbstractConnection: - "Manages communication to and from a Redis server" + "Manages communication to and from a Valkey server" def __init__( self, @@ -149,11 +149,11 @@ def __init__( socket_read_size: int = 65536, health_check_interval: int = 0, client_name: Optional[str] = None, - lib_name: Optional[str] = "redis-py", + lib_name: Optional[str] = "valkey-py", lib_version: Optional[str] = get_lib_version(), username: Optional[str] = None, retry: Union[Any, None] = None, - redis_connect_func: Optional[Callable[[], None]] = None, + valkey_connect_func: Optional[Callable[[], None]] = None, credential_provider: Optional[CredentialProvider] = None, protocol: Optional[int] = 2, command_packer: Optional[Callable[[], None]] = None, @@ -210,7 +210,7 @@ def __init__( self.retry = Retry(NoBackoff(), 0) self.health_check_interval = health_check_interval self.next_health_check = 0 - self.redis_connect_func = redis_connect_func + self.valkey_connect_func = valkey_connect_func self.encoder = Encoder(encoding, encoding_errors, decode_responses) self._sock = None self._socket_read_size = socket_read_size @@ -236,7 +236,7 @@ def __init__( self.client_cache = client_cache if client_cache is not None else _cache if self.client_cache is not None: if self.protocol not in [3, "3"]: - raise RedisError( + raise ValkeyError( "client caching is only supported with protocol version 3 or higher" ) self.cache_deny_list = cache_deny_list @@ -296,7 +296,7 @@ def set_parser(self, parser_class): self._parser = parser_class(socket_read_size=self._socket_read_size) def connect(self): - "Connects to the Redis server if not already connected" + "Connects to the Valkey server if not already connected" if self._sock: return try: @@ -310,13 +310,13 @@ def connect(self): self._sock = 
sock try: - if self.redis_connect_func is None: + if self.valkey_connect_func is None: # Use the default on_connect function self.on_connect() else: - # Use the passed function redis_connect_func - self.redis_connect_func(self) - except RedisError: + # Use the passed function valkey_connect_func + self.valkey_connect_func(self) + except ValkeyError: # clean up after any error in on_connect self.disconnect() raise @@ -380,7 +380,7 @@ def on_connect(self): try: auth_response = self.read_response() except AuthenticationWrongNumberOfArgsError: - # a username and password were specified but the Redis + # a username and password were specified but the Valkey # server seems to be < 6.0.0 which expects a single password # arg. retry auth with just the password. # https://github.com/andymccurdy/redis-py/issues/1274 @@ -439,7 +439,7 @@ def on_connect(self): self._parser.set_invalidation_push_handler(self._cache_invalidation_process) def disconnect(self, *args): - "Disconnects from the Redis server" + "Disconnects from the Valkey server" self._parser.on_disconnect() conn_sock = self._sock @@ -477,7 +477,7 @@ def check_health(self): self.retry.call_with_retry(self._send_ping, self._ping_failed) def send_packed_command(self, command, check_health=True): - """Send an already packed command to the Redis server""" + """Send an already packed command to the Valkey server""" if not self._sock: self.connect() # guard against health check recursion @@ -508,7 +508,7 @@ def send_packed_command(self, command, check_health=True): raise def send_command(self, *args, **kwargs): - """Pack and send a command to the Redis server""" + """Pack and send a command to the Valkey server""" self.send_packed_command( self._command_packer.pack(*args), check_health=kwargs.get("check_health", True), @@ -575,11 +575,11 @@ def read_response( return response def pack_command(self, *args): - """Pack a series of arguments into the Redis protocol""" + """Pack a series of arguments into the Valkey protocol""" 
return self._command_packer.pack(*args) def pack_commands(self, commands): - """Pack multiple commands into the Redis protocol""" + """Pack multiple commands into the Valkey protocol""" output = [] pieces = [] buffer_length = 0 @@ -612,7 +612,7 @@ def _cache_invalidation_process( self, data: List[Union[str, Optional[List[str]]]] ) -> None: """ - Invalidate (delete) all redis commands associated with a specific key. + Invalidate (delete) all valkey commands associated with a specific key. `data` is a list of strings, where the first string is the invalidation message and the second string is the list of keys to invalidate. (if the list of keys is None, then all keys are invalidated) @@ -665,7 +665,7 @@ def invalidate_key_from_cache(self, key: KeysT): class Connection(AbstractConnection): - "Manages TCP communication to and from a Redis server" + "Manages TCP communication to and from a Valkey server" def __init__( self, @@ -756,7 +756,7 @@ def _error_message(self, exception): class SSLConnection(Connection): - """Manages SSL connections to and from the Redis server(s). + """Manages SSL connections to and from the Valkey server(s). This class extends the Connection class, adding SSL functionality, and making use of ssl.SSLContext (https://docs.python.org/3/library/ssl.html#ssl.SSLContext) """ # noqa @@ -799,10 +799,10 @@ def __init__( ssl_ciphers: A string listing the ciphers that are allowed to be used. Defaults to None, which means that the default ciphers are used. See https://docs.python.org/3/library/ssl.html#ssl.SSLContext.set_ciphers for more information. 
Raises: - RedisError + ValkeyError """ # noqa if not SSL_AVAILABLE: - raise RedisError("Python wasn't built with SSL support") + raise ValkeyError("Python wasn't built with SSL support") self.keyfile = ssl_keyfile self.certfile = ssl_certfile @@ -815,7 +815,7 @@ def __init__( "required": ssl.CERT_REQUIRED, } if ssl_cert_reqs not in CERT_REQS: - raise RedisError( + raise ValkeyError( f"Invalid SSL Certificate Requirements Flag: {ssl_cert_reqs}" ) ssl_cert_reqs = CERT_REQS[ssl_cert_reqs] @@ -859,10 +859,10 @@ def _connect(self): context.set_ciphers(self.ssl_ciphers) sslsock = context.wrap_socket(sock, server_hostname=self.host) if self.ssl_validate_ocsp is True and CRYPTOGRAPHY_AVAILABLE is False: - raise RedisError("cryptography is not installed.") + raise ValkeyError("cryptography is not installed.") if self.ssl_validate_ocsp_stapled and self.ssl_validate_ocsp: - raise RedisError( + raise ValkeyError( "Either an OCSP staple or pure OCSP connection must be validated " "- not both." ) @@ -906,7 +906,7 @@ def _connect(self): class UnixDomainSocketConnection(AbstractConnection): - "Manages UDS communication to and from a Redis server" + "Manages UDS communication to and from a Valkey server" def __init__(self, path="", socket_timeout=None, **kwargs): self.path = path @@ -972,13 +972,13 @@ def to_bool(value): def parse_url(url): if not ( - url.startswith("redis://") - or url.startswith("rediss://") + url.startswith("valkey://") + or url.startswith("valkeys://") or url.startswith("unix://") ): raise ValueError( - "Redis URL must specify one of the following " - "schemes (redis://, rediss://, unix://)" + "Valkey URL must specify one of the following " + "schemes (valkey://, valkeys://, unix://)" ) url = urlparse(url) @@ -1001,13 +1001,13 @@ def parse_url(url): if url.password: kwargs["password"] = unquote(url.password) - # We only support redis://, rediss:// and unix:// schemes. + # We only support valkey://, valkeys:// and unix:// schemes. 
if url.scheme == "unix": if url.path: kwargs["path"] = unquote(url.path) kwargs["connection_class"] = UnixDomainSocketConnection - else: # implied: url.scheme in ("redis", "rediss"): + else: # implied: url.scheme in ("valkey", "valkeys"): if url.hostname: kwargs["host"] = unquote(url.hostname) if url.port: @@ -1021,7 +1021,7 @@ def parse_url(url): except (AttributeError, ValueError): pass - if url.scheme == "rediss": + if url.scheme == "valkeys": kwargs["connection_class"] = SSLConnection return kwargs @@ -1030,7 +1030,7 @@ def parse_url(url): class ConnectionPool: """ Create a connection pool. ``If max_connections`` is set, then this - object raises :py:class:`~redis.exceptions.ConnectionError` when the pool's + object raises :py:class:`~valkey.exceptions.ConnectionError` when the pool's limit is reached. By default, TCP connections are created unless ``connection_class`` @@ -1048,16 +1048,14 @@ def from_url(cls, url, **kwargs): For example:: - redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 + valkey://[[username]:[password]]@localhost:6379/0 + valkeys://[[username]:[password]]@localhost:6379/0 unix://[username@]/path/to/socket.sock?db=0[&password=password] Three URL schemes are supported: - - `redis://` creates a TCP socket connection. See more at: - - - `rediss://` creates a SSL wrapped TCP socket connection. See more at: - + - `valkey://` creates a TCP socket connection. + - `valkeys://` creates a SSL wrapped TCP socket connection. - ``unix://``: creates a Unix Domain Socket connection. The username, password, hostname, path and all querystring values @@ -1067,9 +1065,9 @@ def from_url(cls, url, **kwargs): There are several ways to specify a database number. The first value found will be used: - 1. A ``db`` querystring option, e.g. redis://localhost?db=0 - 2. If using the redis:// or rediss:// schemes, the path argument - of the url, e.g. redis://localhost/0 + 1. A ``db`` querystring option, e.g. 
valkey://localhost?db=0 + 2. If using the valkey:// or valkeys:// schemes, the path argument + of the url, e.g. valkey://localhost/0 3. A ``db`` keyword argument to this function. If none of these options are specified, the default db=0 is used. @@ -1172,7 +1170,7 @@ def _checkpid(self) -> None: # to mitigate this possible deadlock, _checkpid() will only wait 5 # seconds to acquire _fork_lock. if _fork_lock cannot be acquired in # that time it is assumed that the child is deadlocked and a - # redis.ChildDeadlockedError error is raised. + # valkey.ChildDeadlockedError error is raised. if self.pid != os.getpid(): acquired = self._fork_lock.acquire(timeout=5) if not acquired: @@ -1196,7 +1194,7 @@ def get_connection(self, command_name: str, *keys, **options) -> "Connection": self._in_use_connections.add(connection) try: - # ensure this connection is connected to Redis + # ensure this connection is connected to Valkey connection.connect() # if client caching is not enabled connections that the pool # provides should be ready to send a command. @@ -1318,18 +1316,18 @@ class BlockingConnectionPool(ConnectionPool): """ Thread-safe blocking connection pool:: - >>> from redis.client import Redis - >>> client = Redis(connection_pool=BlockingConnectionPool()) + >>> from valkey.client import Valkey + >>> client = Valkey(connection_pool=BlockingConnectionPool()) It performs the same function as the default - :py:class:`~redis.ConnectionPool` implementation, in that, + :py:class:`~valkey.ConnectionPool` implementation, in that, it maintains a pool of reusable connections that can be shared by - multiple redis clients (safely across threads if required). + multiple valkey clients (safely across threads if required). 
The difference is that, in the event that a client tries to get a connection from the pool when all of connections are in use, rather than - raising a :py:class:`~redis.ConnectionError` (as the default - :py:class:`~redis.ConnectionPool` implementation does), it + raising a :py:class:`~valkey.ConnectionError` (as the default + :py:class:`~valkey.ConnectionPool` implementation does), it makes the client wait ("blocks") for a specified number of seconds until a connection becomes available. @@ -1415,7 +1413,7 @@ def get_connection(self, command_name, *keys, **options): try: connection = self.pool.get(block=True, timeout=self.timeout) except Empty: - # Note that this is not caught by the redis client and will be + # Note that this is not caught by the valkey client and will be # raised unless handled by application code. If you want never to raise ConnectionError("No connection available.") @@ -1425,7 +1423,7 @@ def get_connection(self, command_name, *keys, **options): connection = self.make_connection() try: - # ensure this connection is connected to Redis + # ensure this connection is connected to Valkey connection.connect() # connections that the pool provides should be ready to send # a command. if not, the connection was either returned to the diff --git a/redis/crc.py b/valkey/crc.py similarity index 65% rename from redis/crc.py rename to valkey/crc.py index e2612411..f096da9e 100644 --- a/redis/crc.py +++ b/valkey/crc.py @@ -1,15 +1,15 @@ from binascii import crc_hqx -from redis.typing import EncodedT +from valkey.typing import EncodedT -# Redis Cluster's key space is divided into 16384 slots. +# Valkey Cluster's key space is divided into 16384 slots. 
# For more information see: https://github.com/redis/redis/issues/2576 -REDIS_CLUSTER_HASH_SLOTS = 16384 +VALKEY_CLUSTER_HASH_SLOTS = 16384 -__all__ = ["key_slot", "REDIS_CLUSTER_HASH_SLOTS"] +__all__ = ["key_slot", "VALKEY_CLUSTER_HASH_SLOTS"] -def key_slot(key: EncodedT, bucket: int = REDIS_CLUSTER_HASH_SLOTS) -> int: +def key_slot(key: EncodedT, bucket: int = VALKEY_CLUSTER_HASH_SLOTS) -> int: """Calculate key slot for a given key. See Keys distribution model in https://redis.io/topics/cluster-spec :param key - bytes diff --git a/redis/credentials.py b/valkey/credentials.py similarity index 100% rename from redis/credentials.py rename to valkey/credentials.py diff --git a/redis/exceptions.py b/valkey/exceptions.py similarity index 83% rename from redis/exceptions.py rename to valkey/exceptions.py index dcc06774..3ca8033d 100644 --- a/redis/exceptions.py +++ b/valkey/exceptions.py @@ -1,15 +1,15 @@ -"Core exceptions raised by the Redis client" +"Core exceptions raised by the Valkey client" -class RedisError(Exception): +class ValkeyError(Exception): pass -class ConnectionError(RedisError): +class ConnectionError(ValkeyError): pass -class TimeoutError(RedisError): +class TimeoutError(ValkeyError): pass @@ -25,23 +25,23 @@ class BusyLoadingError(ConnectionError): pass -class InvalidResponse(RedisError): +class InvalidResponse(ValkeyError): pass -class ResponseError(RedisError): +class ResponseError(ValkeyError): pass -class DataError(RedisError): +class DataError(ValkeyError): pass -class PubSubError(RedisError): +class PubSubError(ValkeyError): pass -class WatchError(RedisError): +class WatchError(ValkeyError): pass @@ -52,10 +52,10 @@ class NoScriptError(ResponseError): class OutOfMemoryError(ResponseError): """ Indicates the database is full. 
Can only occur when either: - * Redis maxmemory-policy=noeviction - * Redis maxmemory-policy=volatile* and there are no evictable keys + * Valkey maxmemory-policy=noeviction + * Valkey maxmemory-policy=volatile* and there are no evictable keys - For more information see `Memory optimization in Redis `_. # noqa + For more information see `Memory optimization in Valkey `_. # noqa """ pass @@ -77,7 +77,7 @@ class ModuleError(ResponseError): pass -class LockError(RedisError, ValueError): +class LockError(ValkeyError, ValueError): "Errors acquiring or releasing a lock" # NOTE: For backwards compatibility, this class derives from ValueError. # This was originally chosen to behave like threading.Lock. @@ -106,15 +106,15 @@ class AuthenticationWrongNumberOfArgsError(ResponseError): pass -class RedisClusterException(Exception): +class ValkeyClusterException(Exception): """ - Base exception for the RedisCluster client + Base exception for the ValkeyCluster client """ pass -class ClusterError(RedisError): +class ClusterError(ValkeyError): """ Cluster errors occurred multiple times, resulting in an exhaustion of the command execution TTL @@ -126,7 +126,7 @@ class ClusterError(RedisError): class ClusterDownError(ClusterError, ResponseError): """ Error indicated CLUSTERDOWN error received from cluster. - By default Redis Cluster nodes stop accepting queries if they detect there + By default Valkey Cluster nodes stop accepting queries if they detect there is at least a hash slot uncovered (no available node is serving it). This way if the cluster is partially down (for example a range of hash slots are no longer covered) the entire cluster eventually becomes @@ -205,7 +205,7 @@ class MasterDownError(ClusterDownError): pass -class SlotNotCoveredError(RedisClusterException): +class SlotNotCoveredError(ValkeyClusterException): """ This error only happens in the case where the connection pool will try to fetch what node that is covered by a given slot. 
diff --git a/redis/lock.py b/valkey/lock.py similarity index 92% rename from redis/lock.py rename to valkey/lock.py index cae7f27e..059b14a0 100644 --- a/redis/lock.py +++ b/valkey/lock.py @@ -4,13 +4,13 @@ from types import SimpleNamespace, TracebackType from typing import Optional, Type -from redis.exceptions import LockError, LockNotOwnedError -from redis.typing import Number +from valkey.exceptions import LockError, LockNotOwnedError +from valkey.typing import Number class Lock: """ - A shared, distributed Lock. Using Redis for locking allows the Lock + A shared, distributed Lock. Using Valkey for locking allows the Lock to be shared across processes and/or machines. It's left to the user to resolve deadlock issues and make sure @@ -75,7 +75,7 @@ class Lock: def __init__( self, - redis, + valkey, name: str, timeout: Optional[Number] = None, sleep: Number = 0.1, @@ -84,8 +84,8 @@ def __init__( thread_local: bool = True, ): """ - Create a new Lock instance named ``name`` using the Redis client - supplied by ``redis``. + Create a new Lock instance named ``name`` using the Valkey client + supplied by ``valkey``. ``timeout`` indicates a maximum life for the lock in seconds. By default, it will remain locked until release() is called. @@ -116,7 +116,7 @@ def __init__( thread-1 sets the token to "abc" time: 1, thread-2 blocks trying to acquire `my-lock` using the Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock + time: 5, thread-1 has not yet completed. valkey expires the lock key. time: 5, thread-2 acquired `my-lock` now that it's available. thread-2 sets the token to "xyz" @@ -133,7 +133,7 @@ def __init__( is that these cases aren't common and as such default to using thread local storage. 
""" - self.redis = redis + self.valkey = valkey self.name = name self.timeout = timeout self.sleep = sleep @@ -146,7 +146,7 @@ def __init__( def register_scripts(self) -> None: cls = self.__class__ - client = self.redis + client = self.valkey if cls.lua_release is None: cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT) if cls.lua_extend is None: @@ -178,7 +178,7 @@ def acquire( token: Optional[str] = None, ): """ - Use Redis to hold a shared, distributed lock named ``name``. + Use Valkey to hold a shared, distributed lock named ``name``. Returns True once the lock is acquired. If ``blocking`` is False, always return immediately. If the lock @@ -197,7 +197,7 @@ def acquire( if token is None: token = uuid.uuid1().hex.encode() else: - encoder = self.redis.get_encoder() + encoder = self.valkey.get_encoder() token = encoder.encode(token) if blocking is None: blocking = self.blocking @@ -223,7 +223,7 @@ def do_acquire(self, token: str) -> bool: timeout = int(self.timeout * 1000) else: timeout = None - if self.redis.set(self.name, token, nx=True, px=timeout): + if self.valkey.set(self.name, token, nx=True, px=timeout): return True return False @@ -231,17 +231,17 @@ def locked(self) -> bool: """ Returns True if this key is locked by any process, otherwise False. """ - return self.redis.get(self.name) is not None + return self.valkey.get(self.name) is not None def owned(self) -> bool: """ Returns True if this key is locked by this lock, otherwise False. 
""" - stored_token = self.redis.get(self.name) + stored_token = self.valkey.get(self.name) # need to always compare bytes to bytes # TODO: this can be simplified when the context manager is finished if stored_token and not isinstance(stored_token, bytes): - encoder = self.redis.get_encoder() + encoder = self.valkey.get_encoder() stored_token = encoder.encode(stored_token) return self.local.token is not None and stored_token == self.local.token @@ -257,7 +257,9 @@ def release(self) -> None: def do_release(self, expected_token: str) -> None: if not bool( - self.lua_release(keys=[self.name], args=[expected_token], client=self.redis) + self.lua_release( + keys=[self.name], args=[expected_token], client=self.valkey + ) ): raise LockNotOwnedError( "Cannot release a lock that's no longer owned", @@ -287,7 +289,7 @@ def do_extend(self, additional_time: int, replace_ttl: bool) -> bool: self.lua_extend( keys=[self.name], args=[self.local.token, additional_time, "1" if replace_ttl else "0"], - client=self.redis, + client=self.valkey, ) ): raise LockNotOwnedError( @@ -313,7 +315,7 @@ def do_reacquire(self) -> bool: timeout = int(self.timeout * 1000) if not bool( self.lua_reacquire( - keys=[self.name], args=[self.local.token, timeout], client=self.redis + keys=[self.name], args=[self.local.token, timeout], client=self.valkey ) ): raise LockNotOwnedError( diff --git a/redis/ocsp.py b/valkey/ocsp.py similarity index 99% rename from redis/ocsp.py rename to valkey/ocsp.py index 8819848f..4d475e59 100644 --- a/redis/ocsp.py +++ b/valkey/ocsp.py @@ -15,7 +15,7 @@ from cryptography.hazmat.primitives.hashes import SHA1, Hash from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat from cryptography.x509 import ocsp -from redis.exceptions import AuthorizationError, ConnectionError +from valkey.exceptions import AuthorizationError, ConnectionError def _verify_response(issuer_cert, ocsp_response): diff --git a/redis/py.typed b/valkey/py.typed similarity index 100% 
rename from redis/py.typed rename to valkey/py.typed diff --git a/redis/retry.py b/valkey/retry.py similarity index 96% rename from redis/retry.py rename to valkey/retry.py index 60644305..02962bd9 100644 --- a/redis/retry.py +++ b/valkey/retry.py @@ -1,7 +1,7 @@ import socket from time import sleep -from redis.exceptions import ConnectionError, TimeoutError +from valkey.exceptions import ConnectionError, TimeoutError class Retry: diff --git a/redis/sentinel.py b/valkey/sentinel.py similarity index 89% rename from redis/sentinel.py rename to valkey/sentinel.py index 72b5bef5..4b12bdd9 100644 --- a/redis/sentinel.py +++ b/valkey/sentinel.py @@ -2,11 +2,16 @@ import weakref from typing import Optional -from redis.client import Redis -from redis.commands import SentinelCommands -from redis.connection import Connection, ConnectionPool, SSLConnection -from redis.exceptions import ConnectionError, ReadOnlyError, ResponseError, TimeoutError -from redis.utils import str_if_bytes +from valkey.client import Valkey +from valkey.commands import SentinelCommands +from valkey.connection import Connection, ConnectionPool, SSLConnection +from valkey.exceptions import ( + ConnectionError, + ReadOnlyError, + ResponseError, + TimeoutError, +) +from valkey.utils import str_if_bytes class MasterNotFoundError(ConnectionError): @@ -197,9 +202,9 @@ def rotate_slaves(self): class Sentinel(SentinelCommands): """ - Redis Sentinel cluster client + Valkey Sentinel cluster client - >>> from redis.sentinel import Sentinel + >>> from valkey.sentinel import Sentinel >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) >>> master.set('foo', 'bar') @@ -216,12 +221,12 @@ class Sentinel(SentinelCommands): ``sentinel_kwargs`` is a dictionary of connection arguments used when connecting to sentinel instances. Any argument that can be passed to - a normal Redis connection can be specified here. 
If ``sentinel_kwargs`` is + a normal Valkey connection can be specified here. If ``sentinel_kwargs`` is not specified, any socket_timeout and socket_keepalive options specified in ``connection_kwargs`` will be used. ``connection_kwargs`` are keyword arguments that will be used when - establishing a connection to a Redis server. + establishing a connection to a Valkey server. """ def __init__( @@ -240,7 +245,7 @@ def __init__( self.sentinel_kwargs = sentinel_kwargs self.sentinels = [ - Redis(hostname, port, **self.sentinel_kwargs) + Valkey(hostname, port, **self.sentinel_kwargs) for hostname, port in sentinels ] self.min_other_sentinels = min_other_sentinels @@ -285,7 +290,7 @@ def check_master_state(self, state, service_name): def discover_master(self, service_name): """ - Asks sentinel servers for the Redis master's address corresponding + Asks sentinel servers for the Valkey master's address corresponding to the service labeled ``service_name``. Returns a pair (address, port) or raises MasterNotFoundError if no @@ -336,54 +341,54 @@ def discover_slaves(self, service_name): def master_for( self, service_name, - redis_class=Redis, + valkey_class=Valkey, connection_pool_class=SentinelConnectionPool, **kwargs, ): """ - Returns a redis client instance for the ``service_name`` master. + Returns a valkey client instance for the ``service_name`` master. - A :py:class:`~redis.sentinel.SentinelConnectionPool` class is + A :py:class:`~valkey.sentinel.SentinelConnectionPool` class is used to retrieve the master's address before establishing a new connection. NOTE: If the master's address has changed, any cached connections to the old master are closed. - By default clients will be a :py:class:`~redis.Redis` instance. - Specify a different class to the ``redis_class`` argument if you + By default clients will be a :py:class:`~valkey.Valkey` instance. + Specify a different class to the ``valkey_class`` argument if you desire something different. 
The ``connection_pool_class`` specifies the connection pool to - use. The :py:class:`~redis.sentinel.SentinelConnectionPool` + use. The :py:class:`~valkey.sentinel.SentinelConnectionPool` will be used by default. All other keyword arguments are merged with any connection_kwargs passed to this class and passed to the connection pool as keyword - arguments to be used to initialize Redis connections. + arguments to be used to initialize Valkey connections. """ kwargs["is_master"] = True connection_kwargs = dict(self.connection_kwargs) connection_kwargs.update(kwargs) - return redis_class.from_pool( + return valkey_class.from_pool( connection_pool_class(service_name, self, **connection_kwargs) ) def slave_for( self, service_name, - redis_class=Redis, + valkey_class=Valkey, connection_pool_class=SentinelConnectionPool, **kwargs, ): """ - Returns redis client instance for the ``service_name`` slave(s). + Returns valkey client instance for the ``service_name`` slave(s). A SentinelConnectionPool class is used to retrieve the slave's address before establishing a new connection. - By default clients will be a :py:class:`~redis.Redis` instance. - Specify a different class to the ``redis_class`` argument if you + By default clients will be a :py:class:`~valkey.Valkey` instance. + Specify a different class to the ``valkey_class`` argument if you desire something different. The ``connection_pool_class`` specifies the connection pool to use. @@ -391,11 +396,11 @@ def slave_for( All other keyword arguments are merged with any connection_kwargs passed to this class and passed to the connection pool as keyword - arguments to be used to initialize Redis connections. + arguments to be used to initialize Valkey connections. 
""" kwargs["is_master"] = False connection_kwargs = dict(self.connection_kwargs) connection_kwargs.update(kwargs) - return redis_class.from_pool( + return valkey_class.from_pool( connection_pool_class(service_name, self, **connection_kwargs) ) diff --git a/redis/typing.py b/valkey/typing.py similarity index 90% rename from redis/typing.py rename to valkey/typing.py index 838219fb..c44e4cbb 100644 --- a/redis/typing.py +++ b/valkey/typing.py @@ -14,9 +14,9 @@ ) if TYPE_CHECKING: - from redis._parsers import Encoder - from redis.asyncio.connection import ConnectionPool as AsyncConnectionPool - from redis.connection import ConnectionPool + from valkey._parsers import Encoder + from valkey.asyncio.connection import ConnectionPool as AsyncConnectionPool + from valkey.connection import ConnectionPool Number = Union[int, float] @@ -28,7 +28,7 @@ ZScoreBoundT = Union[float, str] # str allows for the [ or ( prefix BitfieldOffsetT = Union[int, str] # str allows for #x syntax _StringLikeT = Union[bytes, str, memoryview] -KeyT = _StringLikeT # Main redis key space +KeyT = _StringLikeT # Main valkey key space PatternT = _StringLikeT # Patterns matched against keys, fields etc FieldT = EncodableT # Fields within hash tables, streams and geo commands KeysT = Union[KeyT, Iterable[KeyT]] diff --git a/redis/utils.py b/valkey/utils.py similarity index 92% rename from redis/utils.py rename to valkey/utils.py index 01fdfed7..77ad0e35 100644 --- a/redis/utils.py +++ b/valkey/utils.py @@ -36,19 +36,19 @@ def from_url(url, **kwargs): """ - Returns an active Redis client generated from the given database URL. + Returns an active Valkey client generated from the given database URL. Will attempt to extract the database id from the path url fragment, if none is provided. 
""" - from redis.client import Redis + from valkey.client import Valkey - return Redis.from_url(url, **kwargs) + return Valkey.from_url(url, **kwargs) @contextmanager -def pipeline(redis_obj): - p = redis_obj.pipeline() +def pipeline(valkey_obj): + p = valkey_obj.pipeline() yield p p.execute() @@ -141,7 +141,7 @@ def _set_info_logger(): def get_lib_version(): try: - libver = metadata.version("redis") + libver = metadata.version("valkey") except metadata.PackageNotFoundError: libver = "99.99.99" return libver diff --git a/whitelist.py b/whitelist.py index 29cd529e..69be39a7 100644 --- a/whitelist.py +++ b/whitelist.py @@ -1,18 +1,18 @@ -exc_type # unused variable (/data/repos/redis/redis-py/redis/client.py:1045) -exc_value # unused variable (/data/repos/redis/redis-py/redis/client.py:1045) -traceback # unused variable (/data/repos/redis/redis-py/redis/client.py:1045) -exc_type # unused variable (/data/repos/redis/redis-py/redis/client.py:1211) -exc_value # unused variable (/data/repos/redis/redis-py/redis/client.py:1211) -traceback # unused variable (/data/repos/redis/redis-py/redis/client.py:1211) -exc_type # unused variable (/data/repos/redis/redis-py/redis/client.py:1589) -exc_value # unused variable (/data/repos/redis/redis-py/redis/client.py:1589) -traceback # unused variable (/data/repos/redis/redis-py/redis/client.py:1589) -exc_type # unused variable (/data/repos/redis/redis-py/redis/lock.py:156) -exc_value # unused variable (/data/repos/redis/redis-py/redis/lock.py:156) -traceback # unused variable (/data/repos/redis/redis-py/redis/lock.py:156) -exc_type # unused variable (/data/repos/redis/redis-py/redis/asyncio/utils.py:26) -exc_value # unused variable (/data/repos/redis/redis-py/redis/asyncio/utils.py:26) -traceback # unused variable (/data/repos/redis/redis-py/redis/asyncio/utils.py:26) -AsyncConnectionPool # unused import (//data/repos/redis/redis-py/redis/typing.py:9) -AsyncRedis # unused import (//data/repos/redis/redis-py/redis/commands/core.py:49) 
-TargetNodesT # unused import (//data/repos/redis/redis-py/redis/commands/cluster.py:46) +exc_type # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1045) +exc_value # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1045) +traceback # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1045) +exc_type # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1211) +exc_value # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1211) +traceback # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1211) +exc_type # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1589) +exc_value # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1589) +traceback # unused variable (/data/repos/valkey/valkey-py/valkey/client.py:1589) +exc_type # unused variable (/data/repos/valkey/valkey-py/valkey/lock.py:156) +exc_value # unused variable (/data/repos/valkey/valkey-py/valkey/lock.py:156) +traceback # unused variable (/data/repos/valkey/valkey-py/valkey/lock.py:156) +exc_type # unused variable (/data/repos/valkey/valkey-py/valkey/asyncio/utils.py:26) +exc_value # unused variable (/data/repos/valkey/valkey-py/valkey/asyncio/utils.py:26) +traceback # unused variable (/data/repos/valkey/valkey-py/valkey/asyncio/utils.py:26) +AsyncConnectionPool # unused import (//data/repos/valkey/valkey-py/valkey/typing.py:9) +AsyncValkey # unused import (//data/repos/valkey/valkey-py/valkey/commands/core.py:49) +TargetNodesT # unused import (//data/repos/valkey/valkey-py/valkey/commands/cluster.py:46)