This commit is contained in:
Iliyan Angelov
2025-09-19 11:58:53 +03:00
parent 306b20e24a
commit 6b247e5b9f
11423 changed files with 1500615 additions and 778 deletions

View File

@@ -0,0 +1,88 @@
from redis import asyncio # noqa
from redis.backoff import default_backoff
from redis.client import Redis, StrictRedis
from redis.cluster import RedisCluster
from redis.connection import (
BlockingConnectionPool,
Connection,
ConnectionPool,
SSLConnection,
UnixDomainSocketConnection,
)
from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
from redis.exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
CrossSlotTransactionError,
DataError,
InvalidPipelineStack,
InvalidResponse,
MaxConnectionsError,
OutOfMemoryError,
PubSubError,
ReadOnlyError,
RedisClusterException,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
from redis.sentinel import (
Sentinel,
SentinelConnectionPool,
SentinelManagedConnection,
SentinelManagedSSLConnection,
)
from redis.utils import from_url
def int_or_str(value):
try:
return int(value)
except ValueError:
return value
__version__ = "6.4.0"
VERSION = tuple(map(int_or_str, __version__.split(".")))
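# Illustrative note (not part of the vendored file): int_or_str keeps
# non-numeric version components as strings, so a hypothetical
# __version__ of "6.4.0rc1" would give VERSION == (6, 4, "0rc1"),
# while "6.4.0" gives (6, 4, 0).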
__all__ = [
"AuthenticationError",
"AuthenticationWrongNumberOfArgsError",
"BlockingConnectionPool",
"BusyLoadingError",
"ChildDeadlockedError",
"Connection",
"ConnectionError",
"ConnectionPool",
"CredentialProvider",
"CrossSlotTransactionError",
"DataError",
"from_url",
"default_backoff",
"InvalidPipelineStack",
"InvalidResponse",
"MaxConnectionsError",
"OutOfMemoryError",
"PubSubError",
"ReadOnlyError",
"Redis",
"RedisCluster",
"RedisClusterException",
"RedisError",
"ResponseError",
"Sentinel",
"SentinelConnectionPool",
"SentinelManagedConnection",
"SentinelManagedSSLConnection",
"SSLConnection",
"UsernamePasswordCredentialProvider",
"StrictRedis",
"TimeoutError",
"UnixDomainSocketConnection",
"WatchError",
]
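
For context, a minimal sketch of how these re-exported names are typically consumed; the host, port, and key below are illustrative assumptions, not taken from this commit:

import redis

# decode_responses=True makes GET return str instead of bytes
r = redis.Redis(host="localhost", port=6379, decode_responses=True)
try:
    r.set("greeting", "hello")
    print(r.get("greeting"))  # -> "hello"
except redis.ConnectionError as exc:
    # ConnectionError is one of the exceptions re-exported above
    print(f"could not reach the server: {exc}")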

View File

@@ -0,0 +1,27 @@
from .base import (
AsyncPushNotificationsParser,
BaseParser,
PushNotificationsParser,
_AsyncRESPBase,
)
from .commands import AsyncCommandsParser, CommandsParser
from .encoders import Encoder
from .hiredis import _AsyncHiredisParser, _HiredisParser
from .resp2 import _AsyncRESP2Parser, _RESP2Parser
from .resp3 import _AsyncRESP3Parser, _RESP3Parser
__all__ = [
"AsyncCommandsParser",
"_AsyncHiredisParser",
"_AsyncRESPBase",
"_AsyncRESP2Parser",
"_AsyncRESP3Parser",
"AsyncPushNotificationsParser",
"CommandsParser",
"Encoder",
"BaseParser",
"_HiredisParser",
"_RESP2Parser",
"_RESP3Parser",
"PushNotificationsParser",
]
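
For reference, a sketch of how a caller might prefer the hiredis-backed parser and fall back to the pure-Python RESP parsers; the selection function is an illustrative assumption (the real choice lives in redis.connection, outside this hunk):

from redis._parsers import _HiredisParser, _RESP2Parser, _RESP3Parser
from redis.utils import HIREDIS_AVAILABLE

def pick_parser(protocol: int = 2):
    # Prefer the C-backed hiredis parser when the optional dependency
    # is installed; otherwise match the negotiated RESP protocol.
    if HIREDIS_AVAILABLE:
        return _HiredisParser
    return _RESP3Parser if protocol == 3 else _RESP2Parser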

View File

@@ -0,0 +1,289 @@
import sys
from abc import ABC
from asyncio import IncompleteReadError, StreamReader, TimeoutError
from typing import Callable, List, Optional, Protocol, Union
if sys.version_info >= (3, 11):
from asyncio import timeout as async_timeout
else:
from async_timeout import timeout as async_timeout
from ..exceptions import (
AskError,
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ClusterCrossSlotError,
ClusterDownError,
ConnectionError,
ExecAbortError,
MasterDownError,
ModuleError,
MovedError,
NoPermissionError,
NoScriptError,
OutOfMemoryError,
ReadOnlyError,
RedisError,
ResponseError,
TryAgainError,
)
from ..typing import EncodableT
from .encoders import Encoder
from .socket import SERVER_CLOSED_CONNECTION_ERROR, SocketBuffer
MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
MODULE_EXPORTS_DATA_TYPES_ERROR = (
"Error unloading module: the module "
"exports one or more module-side data "
"types, can't unload"
)
# the user sent an AUTH command to a server with no password configured
NO_AUTH_SET_ERROR = {
# Redis >= 6.0
"AUTH <password> called without any password "
"configured for the default user. Are you sure "
"your configuration is correct?": AuthenticationError,
# Redis < 6.0
"Client sent AUTH, but no password is set": AuthenticationError,
}
class BaseParser(ABC):
EXCEPTION_CLASSES = {
"ERR": {
"max number of clients reached": ConnectionError,
"invalid password": AuthenticationError,
# some Redis server versions report invalid command syntax
# in lowercase
"wrong number of arguments "
"for 'auth' command": AuthenticationWrongNumberOfArgsError,
# some Redis server versions report invalid command syntax
# in uppercase
"wrong number of arguments "
"for 'AUTH' command": AuthenticationWrongNumberOfArgsError,
MODULE_LOAD_ERROR: ModuleError,
MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
NO_SUCH_MODULE_ERROR: ModuleError,
MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
**NO_AUTH_SET_ERROR,
},
"OOM": OutOfMemoryError,
"WRONGPASS": AuthenticationError,
"EXECABORT": ExecAbortError,
"LOADING": BusyLoadingError,
"NOSCRIPT": NoScriptError,
"READONLY": ReadOnlyError,
"NOAUTH": AuthenticationError,
"NOPERM": NoPermissionError,
"ASK": AskError,
"TRYAGAIN": TryAgainError,
"MOVED": MovedError,
"CLUSTERDOWN": ClusterDownError,
"CROSSSLOT": ClusterCrossSlotError,
"MASTERDOWN": MasterDownError,
}
@classmethod
def parse_error(cls, response):
"Parse an error response"
error_code = response.split(" ")[0]
if error_code in cls.EXCEPTION_CLASSES:
response = response[len(error_code) + 1 :]
exception_class = cls.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
return exception_class(response)
return ResponseError(response)
def on_disconnect(self):
raise NotImplementedError()
def on_connect(self, connection):
raise NotImplementedError()
class _RESPBase(BaseParser):
"""Base class for sync-based resp parsing"""
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self.encoder = None
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
"Called when the socket connects"
self._sock = connection._sock
self._buffer = SocketBuffer(
self._sock, self.socket_read_size, connection.socket_timeout
)
self.encoder = connection.encoder
def on_disconnect(self):
"Called when the socket disconnects"
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoder = None
def can_read(self, timeout):
return self._buffer and self._buffer.can_read(timeout)
class AsyncBaseParser(BaseParser):
"""Base parsing class for the python-backed async parser"""
__slots__ = "_stream", "_read_size"
def __init__(self, socket_read_size: int):
self._stream: Optional[StreamReader] = None
self._read_size = socket_read_size
async def can_read_destructive(self) -> bool:
raise NotImplementedError()
async def read_response(
self, disable_decoding: bool = False
) -> Union[EncodableT, ResponseError, None, List[EncodableT]]:
raise NotImplementedError()
_INVALIDATION_MESSAGE = [b"invalidate", "invalidate"]
class PushNotificationsParser(Protocol):
"""Protocol defining RESP3-specific parsing functionality"""
pubsub_push_handler_func: Callable
invalidation_push_handler_func: Optional[Callable] = None
def handle_pubsub_push_response(self, response):
"""Handle pubsub push responses"""
raise NotImplementedError()
def handle_push_response(self, response, **kwargs):
if response[0] not in _INVALIDATION_MESSAGE:
return self.pubsub_push_handler_func(response)
if self.invalidation_push_handler_func:
return self.invalidation_push_handler_func(response)
def set_pubsub_push_handler(self, pubsub_push_handler_func):
self.pubsub_push_handler_func = pubsub_push_handler_func
def set_invalidation_push_handler(self, invalidation_push_handler_func):
self.invalidation_push_handler_func = invalidation_push_handler_func
class AsyncPushNotificationsParser(Protocol):
"""Protocol defining async RESP3-specific parsing functionality"""
pubsub_push_handler_func: Callable
invalidation_push_handler_func: Optional[Callable] = None
async def handle_pubsub_push_response(self, response):
"""Handle pubsub push responses asynchronously"""
raise NotImplementedError()
async def handle_push_response(self, response, **kwargs):
"""Handle push responses asynchronously"""
if response[0] not in _INVALIDATION_MESSAGE:
return await self.pubsub_push_handler_func(response)
if self.invalidation_push_handler_func:
return await self.invalidation_push_handler_func(response)
def set_pubsub_push_handler(self, pubsub_push_handler_func):
"""Set the pubsub push handler function"""
self.pubsub_push_handler_func = pubsub_push_handler_func
def set_invalidation_push_handler(self, invalidation_push_handler_func):
"""Set the invalidation push handler function"""
self.invalidation_push_handler_func = invalidation_push_handler_func
class _AsyncRESPBase(AsyncBaseParser):
"""Base class for async resp parsing"""
__slots__ = AsyncBaseParser.__slots__ + ("encoder", "_buffer", "_pos", "_chunks")
def __init__(self, socket_read_size: int):
super().__init__(socket_read_size)
self.encoder: Optional[Encoder] = None
self._buffer = b""
self._chunks = []
self._pos = 0
def _clear(self):
self._buffer = b""
self._chunks.clear()
def on_connect(self, connection):
"""Called when the stream connects"""
self._stream = connection._reader
if self._stream is None:
raise RedisError("Buffer is closed.")
self.encoder = connection.encoder
self._clear()
self._connected = True
def on_disconnect(self):
"""Called when the stream disconnects"""
self._connected = False
async def can_read_destructive(self) -> bool:
if not self._connected:
raise RedisError("Buffer is closed.")
if self._buffer:
return True
try:
async with async_timeout(0):
return self._stream.at_eof()
except TimeoutError:
return False
async def _read(self, length: int) -> bytes:
"""
Read `length` bytes of data. These are assumed to be followed
by a '\r\n' terminator which is subsequently discarded.
"""
want = length + 2
end = self._pos + want
if len(self._buffer) >= end:
result = self._buffer[self._pos : end - 2]
else:
tail = self._buffer[self._pos :]
try:
data = await self._stream.readexactly(want - len(tail))
except IncompleteReadError as error:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from error
result = (tail + data)[:-2]
self._chunks.append(data)
self._pos += want
return result
async def _readline(self) -> bytes:
"""
Read an unknown number of bytes up to the next '\r\n'
line separator, which is discarded.
"""
found = self._buffer.find(b"\r\n", self._pos)
if found >= 0:
result = self._buffer[self._pos : found]
else:
tail = self._buffer[self._pos :]
data = await self._stream.readline()
if not data.endswith(b"\r\n"):
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
result = (tail + data)[:-2]
self._chunks.append(data)
self._pos += len(result) + 2
return result
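
To make the EXCEPTION_CLASSES table above concrete, a small illustrative check of BaseParser.parse_error; the MOVED payload is a made-up example, and the import paths assume the upstream redis-py layout:

from redis._parsers.base import BaseParser
from redis.exceptions import MovedError, ResponseError

err = BaseParser.parse_error("MOVED 3999 127.0.0.1:6381")
assert isinstance(err, MovedError)      # matched via EXCEPTION_CLASSES
unknown = BaseParser.parse_error("WOBBLE something odd")
assert type(unknown) is ResponseError   # unmatched codes fall through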

View File

@@ -0,0 +1,281 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
from redis.exceptions import RedisError, ResponseError
from redis.utils import str_if_bytes
if TYPE_CHECKING:
from redis.asyncio.cluster import ClusterNode
class AbstractCommandsParser:
def _get_pubsub_keys(self, *args):
"""
Get the keys from pubsub command.
Although PubSub commands have predetermined key locations, they are not
supported in the 'COMMAND's output, so the key positions are hardcoded
in this method
"""
if len(args) < 2:
# The command has no keys in it
return None
args = [str_if_bytes(arg) for arg in args]
command = args[0].upper()
keys = None
if command == "PUBSUB":
# the second argument is a part of the command name, e.g.
# ['PUBSUB', 'NUMSUB', 'foo'].
pubsub_type = args[1].upper()
if pubsub_type in ["CHANNELS", "NUMSUB", "SHARDCHANNELS", "SHARDNUMSUB"]:
keys = args[2:]
elif command in ["SUBSCRIBE", "PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE"]:
# format example:
# SUBSCRIBE channel [channel ...]
keys = list(args[1:])
elif command in ["PUBLISH", "SPUBLISH"]:
# format example:
# PUBLISH channel message
keys = [args[1]]
return keys
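# Illustrative examples for _get_pubsub_keys (not in the original file):
#   _get_pubsub_keys("PUBSUB", "NUMSUB", "ch1", "ch2") -> ["ch1", "ch2"]
#   _get_pubsub_keys("SUBSCRIBE", "ch1", "ch2")        -> ["ch1", "ch2"]
#   _get_pubsub_keys("PUBLISH", "ch", "payload")       -> ["ch"]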
def parse_subcommand(self, command, **options):
cmd_dict = {}
cmd_name = str_if_bytes(command[0])
cmd_dict["name"] = cmd_name
cmd_dict["arity"] = int(command[1])
cmd_dict["flags"] = [str_if_bytes(flag) for flag in command[2]]
cmd_dict["first_key_pos"] = command[3]
cmd_dict["last_key_pos"] = command[4]
cmd_dict["step_count"] = command[5]
if len(command) > 7:
cmd_dict["tips"] = command[7]
cmd_dict["key_specifications"] = command[8]
cmd_dict["subcommands"] = command[9]
return cmd_dict
class CommandsParser(AbstractCommandsParser):
"""
Parses Redis commands to get command keys.
COMMAND output is used to determine key locations.
Commands that do not have a predefined key location are flagged with
'movablekeys', and these commands' keys are determined by the command
'COMMAND GETKEYS'.
"""
def __init__(self, redis_connection):
self.commands = {}
self.initialize(redis_connection)
def initialize(self, r):
commands = r.command()
uppercase_commands = []
for cmd in commands:
if any(x.isupper() for x in cmd):
uppercase_commands.append(cmd)
for cmd in uppercase_commands:
commands[cmd.lower()] = commands.pop(cmd)
self.commands = commands
# As soon as this PR is merged into Redis, we should reimplement
# our logic to use COMMAND INFO changes to determine the key positions
# https://github.com/redis/redis/pull/8324
def get_keys(self, redis_conn, *args):
"""
Get the keys from the passed command.
NOTE: Due to a bug in redis<7.0, this function does not work properly
for EVAL or EVALSHA when the `numkeys` arg is 0.
- issue: https://github.com/redis/redis/issues/9493
- fix: https://github.com/redis/redis/pull/9733
So, don't use this function with EVAL or EVALSHA.
"""
if len(args) < 2:
# The command has no keys in it
return None
cmd_name = args[0].lower()
if cmd_name not in self.commands:
# try to split the command name and take only the main command,
# e.g. 'memory' for 'memory usage'
cmd_name_split = cmd_name.split()
cmd_name = cmd_name_split[0]
if cmd_name in self.commands:
# save the split command back into args
args = cmd_name_split + list(args[1:])
else:
# We'll try to reinitialize the commands cache; if the engine
# version has changed, the cached commands may be stale
self.initialize(redis_conn)
if cmd_name not in self.commands:
raise RedisError(
f"{cmd_name.upper()} command doesn't exist in Redis commands"
)
command = self.commands.get(cmd_name)
if "movablekeys" in command["flags"]:
keys = self._get_moveable_keys(redis_conn, *args)
elif "pubsub" in command["flags"] or command["name"] == "pubsub":
keys = self._get_pubsub_keys(*args)
else:
if (
command["step_count"] == 0
and command["first_key_pos"] == 0
and command["last_key_pos"] == 0
):
is_subcmd = False
if "subcommands" in command:
subcmd_name = f"{cmd_name}|{args[1].lower()}"
for subcmd in command["subcommands"]:
if str_if_bytes(subcmd[0]) == subcmd_name:
command = self.parse_subcommand(subcmd)
is_subcmd = True
# The command doesn't have keys in it
if not is_subcmd:
return None
last_key_pos = command["last_key_pos"]
if last_key_pos < 0:
last_key_pos = len(args) - abs(last_key_pos)
keys_pos = list(
range(command["first_key_pos"], last_key_pos + 1, command["step_count"])
)
keys = [args[pos] for pos in keys_pos]
return keys
def _get_moveable_keys(self, redis_conn, *args):
"""
NOTE: Due to a bug in redis<7.0, this function does not work properly
for EVAL or EVALSHA when the `numkeys` arg is 0.
- issue: https://github.com/redis/redis/issues/9493
- fix: https://github.com/redis/redis/pull/9733
So, don't use this function with EVAL or EVALSHA.
"""
# The command name should be split into separate arguments,
# e.g. 'MEMORY USAGE' will be split into ['MEMORY', 'USAGE']
pieces = args[0].split() + list(args[1:])
try:
keys = redis_conn.execute_command("COMMAND GETKEYS", *pieces)
except ResponseError as e:
message = str(e)
if (
"Invalid arguments" in message
or "The command has no key arguments" in message
):
return None
else:
raise e
return keys
class AsyncCommandsParser(AbstractCommandsParser):
"""
Parses Redis commands to get command keys.
COMMAND output is used to determine key locations.
Commands that do not have a predefined key location are flagged with 'movablekeys',
and these commands' keys are determined by the command 'COMMAND GETKEYS'.
NOTE: Due to a bug in redis<7.0, this does not work properly
for EVAL or EVALSHA when the `numkeys` arg is 0.
- issue: https://github.com/redis/redis/issues/9493
- fix: https://github.com/redis/redis/pull/9733
So, don't use this with EVAL or EVALSHA.
"""
__slots__ = ("commands", "node")
def __init__(self) -> None:
self.commands: Dict[str, Union[int, Dict[str, Any]]] = {}
async def initialize(self, node: Optional["ClusterNode"] = None) -> None:
if node:
self.node = node
commands = await self.node.execute_command("COMMAND")
self.commands = {cmd.lower(): command for cmd, command in commands.items()}
# As soon as this PR is merged into Redis, we should reimplement
# our logic to use COMMAND INFO changes to determine the key positions
# https://github.com/redis/redis/pull/8324
async def get_keys(self, *args: Any) -> Optional[Tuple[str, ...]]:
"""
Get the keys from the passed command.
NOTE: Due to a bug in redis<7.0, this function does not work properly
for EVAL or EVALSHA when the `numkeys` arg is 0.
- issue: https://github.com/redis/redis/issues/9493
- fix: https://github.com/redis/redis/pull/9733
So, don't use this function with EVAL or EVALSHA.
"""
if len(args) < 2:
# The command has no keys in it
return None
cmd_name = args[0].lower()
if cmd_name not in self.commands:
# try to split the command name and take only the main command,
# e.g. 'memory' for 'memory usage'
cmd_name_split = cmd_name.split()
cmd_name = cmd_name_split[0]
if cmd_name in self.commands:
# save the split command back into args
args = cmd_name_split + list(args[1:])
else:
# We'll try to reinitialize the commands cache; if the engine
# version has changed, the cached commands may be stale
await self.initialize()
if cmd_name not in self.commands:
raise RedisError(
f"{cmd_name.upper()} command doesn't exist in Redis commands"
)
command = self.commands.get(cmd_name)
if "movablekeys" in command["flags"]:
keys = await self._get_moveable_keys(*args)
elif "pubsub" in command["flags"] or command["name"] == "pubsub":
keys = self._get_pubsub_keys(*args)
else:
if (
command["step_count"] == 0
and command["first_key_pos"] == 0
and command["last_key_pos"] == 0
):
is_subcmd = False
if "subcommands" in command:
subcmd_name = f"{cmd_name}|{args[1].lower()}"
for subcmd in command["subcommands"]:
if str_if_bytes(subcmd[0]) == subcmd_name:
command = self.parse_subcommand(subcmd)
is_subcmd = True
# The command doesn't have keys in it
if not is_subcmd:
return None
last_key_pos = command["last_key_pos"]
if last_key_pos < 0:
last_key_pos = len(args) - abs(last_key_pos)
keys_pos = list(
range(command["first_key_pos"], last_key_pos + 1, command["step_count"])
)
keys = [args[pos] for pos in keys_pos]
return keys
async def _get_moveable_keys(self, *args: Any) -> Optional[Tuple[str, ...]]:
try:
keys = await self.node.execute_command("COMMAND GETKEYS", *args)
except ResponseError as e:
message = str(e)
if (
"Invalid arguments" in message
or "The command has no key arguments" in message
):
return None
else:
raise e
return keys
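
A short usage sketch for the synchronous parser above; it assumes a reachable local server and uses illustrative key names:

from redis import Redis
from redis._parsers.commands import CommandsParser

r = Redis()                  # assumption: server on localhost:6379
parser = CommandsParser(r)   # issues COMMAND once to build the cache
print(parser.get_keys(r, "SET", "mykey", "value"))         # ['mykey']
print(parser.get_keys(r, "MSET", "k1", "v1", "k2", "v2"))  # ['k1', 'k2']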

View File

@@ -0,0 +1,44 @@
from ..exceptions import DataError
class Encoder:
"Encode strings to bytes-like and decode bytes-like to strings"
__slots__ = "encoding", "encoding_errors", "decode_responses"
def __init__(self, encoding, encoding_errors, decode_responses):
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
def encode(self, value):
"Return a bytestring or bytes-like representation of the value"
if isinstance(value, (bytes, memoryview)):
return value
elif isinstance(value, bool):
# special case bool since it is a subclass of int
raise DataError(
"Invalid input of type: 'bool'. Convert to a "
"bytes, string, int or float first."
)
elif isinstance(value, (int, float)):
value = repr(value).encode()
elif not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = type(value).__name__
raise DataError(
f"Invalid input of type: '{typename}'. "
f"Convert to a bytes, string, int or float first."
)
if isinstance(value, str):
value = value.encode(self.encoding, self.encoding_errors)
return value
def decode(self, value, force=False):
"Return a unicode string from the bytes-like representation"
if self.decode_responses or force:
if isinstance(value, memoryview):
value = value.tobytes()
if isinstance(value, bytes):
value = value.decode(self.encoding, self.encoding_errors)
return value
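
A quick illustrative round-trip with the Encoder; the values are examples, and the import path assumes the upstream redis-py layout:

from redis._parsers.encoders import Encoder
from redis.exceptions import DataError

enc = Encoder(encoding="utf-8", encoding_errors="strict", decode_responses=True)
assert enc.encode("héllo") == "héllo".encode("utf-8")
assert enc.encode(42) == b"42"
assert enc.encode(3.5) == b"3.5"
assert enc.decode(b"ping") == "ping"
try:
    enc.encode(True)  # bool is rejected on purpose (see above)
except DataError as exc:
    print(exc)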

View File

@@ -0,0 +1,883 @@
import datetime
from redis.utils import str_if_bytes
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
if not response:
return None
try:
response = int(response)
except ValueError:
return None
return datetime.datetime.fromtimestamp(response)
def parse_debug_object(response):
"Parse the results of Redis's DEBUG OBJECT command into a Python dict"
# The 'type' of the object is the first item in the response, but isn't
# prefixed with a name
response = str_if_bytes(response)
response = "type:" + response
response = dict(kv.split(":") for kv in response.split())
# parse some expected int values from the string response
# note: this command isn't formally specified, so these fields may not
# appear in all Redis versions
int_fields = ("refcount", "serializedlength", "lru", "lru_seconds_idle")
for field in int_fields:
if field in response:
response[field] = int(response[field])
return response
def parse_info(response):
"""Parse the result of Redis's INFO command into a Python dict"""
info = {}
response = str_if_bytes(response)
def get_value(value):
if "," not in value and "=" not in value:
try:
if "." in value:
return float(value)
else:
return int(value)
except ValueError:
return value
elif "=" not in value:
return [get_value(v) for v in value.split(",") if v]
else:
sub_dict = {}
for item in value.split(","):
if not item:
continue
if "=" in item:
k, v = item.rsplit("=", 1)
sub_dict[k] = get_value(v)
else:
sub_dict[item] = True
return sub_dict
for line in response.splitlines():
if line and not line.startswith("#"):
if line.find(":") != -1:
# Split the info fields into keys and values.
# Note that the value may contain ':', but the 'host:'
# pseudo-command is the only case where the key contains ':'
key, value = line.split(":", 1)
if key == "cmdstat_host":
key, value = line.rsplit(":", 1)
if key == "module":
# Hardcode a list for key 'modules' since there could be
# multiple lines that start with 'module'
info.setdefault("modules", []).append(get_value(value))
else:
info[key] = get_value(value)
else:
# if the line isn't splittable, append it to the "__raw__" key
info.setdefault("__raw__", []).append(line)
return info
def parse_memory_stats(response, **kwargs):
"""Parse the results of MEMORY STATS"""
stats = pairs_to_dict(response, decode_keys=True, decode_string_values=True)
for key, value in stats.items():
if key.startswith("db.") and isinstance(value, list):
stats[key] = pairs_to_dict(
value, decode_keys=True, decode_string_values=True
)
return stats
SENTINEL_STATE_TYPES = {
"can-failover-its-master": int,
"config-epoch": int,
"down-after-milliseconds": int,
"failover-timeout": int,
"info-refresh": int,
"last-hello-message": int,
"last-ok-ping-reply": int,
"last-ping-reply": int,
"last-ping-sent": int,
"master-link-down-time": int,
"master-port": int,
"num-other-sentinels": int,
"num-slaves": int,
"o-down-time": int,
"pending-commands": int,
"parallel-syncs": int,
"port": int,
"quorum": int,
"role-reported-time": int,
"s-down-time": int,
"slave-priority": int,
"slave-repl-offset": int,
"voted-leader-epoch": int,
}
def parse_sentinel_state(item):
result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
flags = set(result["flags"].split(","))
for name, flag in (
("is_master", "master"),
("is_slave", "slave"),
("is_sdown", "s_down"),
("is_odown", "o_down"),
("is_sentinel", "sentinel"),
("is_disconnected", "disconnected"),
("is_master_down", "master_down"),
):
result[name] = flag in flags
return result
def parse_sentinel_master(response):
return parse_sentinel_state(map(str_if_bytes, response))
def parse_sentinel_state_resp3(response):
result = {}
for key in response:
try:
value = SENTINEL_STATE_TYPES[key](str_if_bytes(response[key]))
result[str_if_bytes(key)] = value
except Exception:
result[str_if_bytes(key)] = response[str_if_bytes(key)]
flags = set(result["flags"].split(","))
result["flags"] = flags
return result
def parse_sentinel_masters(response):
result = {}
for item in response:
state = parse_sentinel_state(map(str_if_bytes, item))
result[state["name"]] = state
return result
def parse_sentinel_masters_resp3(response):
return [parse_sentinel_state(master) for master in response]
def parse_sentinel_slaves_and_sentinels(response):
return [parse_sentinel_state(map(str_if_bytes, item)) for item in response]
def parse_sentinel_slaves_and_sentinels_resp3(response):
return [parse_sentinel_state_resp3(item) for item in response]
def parse_sentinel_get_master(response):
return response and (response[0], int(response[1])) or None
def pairs_to_dict(response, decode_keys=False, decode_string_values=False):
"""Create a dict given a list of key/value pairs"""
if response is None:
return {}
if decode_keys or decode_string_values:
# the iter form is faster, but I don't know how to make that work
# with a str_if_bytes() map
keys = response[::2]
if decode_keys:
keys = map(str_if_bytes, keys)
values = response[1::2]
if decode_string_values:
values = map(str_if_bytes, values)
return dict(zip(keys, values))
else:
it = iter(response)
return dict(zip(it, it))
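# Illustrative (not in the original file):
#   pairs_to_dict([b"k1", b"v1", b"k2", b"v2"], decode_keys=True)
#   -> {"k1": b"v1", "k2": b"v2"}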
def pairs_to_dict_typed(response, type_info):
it = iter(response)
result = {}
for key, value in zip(it, it):
if key in type_info:
try:
value = type_info[key](value)
except Exception:
# if for some reason the value can't be coerced, just use
# the string value
pass
result[key] = value
return result
def zset_score_pairs(response, **options):
"""
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
if not response or not options.get("withscores"):
return response
score_cast_func = options.get("score_cast_func", float)
it = iter(response)
return list(zip(it, map(score_cast_func, it)))
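# Illustrative (not in the original file):
#   zset_score_pairs([b"a", b"1.5", b"b", b"2"], withscores=True)
#   -> [(b"a", 1.5), (b"b", 2.0)]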
def sort_return_tuples(response, **options):
"""
If ``groups`` is specified, return the response as a list of
n-element tuples with n being the value found in options['groups']
"""
if not response or not options.get("groups"):
return response
n = options["groups"]
return list(zip(*[response[i::n] for i in range(n)]))
def parse_stream_list(response):
if response is None:
return None
data = []
for r in response:
if r is not None:
data.append((r[0], pairs_to_dict(r[1])))
else:
data.append((None, None))
return data
def pairs_to_dict_with_str_keys(response):
return pairs_to_dict(response, decode_keys=True)
def parse_list_of_dicts(response):
return list(map(pairs_to_dict_with_str_keys, response))
def parse_xclaim(response, **options):
if options.get("parse_justid", False):
return response
return parse_stream_list(response)
def parse_xautoclaim(response, **options):
if options.get("parse_justid", False):
return response[1]
response[1] = parse_stream_list(response[1])
return response
def parse_xinfo_stream(response, **options):
if isinstance(response, list):
data = pairs_to_dict(response, decode_keys=True)
else:
data = {str_if_bytes(k): v for k, v in response.items()}
if not options.get("full", False):
first = data.get("first-entry")
if first is not None and first[0] is not None:
data["first-entry"] = (first[0], pairs_to_dict(first[1]))
last = data["last-entry"]
if last is not None and last[0] is not None:
data["last-entry"] = (last[0], pairs_to_dict(last[1]))
else:
data["entries"] = {_id: pairs_to_dict(entry) for _id, entry in data["entries"]}
if len(data["groups"]) > 0 and isinstance(data["groups"][0], list):
data["groups"] = [
pairs_to_dict(group, decode_keys=True) for group in data["groups"]
]
for g in data["groups"]:
if g["consumers"] and g["consumers"][0] is not None:
g["consumers"] = [
pairs_to_dict(c, decode_keys=True) for c in g["consumers"]
]
else:
data["groups"] = [
{str_if_bytes(k): v for k, v in group.items()}
for group in data["groups"]
]
return data
def parse_xread(response):
if response is None:
return []
return [[r[0], parse_stream_list(r[1])] for r in response]
def parse_xread_resp3(response):
if response is None:
return {}
return {key: [parse_stream_list(value)] for key, value in response.items()}
def parse_xpending(response, **options):
if options.get("parse_detail", False):
return parse_xpending_range(response)
consumers = [{"name": n, "pending": int(p)} for n, p in response[3] or []]
return {
"pending": response[0],
"min": response[1],
"max": response[2],
"consumers": consumers,
}
def parse_xpending_range(response):
k = ("message_id", "consumer", "time_since_delivered", "times_delivered")
return [dict(zip(k, r)) for r in response]
def float_or_none(response):
if response is None:
return None
return float(response)
def bool_ok(response, **options):
return str_if_bytes(response) == "OK"
def parse_zadd(response, **options):
if response is None:
return None
if options.get("as_score"):
return float(response)
return int(response)
def parse_client_list(response, **options):
clients = []
for c in str_if_bytes(response).splitlines():
# Values might contain '='
clients.append(dict(pair.split("=", 1) for pair in c.split(" ")))
return clients
def parse_config_get(response, **options):
response = [str_if_bytes(i) if i is not None else None for i in response]
return response and pairs_to_dict(response) or {}
def parse_scan(response, **options):
cursor, r = response
return int(cursor), r
def parse_hscan(response, **options):
cursor, r = response
no_values = options.get("no_values", False)
if no_values:
payload = r or []
else:
payload = r and pairs_to_dict(r) or {}
return int(cursor), payload
def parse_zscan(response, **options):
score_cast_func = options.get("score_cast_func", float)
cursor, r = response
it = iter(r)
return int(cursor), list(zip(it, map(score_cast_func, it)))
def parse_zmscore(response, **options):
# zmscore: list of scores (double precision floating point number) or nil
return [float(score) if score is not None else None for score in response]
def parse_slowlog_get(response, **options):
space = " " if options.get("decode_responses", False) else b" "
def parse_item(item):
result = {"id": item[0], "start_time": int(item[1]), "duration": int(item[2])}
# Redis Enterprise injects another entry at index [3], which has
# the complexity info (i.e. the value N in case the command has
# an O(N) complexity) instead of the command.
if isinstance(item[3], list):
result["command"] = space.join(item[3])
# These fields are optional, depends on environment.
if len(item) >= 6:
result["client_address"] = item[4]
result["client_name"] = item[5]
else:
result["complexity"] = item[3]
result["command"] = space.join(item[4])
# These fields are optional, depends on environment.
if len(item) >= 7:
result["client_address"] = item[5]
result["client_name"] = item[6]
return result
return [parse_item(item) for item in response]
def parse_stralgo(response, **options):
"""
Parse the response from the `STRALGO` command.
Without modifiers, the returned value is a string.
When LEN is given, the command returns the length of the result
(i.e. an integer).
When IDX is given, the command returns a dictionary with the LCS
length and all the matching ranges in both strings, as start and
end offsets for each string.
When WITHMATCHLEN is given, each array representing a match will
also have the length of the match at the beginning of the array.
"""
if options.get("len", False):
return int(response)
if options.get("idx", False):
if options.get("withmatchlen", False):
matches = [
[(int(match[-1]))] + list(map(tuple, match[:-1]))
for match in response[1]
]
else:
matches = [list(map(tuple, match)) for match in response[1]]
return {
str_if_bytes(response[0]): matches,
str_if_bytes(response[2]): int(response[3]),
}
return str_if_bytes(response)
def parse_cluster_info(response, **options):
response = str_if_bytes(response)
return dict(line.split(":") for line in response.splitlines() if line)
def _parse_node_line(line):
line_items = line.split(" ")
node_id, addr, flags, master_id, ping, pong, epoch, connected = line_items[:8]
ip = addr.split("@")[0]
hostname = addr.split("@")[1].split(",")[1] if "@" in addr and "," in addr else ""
node_dict = {
"node_id": node_id,
"hostname": hostname,
"flags": flags,
"master_id": master_id,
"last_ping_sent": ping,
"last_pong_rcvd": pong,
"epoch": epoch,
"slots": [],
"migrations": [],
"connected": True if connected == "connected" else False,
}
if len(line_items) >= 9:
slots, migrations = _parse_slots(line_items[8:])
node_dict["slots"], node_dict["migrations"] = slots, migrations
return ip, node_dict
def _parse_slots(slot_ranges):
slots, migrations = [], []
for s_range in slot_ranges:
if "->-" in s_range:
slot_id, dst_node_id = s_range[1:-1].split("->-", 1)
migrations.append(
{"slot": slot_id, "node_id": dst_node_id, "state": "migrating"}
)
elif "-<-" in s_range:
slot_id, src_node_id = s_range[1:-1].split("-<-", 1)
migrations.append(
{"slot": slot_id, "node_id": src_node_id, "state": "importing"}
)
else:
s_range = s_range.split("-")
slots.append(s_range)
return slots, migrations
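# Illustrative (not in the original file), given CLUSTER NODES slot tokens:
#   _parse_slots(["0-5460", "[1234->-abcd]"])
#   -> ([["0", "5460"]],
#       [{"slot": "1234", "node_id": "abcd", "state": "migrating"}])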
def parse_cluster_nodes(response, **options):
"""
@see: https://redis.io/commands/cluster-nodes # string / bytes
@see: https://redis.io/commands/cluster-replicas # list of string / bytes
"""
if isinstance(response, (str, bytes)):
response = response.splitlines()
return dict(_parse_node_line(str_if_bytes(node)) for node in response)
def parse_geosearch_generic(response, **options):
"""
Parse the response of the 'GEOSEARCH', 'GEORADIUS' and 'GEORADIUSBYMEMBER'
commands according to 'withdist', 'withhash' and 'withcoord' labels.
"""
try:
if options["store"] or options["store_dist"]:
# `store` and `store_dist` can't be combined
# with other command arguments.
# relevant to 'GEORADIUS' and 'GEORADIUSBYMEMBER'
return response
except KeyError: # it means the command was sent via execute_command
return response
if not isinstance(response, list):
response_list = [response]
else:
response_list = response
if not options["withdist"] and not options["withcoord"] and not options["withhash"]:
# just a bunch of places
return response_list
cast = {
"withdist": float,
"withcoord": lambda ll: (float(ll[0]), float(ll[1])),
"withhash": int,
}
# zip all output results with each casting function to get
# the proper native Python value.
f = [lambda x: x]
f += [cast[o] for o in ["withdist", "withhash", "withcoord"] if options[o]]
return [list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list]
def parse_command(response, **options):
commands = {}
for command in response:
cmd_dict = {}
cmd_name = str_if_bytes(command[0])
cmd_dict["name"] = cmd_name
cmd_dict["arity"] = int(command[1])
cmd_dict["flags"] = [str_if_bytes(flag) for flag in command[2]]
cmd_dict["first_key_pos"] = command[3]
cmd_dict["last_key_pos"] = command[4]
cmd_dict["step_count"] = command[5]
if len(command) > 7:
cmd_dict["tips"] = command[7]
cmd_dict["key_specifications"] = command[8]
cmd_dict["subcommands"] = command[9]
commands[cmd_name] = cmd_dict
return commands
def parse_command_resp3(response, **options):
commands = {}
for command in response:
cmd_dict = {}
cmd_name = str_if_bytes(command[0])
cmd_dict["name"] = cmd_name
cmd_dict["arity"] = command[1]
cmd_dict["flags"] = {str_if_bytes(flag) for flag in command[2]}
cmd_dict["first_key_pos"] = command[3]
cmd_dict["last_key_pos"] = command[4]
cmd_dict["step_count"] = command[5]
cmd_dict["acl_categories"] = command[6]
if len(command) > 7:
cmd_dict["tips"] = command[7]
cmd_dict["key_specifications"] = command[8]
cmd_dict["subcommands"] = command[9]
commands[cmd_name] = cmd_dict
return commands
def parse_pubsub_numsub(response, **options):
return list(zip(response[0::2], response[1::2]))
def parse_client_kill(response, **options):
if isinstance(response, int):
return response
return str_if_bytes(response) == "OK"
def parse_acl_getuser(response, **options):
if response is None:
return None
if isinstance(response, list):
data = pairs_to_dict(response, decode_keys=True)
else:
data = {str_if_bytes(key): value for key, value in response.items()}
# convert everything but user-defined data in 'keys' to native strings
data["flags"] = list(map(str_if_bytes, data["flags"]))
data["passwords"] = list(map(str_if_bytes, data["passwords"]))
data["commands"] = str_if_bytes(data["commands"])
if isinstance(data["keys"], str) or isinstance(data["keys"], bytes):
data["keys"] = list(str_if_bytes(data["keys"]).split(" "))
if data["keys"] == [""]:
data["keys"] = []
if "channels" in data:
if isinstance(data["channels"], str) or isinstance(data["channels"], bytes):
data["channels"] = list(str_if_bytes(data["channels"]).split(" "))
if data["channels"] == [""]:
data["channels"] = []
if "selectors" in data:
if data["selectors"] != [] and isinstance(data["selectors"][0], list):
data["selectors"] = [
list(map(str_if_bytes, selector)) for selector in data["selectors"]
]
elif data["selectors"] != []:
data["selectors"] = [
{str_if_bytes(k): str_if_bytes(v) for k, v in selector.items()}
for selector in data["selectors"]
]
# split 'commands' into separate 'categories' and 'commands' lists
commands, categories = [], []
for command in data["commands"].split(" "):
categories.append(command) if "@" in command else commands.append(command)
data["commands"] = commands
data["categories"] = categories
data["enabled"] = "on" in data["flags"]
return data
def parse_acl_log(response, **options):
if response is None:
return None
if isinstance(response, list):
data = []
for log in response:
log_data = pairs_to_dict(log, True, True)
client_info = log_data.get("client-info", "")
log_data["client-info"] = parse_client_info(client_info)
# float() is lossy compared to the C "double"
log_data["age-seconds"] = float(log_data["age-seconds"])
data.append(log_data)
else:
data = bool_ok(response)
return data
def parse_client_info(value):
"""
Parse the client-info field of an ACL LOG entry, which has the
following format: "key1=value1 key2=value2 key3=value3"
"""
client_info = {}
for info in str_if_bytes(value).strip().split():
key, value = info.split("=")
client_info[key] = value
# Those fields are defined as int in networking.c
for int_key in {
"id",
"age",
"idle",
"db",
"sub",
"psub",
"multi",
"qbuf",
"qbuf-free",
"obl",
"argv-mem",
"oll",
"omem",
"tot-mem",
}:
if int_key in client_info:
client_info[int_key] = int(client_info[int_key])
return client_info
def parse_set_result(response, **options):
"""
Handle the SET result, since the GET argument has been available
since Redis 6.2. The SET result is parsed into:
- a bool, by default
- a string, when the GET argument is used
"""
if options.get("get"):
# Redis will return a getCommand result.
# See `setGenericCommand` in t_string.c
return response
return response and str_if_bytes(response) == "OK"
def string_keys_to_dict(key_string, callback):
return dict.fromkeys(key_string.split(), callback)
_RedisCallbacks = {
**string_keys_to_dict(
"AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST PSETEX "
"PEXPIRE PEXPIREAT RENAMENX SETEX SETNX SMOVE",
bool,
),
**string_keys_to_dict("HINCRBYFLOAT INCRBYFLOAT", float),
**string_keys_to_dict(
"ASKING FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE "
"RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH",
bool_ok,
),
**string_keys_to_dict("XREAD XREADGROUP", parse_xread),
**string_keys_to_dict(
"GEORADIUS GEORADIUSBYMEMBER GEOSEARCH",
parse_geosearch_generic,
),
**string_keys_to_dict("XRANGE XREVRANGE", parse_stream_list),
"ACL GETUSER": parse_acl_getuser,
"ACL LOAD": bool_ok,
"ACL LOG": parse_acl_log,
"ACL SETUSER": bool_ok,
"ACL SAVE": bool_ok,
"CLIENT INFO": parse_client_info,
"CLIENT KILL": parse_client_kill,
"CLIENT LIST": parse_client_list,
"CLIENT PAUSE": bool_ok,
"CLIENT SETINFO": bool_ok,
"CLIENT SETNAME": bool_ok,
"CLIENT UNBLOCK": bool,
"CLUSTER ADDSLOTS": bool_ok,
"CLUSTER ADDSLOTSRANGE": bool_ok,
"CLUSTER DELSLOTS": bool_ok,
"CLUSTER DELSLOTSRANGE": bool_ok,
"CLUSTER FAILOVER": bool_ok,
"CLUSTER FORGET": bool_ok,
"CLUSTER INFO": parse_cluster_info,
"CLUSTER MEET": bool_ok,
"CLUSTER NODES": parse_cluster_nodes,
"CLUSTER REPLICAS": parse_cluster_nodes,
"CLUSTER REPLICATE": bool_ok,
"CLUSTER RESET": bool_ok,
"CLUSTER SAVECONFIG": bool_ok,
"CLUSTER SET-CONFIG-EPOCH": bool_ok,
"CLUSTER SETSLOT": bool_ok,
"CLUSTER SLAVES": parse_cluster_nodes,
"COMMAND": parse_command,
"CONFIG RESETSTAT": bool_ok,
"CONFIG SET": bool_ok,
"FUNCTION DELETE": bool_ok,
"FUNCTION FLUSH": bool_ok,
"FUNCTION RESTORE": bool_ok,
"GEODIST": float_or_none,
"HSCAN": parse_hscan,
"INFO": parse_info,
"LASTSAVE": timestamp_to_datetime,
"MEMORY PURGE": bool_ok,
"MODULE LOAD": bool,
"MODULE UNLOAD": bool,
"PING": lambda r: str_if_bytes(r) == "PONG",
"PUBSUB NUMSUB": parse_pubsub_numsub,
"PUBSUB SHARDNUMSUB": parse_pubsub_numsub,
"QUIT": bool_ok,
"SET": parse_set_result,
"SCAN": parse_scan,
"SCRIPT EXISTS": lambda r: list(map(bool, r)),
"SCRIPT FLUSH": bool_ok,
"SCRIPT KILL": bool_ok,
"SCRIPT LOAD": str_if_bytes,
"SENTINEL CKQUORUM": bool_ok,
"SENTINEL FAILOVER": bool_ok,
"SENTINEL FLUSHCONFIG": bool_ok,
"SENTINEL GET-MASTER-ADDR-BY-NAME": parse_sentinel_get_master,
"SENTINEL MONITOR": bool_ok,
"SENTINEL RESET": bool_ok,
"SENTINEL REMOVE": bool_ok,
"SENTINEL SET": bool_ok,
"SLOWLOG GET": parse_slowlog_get,
"SLOWLOG RESET": bool_ok,
"SORT": sort_return_tuples,
"SSCAN": parse_scan,
"TIME": lambda x: (int(x[0]), int(x[1])),
"XAUTOCLAIM": parse_xautoclaim,
"XCLAIM": parse_xclaim,
"XGROUP CREATE": bool_ok,
"XGROUP DESTROY": bool,
"XGROUP SETID": bool_ok,
"XINFO STREAM": parse_xinfo_stream,
"XPENDING": parse_xpending,
"ZSCAN": parse_zscan,
}
_RedisCallbacksRESP2 = {
**string_keys_to_dict(
"SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set()
),
**string_keys_to_dict(
"ZDIFF ZINTER ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZRANK ZREVRANGE "
"ZREVRANGEBYSCORE ZREVRANK ZUNION",
zset_score_pairs,
),
**string_keys_to_dict("ZINCRBY ZSCORE", float_or_none),
**string_keys_to_dict("BGREWRITEAOF BGSAVE", lambda r: True),
**string_keys_to_dict("BLPOP BRPOP", lambda r: r and tuple(r) or None),
**string_keys_to_dict(
"BZPOPMAX BZPOPMIN", lambda r: r and (r[0], r[1], float(r[2])) or None
),
"ACL CAT": lambda r: list(map(str_if_bytes, r)),
"ACL GENPASS": str_if_bytes,
"ACL HELP": lambda r: list(map(str_if_bytes, r)),
"ACL LIST": lambda r: list(map(str_if_bytes, r)),
"ACL USERS": lambda r: list(map(str_if_bytes, r)),
"ACL WHOAMI": str_if_bytes,
"CLIENT GETNAME": str_if_bytes,
"CLIENT TRACKINGINFO": lambda r: list(map(str_if_bytes, r)),
"CLUSTER GETKEYSINSLOT": lambda r: list(map(str_if_bytes, r)),
"COMMAND GETKEYS": lambda r: list(map(str_if_bytes, r)),
"CONFIG GET": parse_config_get,
"DEBUG OBJECT": parse_debug_object,
"GEOHASH": lambda r: list(map(str_if_bytes, r)),
"GEOPOS": lambda r: list(
map(lambda ll: (float(ll[0]), float(ll[1])) if ll is not None else None, r)
),
"HGETALL": lambda r: r and pairs_to_dict(r) or {},
"MEMORY STATS": parse_memory_stats,
"MODULE LIST": lambda r: [pairs_to_dict(m) for m in r],
"RESET": str_if_bytes,
"SENTINEL MASTER": parse_sentinel_master,
"SENTINEL MASTERS": parse_sentinel_masters,
"SENTINEL SENTINELS": parse_sentinel_slaves_and_sentinels,
"SENTINEL SLAVES": parse_sentinel_slaves_and_sentinels,
"STRALGO": parse_stralgo,
"XINFO CONSUMERS": parse_list_of_dicts,
"XINFO GROUPS": parse_list_of_dicts,
"ZADD": parse_zadd,
"ZMSCORE": parse_zmscore,
}
_RedisCallbacksRESP3 = {
**string_keys_to_dict(
"SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set()
),
**string_keys_to_dict(
"ZRANGE ZINTER ZPOPMAX ZPOPMIN ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE "
"ZUNION HGETALL XREADGROUP",
lambda r, **kwargs: r,
),
**string_keys_to_dict("XREAD XREADGROUP", parse_xread_resp3),
"ACL LOG": lambda r: (
[
{str_if_bytes(key): str_if_bytes(value) for key, value in x.items()}
for x in r
]
if isinstance(r, list)
else bool_ok(r)
),
"COMMAND": parse_command_resp3,
"CONFIG GET": lambda r: {
str_if_bytes(key) if key is not None else None: (
str_if_bytes(value) if value is not None else None
)
for key, value in r.items()
},
"MEMORY STATS": lambda r: {str_if_bytes(key): value for key, value in r.items()},
"SENTINEL MASTER": parse_sentinel_state_resp3,
"SENTINEL MASTERS": parse_sentinel_masters_resp3,
"SENTINEL SENTINELS": parse_sentinel_slaves_and_sentinels_resp3,
"SENTINEL SLAVES": parse_sentinel_slaves_and_sentinels_resp3,
"STRALGO": lambda r, **options: (
{str_if_bytes(key): str_if_bytes(value) for key, value in r.items()}
if isinstance(r, dict)
else str_if_bytes(r)
),
"XINFO CONSUMERS": lambda r: [
{str_if_bytes(key): value for key, value in x.items()} for x in r
],
"XINFO GROUPS": lambda r: [
{str_if_bytes(key): value for key, value in d.items()} for d in r
],
}
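
To ground parse_info above, a tiny illustrative run on a hand-written INFO fragment (the field values are made up, and the import path assumes the upstream redis-py layout):

from redis._parsers.helpers import parse_info

sample = (
    "# Server\r\n"
    "redis_version:7.2.4\r\n"
    "connected_clients:1\r\n"
    "db0:keys=2,expires=0,avg_ttl=0\r\n"
)
info = parse_info(sample)
assert info["connected_clients"] == 1    # plain numbers become ints
assert info["redis_version"] == "7.2.4"  # not a valid float, kept as str
assert info["db0"] == {"keys": 2, "expires": 0, "avg_ttl": 0}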

View File

@@ -0,0 +1,295 @@
import asyncio
import socket
import sys
from logging import getLogger
from typing import Callable, List, Optional, TypedDict, Union
if sys.version_info >= (3, 11):
from asyncio import timeout as async_timeout
else:
from async_timeout import timeout as async_timeout
from ..exceptions import ConnectionError, InvalidResponse, RedisError
from ..typing import EncodableT
from ..utils import HIREDIS_AVAILABLE
from .base import (
AsyncBaseParser,
AsyncPushNotificationsParser,
BaseParser,
PushNotificationsParser,
)
from .socket import (
NONBLOCKING_EXCEPTION_ERROR_NUMBERS,
NONBLOCKING_EXCEPTIONS,
SENTINEL,
SERVER_CLOSED_CONNECTION_ERROR,
)
# Used to signal that hiredis-py does not have enough data to parse.
# Using `False` or `None` is not reliable, given that the parser can
# return `False` or `None` for legitimate reasons from RESP payloads.
NOT_ENOUGH_DATA = object()
class _HiredisReaderArgs(TypedDict, total=False):
protocolError: Callable[[str], Exception]
replyError: Callable[[str], Exception]
encoding: Optional[str]
errors: Optional[str]
class _HiredisParser(BaseParser, PushNotificationsParser):
"Parser class for connections using Hiredis"
def __init__(self, socket_read_size):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not installed")
self.socket_read_size = socket_read_size
self._buffer = bytearray(socket_read_size)
self.pubsub_push_handler_func = self.handle_pubsub_push_response
self.invalidation_push_handler_func = None
self._hiredis_PushNotificationType = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def handle_pubsub_push_response(self, response):
logger = getLogger("push_response")
logger.debug("Push response: " + str(response))
return response
def on_connect(self, connection, **kwargs):
import hiredis
self._sock = connection._sock
self._socket_timeout = connection.socket_timeout
kwargs = {
"protocolError": InvalidResponse,
"replyError": self.parse_error,
"errors": connection.encoder.encoding_errors,
"notEnoughData": NOT_ENOUGH_DATA,
}
if connection.encoder.decode_responses:
kwargs["encoding"] = connection.encoder.encoding
self._reader = hiredis.Reader(**kwargs)
self._next_response = NOT_ENOUGH_DATA
try:
self._hiredis_PushNotificationType = hiredis.PushNotification
except AttributeError:
# hiredis < 3.2
self._hiredis_PushNotificationType = None
def on_disconnect(self):
self._sock = None
self._reader = None
self._next_response = NOT_ENOUGH_DATA
def can_read(self, timeout):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is NOT_ENOUGH_DATA:
self._next_response = self._reader.gets()
if self._next_response is NOT_ENOUGH_DATA:
return self.read_from_socket(timeout=timeout, raise_on_timeout=False)
return True
def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):
sock = self._sock
custom_timeout = timeout is not SENTINEL
try:
if custom_timeout:
sock.settimeout(timeout)
bufflen = self._sock.recv_into(self._buffer)
if bufflen == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
self._reader.feed(self._buffer, 0, bufflen)
# data was read from the socket and added to the buffer.
# return True to indicate that data was read.
return True
except socket.timeout:
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket")
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
finally:
if custom_timeout:
sock.settimeout(self._socket_timeout)
def read_response(self, disable_decoding=False, push_request=False):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
# _next_response might be cached from a can_read() call
if self._next_response is not NOT_ENOUGH_DATA:
response = self._next_response
self._next_response = NOT_ENOUGH_DATA
if self._hiredis_PushNotificationType is not None and isinstance(
response, self._hiredis_PushNotificationType
):
response = self.handle_push_response(response)
if not push_request:
return self.read_response(
disable_decoding=disable_decoding, push_request=push_request
)
else:
return response
return response
if disable_decoding:
response = self._reader.gets(False)
else:
response = self._reader.gets()
while response is NOT_ENOUGH_DATA:
self.read_from_socket()
if disable_decoding:
response = self._reader.gets(False)
else:
response = self._reader.gets()
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif self._hiredis_PushNotificationType is not None and isinstance(
response, self._hiredis_PushNotificationType
):
response = self.handle_push_response(response)
if not push_request:
return self.read_response(
disable_decoding=disable_decoding, push_request=push_request
)
else:
return response
elif (
isinstance(response, list)
and response
and isinstance(response[0], ConnectionError)
):
raise response[0]
return response
class _AsyncHiredisParser(AsyncBaseParser, AsyncPushNotificationsParser):
"""Async implementation of parser class for connections using Hiredis"""
__slots__ = ("_reader",)
def __init__(self, socket_read_size: int):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not available.")
super().__init__(socket_read_size=socket_read_size)
self._reader = None
self.pubsub_push_handler_func = self.handle_pubsub_push_response
self.invalidation_push_handler_func = None
self._hiredis_PushNotificationType = None
async def handle_pubsub_push_response(self, response):
logger = getLogger("push_response")
logger.debug("Push response: " + str(response))
return response
def on_connect(self, connection):
import hiredis
self._stream = connection._reader
kwargs: _HiredisReaderArgs = {
"protocolError": InvalidResponse,
"replyError": self.parse_error,
"notEnoughData": NOT_ENOUGH_DATA,
}
if connection.encoder.decode_responses:
kwargs["encoding"] = connection.encoder.encoding
kwargs["errors"] = connection.encoder.encoding_errors
self._reader = hiredis.Reader(**kwargs)
self._connected = True
# getattr() returns None on hiredis < 3.2, which has no
# PushNotification type, so no try/except is needed here
self._hiredis_PushNotificationType = getattr(
hiredis, "PushNotification", None
)
def on_disconnect(self):
self._connected = False
async def can_read_destructive(self):
if not self._connected:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._reader.gets() is not NOT_ENOUGH_DATA:
return True
try:
async with async_timeout(0):
return await self.read_from_socket()
except asyncio.TimeoutError:
return False
async def read_from_socket(self):
buffer = await self._stream.read(self._read_size)
if not buffer or not isinstance(buffer, bytes):
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
self._reader.feed(buffer)
# data was read from the socket and added to the buffer.
# return True to indicate that data was read.
return True
async def read_response(
self, disable_decoding: bool = False, push_request: bool = False
) -> Union[EncodableT, List[EncodableT]]:
# If `on_disconnect()` has been called, prohibit any more reads
# even if they could happen because data might be present.
# We still allow reads in progress to finish
if not self._connected:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
if disable_decoding:
response = self._reader.gets(False)
else:
response = self._reader.gets()
while response is NOT_ENOUGH_DATA:
await self.read_from_socket()
if disable_decoding:
response = self._reader.gets(False)
else:
response = self._reader.gets()
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif self._hiredis_PushNotificationType is not None and isinstance(
response, self._hiredis_PushNotificationType
):
response = await self.handle_push_response(response)
if not push_request:
return await self.read_response(
disable_decoding=disable_decoding, push_request=push_request
)
else:
return response
elif (
isinstance(response, list)
and response
and isinstance(response[0], ConnectionError)
):
raise response[0]
return response
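
The NOT_ENOUGH_DATA sentinel used above can be shown in isolation; this sketch assumes a hiredis-py version that accepts the notEnoughData keyword, as the parsers here require:

import hiredis

NOT_ENOUGH_DATA = object()
reader = hiredis.Reader(notEnoughData=NOT_ENOUGH_DATA)
reader.feed(b"+OK\r")                    # an incomplete reply so far
assert reader.gets() is NOT_ENOUGH_DATA  # sentinel: keep reading
reader.feed(b"\n")                       # the reply is now complete
assert reader.gets() == b"OK"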

View File

@@ -0,0 +1,132 @@
from typing import Any, Union
from ..exceptions import ConnectionError, InvalidResponse, ResponseError
from ..typing import EncodableT
from .base import _AsyncRESPBase, _RESPBase
from .socket import SERVER_CLOSED_CONNECTION_ERROR
class _RESP2Parser(_RESPBase):
"""RESP2 protocol implementation"""
def read_response(self, disable_decoding=False):
pos = self._buffer.get_pos() if self._buffer else None
try:
result = self._read_response(disable_decoding=disable_decoding)
except BaseException:
if self._buffer:
self._buffer.rewind(pos)
raise
else:
self._buffer.purge()
return result
def _read_response(self, disable_decoding=False):
raw = self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = raw[:1], raw[1:]
# server returned an error
if byte == b"-":
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# int value
elif byte == b":":
return int(response)
# bulk response
elif byte == b"$" and response == b"-1":
return None
elif byte == b"$":
response = self._buffer.read(int(response))
# multi-bulk response
elif byte == b"*" and response == b"-1":
return None
elif byte == b"*":
response = [
self._read_response(disable_decoding=disable_decoding)
for i in range(int(response))
]
else:
raise InvalidResponse(f"Protocol Error: {raw!r}")
if disable_decoding is False:
response = self.encoder.decode(response)
return response
class _AsyncRESP2Parser(_AsyncRESPBase):
"""Async class for the RESP2 protocol"""
async def read_response(self, disable_decoding: bool = False):
if not self._connected:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._chunks:
# augment parsing buffer with previously read data
self._buffer += b"".join(self._chunks)
self._chunks.clear()
self._pos = 0
response = await self._read_response(disable_decoding=disable_decoding)
# Successfully parsing a response allows us to clear our parsing buffer
self._clear()
return response
async def _read_response(
self, disable_decoding: bool = False
) -> Union[EncodableT, ResponseError, None]:
raw = await self._readline()
response: Any
byte, response = raw[:1], raw[1:]
# server returned an error
if byte == b"-":
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
self._clear() # Successful parse
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# int value
elif byte == b":":
return int(response)
# bulk response
elif byte == b"$" and response == b"-1":
return None
elif byte == b"$":
response = await self._read(int(response))
# multi-bulk response
elif byte == b"*" and response == b"-1":
return None
elif byte == b"*":
response = [
(await self._read_response(disable_decoding))
for _ in range(int(response)) # noqa
]
else:
raise InvalidResponse(f"Protocol Error: {raw!r}")
if disable_decoding is False:
response = self.encoder.decode(response)
return response
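
To make the framing concrete, here is a small, self-contained sketch that decodes a single RESP2 reply from an in-memory stream. It mirrors the branch structure of _read_response above, but decode_resp2 and the sample payload are illustrative assumptions, not redis-py API.

import io

def decode_resp2(stream: io.BytesIO):
    # read one line, including the trailing \r\n, then strip it
    raw = stream.readline()
    byte, payload = raw[:1], raw[1:-2]
    if byte == b"+":  # simple string
        return payload
    if byte == b"-":  # error reply
        return Exception(payload.decode())
    if byte == b":":  # integer
        return int(payload)
    if byte == b"$":  # bulk string
        if payload == b"-1":
            return None
        data = stream.read(int(payload) + 2)  # value plus \r\n
        return data[:-2]
    if byte == b"*":  # array
        if payload == b"-1":
            return None
        return [decode_resp2(stream) for _ in range(int(payload))]
    raise ValueError(f"Protocol Error: {raw!r}")

# b"*2\r\n$3\r\nfoo\r\n:42\r\n" decodes to [b"foo", 42]
assert decode_resp2(io.BytesIO(b"*2\r\n$3\r\nfoo\r\n:42\r\n")) == [b"foo", 42]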

View File

@@ -0,0 +1,257 @@
from logging import getLogger
from typing import Any, Union
from ..exceptions import ConnectionError, InvalidResponse, ResponseError
from ..typing import EncodableT
from .base import (
AsyncPushNotificationsParser,
PushNotificationsParser,
_AsyncRESPBase,
_RESPBase,
)
from .socket import SERVER_CLOSED_CONNECTION_ERROR
class _RESP3Parser(_RESPBase, PushNotificationsParser):
"""RESP3 protocol implementation"""
def __init__(self, socket_read_size):
super().__init__(socket_read_size)
self.pubsub_push_handler_func = self.handle_pubsub_push_response
self.invalidation_push_handler_func = None
def handle_pubsub_push_response(self, response):
logger = getLogger("push_response")
logger.debug("Push response: " + str(response))
return response
def read_response(self, disable_decoding=False, push_request=False):
pos = self._buffer.get_pos() if self._buffer else None
try:
result = self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
except BaseException:
if self._buffer:
self._buffer.rewind(pos)
raise
else:
self._buffer.purge()
return result
def _read_response(self, disable_decoding=False, push_request=False):
raw = self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = raw[:1], raw[1:]
# server returned an error
if byte in (b"-", b"!"):
if byte == b"!":
response = self._buffer.read(int(response))
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# null value
elif byte == b"_":
return None
# int and big int values
elif byte in (b":", b"("):
return int(response)
# double value
elif byte == b",":
return float(response)
# bool value
elif byte == b"#":
return response == b"t"
# bulk response
elif byte == b"$":
response = self._buffer.read(int(response))
# verbatim string response
elif byte == b"=":
response = self._buffer.read(int(response))[4:]
# array response
elif byte == b"*":
response = [
self._read_response(disable_decoding=disable_decoding)
for _ in range(int(response))
]
# set response
elif byte == b"~":
            # redis can return unhashable types (like dict) in a set,
            # so we always return sets as lists, for predictability
response = [
self._read_response(disable_decoding=disable_decoding)
for _ in range(int(response))
]
# map response
elif byte == b"%":
            # We cannot use a dict comprehension to parse the stream:
            # the evaluation order of key:value expressions in a dict
            # comprehension only became defined as left-to-right in
            # Python 3.8
resp_dict = {}
for _ in range(int(response)):
key = self._read_response(disable_decoding=disable_decoding)
resp_dict[key] = self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
response = resp_dict
# push response
elif byte == b">":
response = [
self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
for _ in range(int(response))
]
response = self.handle_push_response(response)
if not push_request:
return self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
else:
return response
else:
raise InvalidResponse(f"Protocol Error: {raw!r}")
if isinstance(response, bytes) and disable_decoding is False:
response = self.encoder.decode(response)
return response
class _AsyncRESP3Parser(_AsyncRESPBase, AsyncPushNotificationsParser):
def __init__(self, socket_read_size):
super().__init__(socket_read_size)
self.pubsub_push_handler_func = self.handle_pubsub_push_response
self.invalidation_push_handler_func = None
async def handle_pubsub_push_response(self, response):
logger = getLogger("push_response")
logger.debug("Push response: " + str(response))
return response
async def read_response(
self, disable_decoding: bool = False, push_request: bool = False
):
if self._chunks:
# augment parsing buffer with previously read data
self._buffer += b"".join(self._chunks)
self._chunks.clear()
self._pos = 0
response = await self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
# Successfully parsing a response allows us to clear our parsing buffer
self._clear()
return response
async def _read_response(
self, disable_decoding: bool = False, push_request: bool = False
) -> Union[EncodableT, ResponseError, None]:
if not self._stream or not self.encoder:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
raw = await self._readline()
response: Any
byte, response = raw[:1], raw[1:]
# server returned an error
if byte in (b"-", b"!"):
if byte == b"!":
response = await self._read(int(response))
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
self._clear() # Successful parse
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# null value
elif byte == b"_":
return None
# int and big int values
elif byte in (b":", b"("):
return int(response)
# double value
elif byte == b",":
return float(response)
# bool value
elif byte == b"#":
return response == b"t"
# bulk response
elif byte == b"$":
response = await self._read(int(response))
# verbatim string response
elif byte == b"=":
response = (await self._read(int(response)))[4:]
# array response
elif byte == b"*":
response = [
(await self._read_response(disable_decoding=disable_decoding))
for _ in range(int(response))
]
# set response
elif byte == b"~":
# redis can return unhashable types (like dict) in a set,
# so we always convert to a list, to have predictable return types
response = [
(await self._read_response(disable_decoding=disable_decoding))
for _ in range(int(response))
]
# map response
elif byte == b"%":
            # We cannot use a dict comprehension to parse the stream:
            # the evaluation order of key:value expressions in a dict
            # comprehension only became defined as left-to-right in
            # Python 3.8
resp_dict = {}
for _ in range(int(response)):
key = await self._read_response(disable_decoding=disable_decoding)
resp_dict[key] = await self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
response = resp_dict
# push response
elif byte == b">":
response = [
(
await self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
)
for _ in range(int(response))
]
response = await self.handle_push_response(response)
if not push_request:
return await self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
else:
return response
else:
raise InvalidResponse(f"Protocol Error: {raw!r}")
if isinstance(response, bytes) and disable_decoding is False:
response = self.encoder.decode(response)
return response
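
The RESP3 parsers above are selected via the client's protocol option. A minimal usage sketch, assuming a local Redis 6+ server on localhost:6379 (the server and the key name are assumptions):

import redis

# protocol=3 selects the RESP3 parsers; the default, protocol=2,
# selects the RESP2 parsers.
r = redis.Redis(protocol=3)
print(r.ping())        # RESP3 booleans (#) decode to Python bool
print(r.hgetall("h"))  # RESP3 maps (%) decode directly to a dict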

View File

@@ -0,0 +1,162 @@
import errno
import io
import socket
from io import SEEK_END
from typing import Optional, Union
from ..exceptions import ConnectionError, TimeoutError
from ..utils import SSL_AVAILABLE
NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {BlockingIOError: errno.EWOULDBLOCK}
if SSL_AVAILABLE:
import ssl
if hasattr(ssl, "SSLWantReadError"):
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2
else:
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2
NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
SENTINEL = object()
SYM_CRLF = b"\r\n"
class SocketBuffer:
def __init__(
self, socket: socket.socket, socket_read_size: int, socket_timeout: float
):
self._sock = socket
self.socket_read_size = socket_read_size
self.socket_timeout = socket_timeout
self._buffer = io.BytesIO()
def unread_bytes(self) -> int:
"""
Remaining unread length of buffer
"""
pos = self._buffer.tell()
end = self._buffer.seek(0, SEEK_END)
self._buffer.seek(pos)
return end - pos
def _read_from_socket(
self,
length: Optional[int] = None,
timeout: Union[float, object] = SENTINEL,
raise_on_timeout: Optional[bool] = True,
) -> bool:
sock = self._sock
socket_read_size = self.socket_read_size
marker = 0
custom_timeout = timeout is not SENTINEL
buf = self._buffer
current_pos = buf.tell()
buf.seek(0, SEEK_END)
if custom_timeout:
sock.settimeout(timeout)
try:
while True:
data = self._sock.recv(socket_read_size)
                # an empty bytes object indicates the server shut down the socket
if isinstance(data, bytes) and len(data) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
marker += data_length
if length is not None and length > marker:
continue
return True
except socket.timeout:
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket")
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
finally:
buf.seek(current_pos)
if custom_timeout:
sock.settimeout(self.socket_timeout)
def can_read(self, timeout: float) -> bool:
return bool(self.unread_bytes()) or self._read_from_socket(
timeout=timeout, raise_on_timeout=False
)
def read(self, length: int) -> bytes:
length = length + 2 # make sure to read the \r\n terminator
        # BytesIO will return less than requested if the buffer is short
data = self._buffer.read(length)
missing = length - len(data)
if missing:
# fill up the buffer and read the remainder
self._read_from_socket(missing)
data += self._buffer.read(missing)
return data[:-2]
def readline(self) -> bytes:
buf = self._buffer
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
self._read_from_socket()
data += buf.readline()
return data[:-2]
def get_pos(self) -> int:
"""
Get current read position
"""
return self._buffer.tell()
def rewind(self, pos: int) -> None:
"""
Rewind the buffer to a specific position, to re-start reading
"""
self._buffer.seek(pos)
def purge(self) -> None:
"""
After a successful read, purge the read part of buffer
"""
unread = self.unread_bytes()
# Only if we have read all of the buffer do we truncate, to
# reduce the amount of memory thrashing. This heuristic
# can be changed or removed later.
        if unread > 0:
            return
        # NOTE: unread == 0 past this point, so the branch below never
        # runs; it is kept to show how unread data would be compacted to
        # the front if partial purging were re-enabled.
        if unread > 0:
# move unread data to the front
view = self._buffer.getbuffer()
view[:unread] = view[-unread:]
self._buffer.truncate(unread)
self._buffer.seek(0)
def close(self) -> None:
try:
self._buffer.close()
except Exception:
# issue #633 suggests the purge/close somehow raised a
# BadFileDescriptor error. Perhaps the client ran out of
# memory or something else? It's probably OK to ignore
# any error being raised from purge/close since we're
# removing the reference to the instance below.
pass
self._buffer = None
self._sock = None
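
A minimal sketch of SocketBuffer's line- and length-delimited reads, driven over a socketpair so it runs without a server. It assumes the import path redis._parsers.socket for the module above; the payload is made up.

import socket

from redis._parsers.socket import SocketBuffer

a, b = socket.socketpair()
a.sendall(b"+OK\r\n$3\r\nfoo\r\n")
buf = SocketBuffer(b, socket_read_size=65536, socket_timeout=1.0)
print(buf.readline())  # b'+OK'  (CRLF terminator stripped)
print(buf.readline())  # b'$3'
print(buf.read(3))     # b'foo'  (reads 3 bytes plus the CRLF terminator)
buf.close()
a.close()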

View File

@@ -0,0 +1,64 @@
from redis.asyncio.client import Redis, StrictRedis
from redis.asyncio.cluster import RedisCluster
from redis.asyncio.connection import (
BlockingConnectionPool,
Connection,
ConnectionPool,
SSLConnection,
UnixDomainSocketConnection,
)
from redis.asyncio.sentinel import (
Sentinel,
SentinelConnectionPool,
SentinelManagedConnection,
SentinelManagedSSLConnection,
)
from redis.asyncio.utils import from_url
from redis.backoff import default_backoff
from redis.exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
DataError,
InvalidResponse,
OutOfMemoryError,
PubSubError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
__all__ = [
"AuthenticationError",
"AuthenticationWrongNumberOfArgsError",
"BlockingConnectionPool",
"BusyLoadingError",
"ChildDeadlockedError",
"Connection",
"ConnectionError",
"ConnectionPool",
"DataError",
"from_url",
"default_backoff",
"InvalidResponse",
"PubSubError",
"OutOfMemoryError",
"ReadOnlyError",
"Redis",
"RedisCluster",
"RedisError",
"ResponseError",
"Sentinel",
"SentinelConnectionPool",
"SentinelManagedConnection",
"SentinelManagedSSLConnection",
"SSLConnection",
"StrictRedis",
"TimeoutError",
"UnixDomainSocketConnection",
"WatchError",
]
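
A minimal quickstart for the asyncio client exported above, assuming a Redis server on localhost:6379:

import asyncio

import redis.asyncio as redis

async def main():
    r = redis.Redis()
    await r.set("greeting", "hello")
    print(await r.get("greeting"))  # b'hello'
    await r.aclose()

asyncio.run(main())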

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,334 @@
import asyncio
import logging
import threading
import uuid
from types import SimpleNamespace
from typing import TYPE_CHECKING, Awaitable, Optional, Union
from redis.exceptions import LockError, LockNotOwnedError
from redis.typing import Number
if TYPE_CHECKING:
from redis.asyncio import Redis, RedisCluster
logger = logging.getLogger(__name__)
class Lock:
"""
A shared, distributed Lock. Using Redis for locking allows the Lock
to be shared across processes and/or machines.
It's left to the user to resolve deadlock issues and make sure
multiple clients play nicely together.
"""
lua_release = None
lua_extend = None
lua_reacquire = None
# KEYS[1] - lock name
# ARGV[1] - token
# return 1 if the lock was released, otherwise 0
LUA_RELEASE_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
redis.call('del', KEYS[1])
return 1
"""
# KEYS[1] - lock name
# ARGV[1] - token
# ARGV[2] - additional milliseconds
# ARGV[3] - "0" if the additional time should be added to the lock's
# existing ttl or "1" if the existing ttl should be replaced
    # return 1 if the lock's TTL was extended, otherwise 0
LUA_EXTEND_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
local expiration = redis.call('pttl', KEYS[1])
if not expiration then
expiration = 0
end
if expiration < 0 then
return 0
end
local newttl = ARGV[2]
if ARGV[3] == "0" then
newttl = ARGV[2] + expiration
end
redis.call('pexpire', KEYS[1], newttl)
return 1
"""
# KEYS[1] - lock name
# ARGV[1] - token
# ARGV[2] - milliseconds
    # return 1 if the lock's TTL was reacquired, otherwise 0
LUA_REACQUIRE_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
redis.call('pexpire', KEYS[1], ARGV[2])
return 1
"""
def __init__(
self,
redis: Union["Redis", "RedisCluster"],
name: Union[str, bytes, memoryview],
timeout: Optional[float] = None,
sleep: float = 0.1,
blocking: bool = True,
blocking_timeout: Optional[Number] = None,
thread_local: bool = True,
raise_on_release_error: bool = True,
):
"""
Create a new Lock instance named ``name`` using the Redis client
supplied by ``redis``.
``timeout`` indicates a maximum life for the lock in seconds.
By default, it will remain locked until release() is called.
``timeout`` can be specified as a float or integer, both representing
the number of seconds to wait.
``sleep`` indicates the amount of time to sleep in seconds per loop
iteration when the lock is in blocking mode and another client is
currently holding the lock.
        ``blocking`` indicates whether calling ``acquire`` should block until
        the lock has been acquired or fail immediately, in which case
        ``acquire`` returns False and the lock is not acquired. Defaults to True.
Note this value can be overridden by passing a ``blocking``
argument to ``acquire``.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
                 able to successfully release thread-2's lock.
``raise_on_release_error`` indicates whether to raise an exception when
the lock is no longer owned when exiting the context manager. By default,
this is True, meaning an exception will be raised. If False, the warning
will be logged and the exception will be suppressed.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage.
"""
self.redis = redis
self.name = name
self.timeout = timeout
self.sleep = sleep
self.blocking = blocking
self.blocking_timeout = blocking_timeout
self.thread_local = bool(thread_local)
self.local = threading.local() if self.thread_local else SimpleNamespace()
self.raise_on_release_error = raise_on_release_error
self.local.token = None
self.register_scripts()
def register_scripts(self):
cls = self.__class__
client = self.redis
if cls.lua_release is None:
cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
if cls.lua_extend is None:
cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
if cls.lua_reacquire is None:
cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)
async def __aenter__(self):
if await self.acquire():
return self
raise LockError("Unable to acquire lock within the time specified")
async def __aexit__(self, exc_type, exc_value, traceback):
try:
await self.release()
except LockError:
if self.raise_on_release_error:
raise
logger.warning(
"Lock was unlocked or no longer owned when exiting context manager."
)
async def acquire(
self,
blocking: Optional[bool] = None,
blocking_timeout: Optional[Number] = None,
token: Optional[Union[str, bytes]] = None,
):
"""
Use Redis to hold a shared, distributed lock named ``name``.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False.
``blocking_timeout`` specifies the maximum number of seconds to
wait trying to acquire the lock.
``token`` specifies the token value to be used. If provided, token
must be a bytes object or a string that can be encoded to a bytes
object with the default encoding. If a token isn't specified, a UUID
will be generated.
"""
sleep = self.sleep
if token is None:
token = uuid.uuid1().hex.encode()
else:
try:
encoder = self.redis.connection_pool.get_encoder()
except AttributeError:
# Cluster
encoder = self.redis.get_encoder()
token = encoder.encode(token)
if blocking is None:
blocking = self.blocking
if blocking_timeout is None:
blocking_timeout = self.blocking_timeout
stop_trying_at = None
if blocking_timeout is not None:
stop_trying_at = asyncio.get_running_loop().time() + blocking_timeout
while True:
if await self.do_acquire(token):
self.local.token = token
return True
if not blocking:
return False
next_try_at = asyncio.get_running_loop().time() + sleep
if stop_trying_at is not None and next_try_at > stop_trying_at:
return False
await asyncio.sleep(sleep)
async def do_acquire(self, token: Union[str, bytes]) -> bool:
if self.timeout:
# convert to milliseconds
timeout = int(self.timeout * 1000)
else:
timeout = None
if await self.redis.set(self.name, token, nx=True, px=timeout):
return True
return False
async def locked(self) -> bool:
"""
Returns True if this key is locked by any process, otherwise False.
"""
return await self.redis.get(self.name) is not None
async def owned(self) -> bool:
"""
Returns True if this key is locked by this lock, otherwise False.
"""
stored_token = await self.redis.get(self.name)
# need to always compare bytes to bytes
# TODO: this can be simplified when the context manager is finished
if stored_token and not isinstance(stored_token, bytes):
try:
encoder = self.redis.connection_pool.get_encoder()
except AttributeError:
# Cluster
encoder = self.redis.get_encoder()
stored_token = encoder.encode(stored_token)
return self.local.token is not None and stored_token == self.local.token
def release(self) -> Awaitable[None]:
"""Releases the already acquired lock"""
expected_token = self.local.token
if expected_token is None:
raise LockError(
"Cannot release a lock that's not owned or is already unlocked.",
lock_name=self.name,
)
self.local.token = None
return self.do_release(expected_token)
async def do_release(self, expected_token: bytes) -> None:
if not bool(
await self.lua_release(
keys=[self.name], args=[expected_token], client=self.redis
)
):
raise LockNotOwnedError("Cannot release a lock that's no longer owned")
def extend(
self, additional_time: Number, replace_ttl: bool = False
) -> Awaitable[bool]:
"""
Adds more time to an already acquired lock.
``additional_time`` can be specified as an integer or a float, both
representing the number of seconds to add.
``replace_ttl`` if False (the default), add `additional_time` to
the lock's existing ttl. If True, replace the lock's ttl with
`additional_time`.
"""
if self.local.token is None:
raise LockError("Cannot extend an unlocked lock")
if self.timeout is None:
raise LockError("Cannot extend a lock with no timeout")
return self.do_extend(additional_time, replace_ttl)
async def do_extend(self, additional_time, replace_ttl) -> bool:
additional_time = int(additional_time * 1000)
if not bool(
await self.lua_extend(
keys=[self.name],
args=[self.local.token, additional_time, replace_ttl and "1" or "0"],
client=self.redis,
)
):
raise LockNotOwnedError("Cannot extend a lock that's no longer owned")
return True
def reacquire(self) -> Awaitable[bool]:
"""
Resets a TTL of an already acquired lock back to a timeout value.
"""
if self.local.token is None:
raise LockError("Cannot reacquire an unlocked lock")
if self.timeout is None:
raise LockError("Cannot reacquire a lock with no timeout")
return self.do_reacquire()
async def do_reacquire(self) -> bool:
timeout = int(self.timeout * 1000)
if not bool(
await self.lua_reacquire(
keys=[self.name], args=[self.local.token, timeout], client=self.redis
)
):
raise LockNotOwnedError("Cannot reacquire a lock that's no longer owned")
return True
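
A minimal sketch of the async Lock as a context manager, assuming a local Redis server; the lock name and timeouts are illustrative. Redis.lock() constructs the Lock above, and __aenter__/__aexit__ handle acquire and release.

import asyncio

import redis.asyncio as redis

async def main():
    r = redis.Redis()
    async with r.lock("my-resource", timeout=5, blocking_timeout=2):
        # critical section: the key "my-resource" holds this task's token
        await asyncio.sleep(0.1)
    await r.aclose()

asyncio.run(main())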

View File

@@ -0,0 +1,58 @@
from asyncio import sleep
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Tuple, Type, TypeVar
from redis.exceptions import ConnectionError, RedisError, TimeoutError
from redis.retry import AbstractRetry
T = TypeVar("T")
if TYPE_CHECKING:
from redis.backoff import AbstractBackoff
class Retry(AbstractRetry[RedisError]):
__hash__ = AbstractRetry.__hash__
def __init__(
self,
backoff: "AbstractBackoff",
retries: int,
supported_errors: Tuple[Type[RedisError], ...] = (
ConnectionError,
TimeoutError,
),
):
super().__init__(backoff, retries, supported_errors)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Retry):
return NotImplemented
return (
self._backoff == other._backoff
and self._retries == other._retries
and set(self._supported_errors) == set(other._supported_errors)
)
async def call_with_retry(
self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any]
) -> T:
"""
Execute an operation that might fail and returns its result, or
raise the exception that was thrown depending on the `Backoff` object.
`do`: the operation to call. Expects no argument.
`fail`: the failure handler, expects the last error that was thrown
"""
self._backoff.reset()
failures = 0
while True:
try:
return await do()
except self._supported_errors as error:
failures += 1
await fail(error)
if self._retries >= 0 and failures > self._retries:
raise error
backoff = self._backoff.compute(failures)
if backoff > 0:
await sleep(backoff)
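
A minimal sketch wiring the async Retry into a client, assuming a local server: three retries with exponential backoff on ConnectionError/TimeoutError (the default supported_errors).

import redis.asyncio as redis
from redis.asyncio.retry import Retry
from redis.backoff import ExponentialBackoff

retry = Retry(ExponentialBackoff(cap=1.0, base=0.01), retries=3)
r = redis.Redis(retry=retry)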

View File

@@ -0,0 +1,404 @@
import asyncio
import random
import weakref
from typing import AsyncIterator, Iterable, Mapping, Optional, Sequence, Tuple, Type
from redis.asyncio.client import Redis
from redis.asyncio.connection import (
Connection,
ConnectionPool,
EncodableT,
SSLConnection,
)
from redis.commands import AsyncSentinelCommands
from redis.exceptions import (
ConnectionError,
ReadOnlyError,
ResponseError,
TimeoutError,
)
class MasterNotFoundError(ConnectionError):
pass
class SlaveNotFoundError(ConnectionError):
pass
class SentinelManagedConnection(Connection):
def __init__(self, **kwargs):
self.connection_pool = kwargs.pop("connection_pool")
super().__init__(**kwargs)
def __repr__(self):
s = f"<{self.__class__.__module__}.{self.__class__.__name__}"
if self.host:
host_info = f",host={self.host},port={self.port}"
s += host_info
return s + ")>"
async def connect_to(self, address):
self.host, self.port = address
await self.connect_check_health(
check_health=self.connection_pool.check_connection,
retry_socket_connect=False,
)
async def _connect_retry(self):
if self._reader:
return # already connected
if self.connection_pool.is_master:
await self.connect_to(await self.connection_pool.get_master_address())
else:
async for slave in self.connection_pool.rotate_slaves():
try:
return await self.connect_to(slave)
except ConnectionError:
continue
        raise SlaveNotFoundError  # should never be reached
async def connect(self):
return await self.retry.call_with_retry(
self._connect_retry,
lambda error: asyncio.sleep(0),
)
async def read_response(
self,
disable_decoding: bool = False,
timeout: Optional[float] = None,
*,
        disconnect_on_error: Optional[bool] = True,
push_request: Optional[bool] = False,
):
try:
return await super().read_response(
disable_decoding=disable_decoding,
timeout=timeout,
disconnect_on_error=disconnect_on_error,
push_request=push_request,
)
except ReadOnlyError:
if self.connection_pool.is_master:
                # When talking to a master, a ReadOnlyError likely
                # indicates that the previous master that we're still
                # connected to has been demoted to a slave and there's
                # a new master.
# calling disconnect will force the connection to re-query
# sentinel during the next connect() attempt.
await self.disconnect()
raise ConnectionError("The previous master is now a slave")
raise
class SentinelManagedSSLConnection(SentinelManagedConnection, SSLConnection):
pass
class SentinelConnectionPool(ConnectionPool):
"""
Sentinel backed connection pool.
    If the ``check_connection`` flag is set to True, SentinelManagedConnection
sends a PING command right after establishing the connection.
"""
def __init__(self, service_name, sentinel_manager, **kwargs):
kwargs["connection_class"] = kwargs.get(
"connection_class",
(
SentinelManagedSSLConnection
if kwargs.pop("ssl", False)
else SentinelManagedConnection
),
)
self.is_master = kwargs.pop("is_master", True)
self.check_connection = kwargs.pop("check_connection", False)
super().__init__(**kwargs)
self.connection_kwargs["connection_pool"] = weakref.proxy(self)
self.service_name = service_name
self.sentinel_manager = sentinel_manager
self.master_address = None
self.slave_rr_counter = None
def __repr__(self):
return (
f"<{self.__class__.__module__}.{self.__class__.__name__}"
f"(service={self.service_name}({self.is_master and 'master' or 'slave'}))>"
)
def reset(self):
super().reset()
self.master_address = None
self.slave_rr_counter = None
def owns_connection(self, connection: Connection):
check = not self.is_master or (
self.is_master and self.master_address == (connection.host, connection.port)
)
return check and super().owns_connection(connection)
async def get_master_address(self):
master_address = await self.sentinel_manager.discover_master(self.service_name)
if self.is_master:
if self.master_address != master_address:
self.master_address = master_address
# disconnect any idle connections so that they reconnect
# to the new master the next time that they are used.
await self.disconnect(inuse_connections=False)
return master_address
async def rotate_slaves(self) -> AsyncIterator:
"""Round-robin slave balancer"""
slaves = await self.sentinel_manager.discover_slaves(self.service_name)
if slaves:
if self.slave_rr_counter is None:
self.slave_rr_counter = random.randint(0, len(slaves) - 1)
for _ in range(len(slaves)):
self.slave_rr_counter = (self.slave_rr_counter + 1) % len(slaves)
slave = slaves[self.slave_rr_counter]
yield slave
# Fallback to the master connection
try:
yield await self.get_master_address()
except MasterNotFoundError:
pass
raise SlaveNotFoundError(f"No slave found for {self.service_name!r}")
class Sentinel(AsyncSentinelCommands):
"""
Redis Sentinel cluster client
>>> from redis.sentinel import Sentinel
>>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
>>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
>>> await master.set('foo', 'bar')
>>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
>>> await slave.get('foo')
b'bar'
``sentinels`` is a list of sentinel nodes. Each node is represented by
a pair (hostname, port).
    ``min_other_sentinels`` defines a minimum number of peers for a sentinel.
When querying a sentinel, if it doesn't meet this threshold, responses
from that sentinel won't be considered valid.
``sentinel_kwargs`` is a dictionary of connection arguments used when
connecting to sentinel instances. Any argument that can be passed to
a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
not specified, any socket_timeout and socket_keepalive options specified
in ``connection_kwargs`` will be used.
``connection_kwargs`` are keyword arguments that will be used when
establishing a connection to a Redis server.
"""
def __init__(
self,
sentinels,
min_other_sentinels=0,
sentinel_kwargs=None,
force_master_ip=None,
**connection_kwargs,
):
# if sentinel_kwargs isn't defined, use the socket_* options from
# connection_kwargs
if sentinel_kwargs is None:
sentinel_kwargs = {
k: v for k, v in connection_kwargs.items() if k.startswith("socket_")
}
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [
Redis(host=hostname, port=port, **self.sentinel_kwargs)
for hostname, port in sentinels
]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
self._force_master_ip = force_master_ip
async def execute_command(self, *args, **kwargs):
"""
Execute Sentinel command in sentinel nodes.
once - If set to True, then execute the resulting command on a single
node at random, rather than across the entire sentinel cluster.
"""
once = bool(kwargs.pop("once", False))
# Check if command is supposed to return the original
# responses instead of boolean value.
return_responses = bool(kwargs.pop("return_responses", False))
if once:
response = await random.choice(self.sentinels).execute_command(
*args, **kwargs
)
if return_responses:
return [response]
else:
                return bool(response)
tasks = [
asyncio.Task(sentinel.execute_command(*args, **kwargs))
for sentinel in self.sentinels
]
responses = await asyncio.gather(*tasks)
if return_responses:
return responses
return all(responses)
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
sentinel_addresses.append(
f"{sentinel.connection_pool.connection_kwargs['host']}:"
f"{sentinel.connection_pool.connection_kwargs['port']}"
)
return (
f"<{self.__class__}.{self.__class__.__name__}"
f"(sentinels=[{','.join(sentinel_addresses)}])>"
)
def check_master_state(self, state: dict, service_name: str) -> bool:
if not state["is_master"] or state["is_sdown"] or state["is_odown"]:
return False
        # Check that this sentinel sees enough other sentinels
if state["num-other-sentinels"] < self.min_other_sentinels:
return False
return True
async def discover_master(self, service_name: str):
"""
Asks sentinel servers for the Redis master's address corresponding
to the service labeled ``service_name``.
Returns a pair (address, port) or raises MasterNotFoundError if no
master is found.
"""
collected_errors = list()
for sentinel_no, sentinel in enumerate(self.sentinels):
try:
masters = await sentinel.sentinel_masters()
except (ConnectionError, TimeoutError) as e:
collected_errors.append(f"{sentinel} - {e!r}")
continue
state = masters.get(service_name)
if state and self.check_master_state(state, service_name):
# Put this sentinel at the top of the list
self.sentinels[0], self.sentinels[sentinel_no] = (
sentinel,
self.sentinels[0],
)
ip = (
self._force_master_ip
if self._force_master_ip is not None
else state["ip"]
)
return ip, state["port"]
error_info = ""
if len(collected_errors) > 0:
error_info = f" : {', '.join(collected_errors)}"
raise MasterNotFoundError(f"No master found for {service_name!r}{error_info}")
def filter_slaves(
self, slaves: Iterable[Mapping]
) -> Sequence[Tuple[EncodableT, EncodableT]]:
"""Remove slaves that are in an ODOWN or SDOWN state"""
slaves_alive = []
for slave in slaves:
if slave["is_odown"] or slave["is_sdown"]:
continue
slaves_alive.append((slave["ip"], slave["port"]))
return slaves_alive
async def discover_slaves(
self, service_name: str
) -> Sequence[Tuple[EncodableT, EncodableT]]:
"""Returns a list of alive slaves for service ``service_name``"""
for sentinel in self.sentinels:
try:
slaves = await sentinel.sentinel_slaves(service_name)
except (ConnectionError, ResponseError, TimeoutError):
continue
slaves = self.filter_slaves(slaves)
if slaves:
return slaves
return []
def master_for(
self,
service_name: str,
redis_class: Type[Redis] = Redis,
connection_pool_class: Type[SentinelConnectionPool] = SentinelConnectionPool,
**kwargs,
):
"""
Returns a redis client instance for the ``service_name`` master.
Sentinel client will detect failover and reconnect Redis clients
automatically.
A :py:class:`~redis.sentinel.SentinelConnectionPool` class is
used to retrieve the master's address before establishing a new
connection.
NOTE: If the master's address has changed, any cached connections to
the old master are closed.
        By default, clients will be :py:class:`~redis.Redis` instances.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to
use. The :py:class:`~redis.sentinel.SentinelConnectionPool`
will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs["is_master"] = True
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
connection_pool = connection_pool_class(service_name, self, **connection_kwargs)
# The Redis object "owns" the pool
return redis_class.from_pool(connection_pool)
def slave_for(
self,
service_name: str,
redis_class: Type[Redis] = Redis,
connection_pool_class: Type[SentinelConnectionPool] = SentinelConnectionPool,
**kwargs,
):
"""
Returns redis client instance for the ``service_name`` slave(s).
A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
        By default, clients will be :py:class:`~redis.Redis` instances.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs["is_master"] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
connection_pool = connection_pool_class(service_name, self, **connection_kwargs)
# The Redis object "owns" the pool
return redis_class.from_pool(connection_pool)
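
Beyond the docstring example above, discovery can also be used directly. A minimal sketch, assuming a sentinel on localhost:26379 monitoring a service named "mymaster":

import asyncio

from redis.asyncio.sentinel import Sentinel

async def main():
    sentinel = Sentinel([("localhost", 26379)], socket_timeout=0.5)
    print(await sentinel.discover_master("mymaster"))  # e.g. ('127.0.0.1', 6379)
    print(await sentinel.discover_slaves("mymaster"))  # [] if none are alive

asyncio.run(main())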

View File

@@ -0,0 +1,28 @@
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from redis.asyncio.client import Pipeline, Redis
def from_url(url, **kwargs):
"""
Returns an active Redis client generated from the given database URL.
    Will attempt to extract the database id from the URL path, if none
    is provided.
"""
from redis.asyncio.client import Redis
return Redis.from_url(url, **kwargs)
class pipeline: # noqa: N801
def __init__(self, redis_obj: "Redis"):
self.p: "Pipeline" = redis_obj.pipeline()
async def __aenter__(self) -> "Pipeline":
return self.p
async def __aexit__(self, exc_type, exc_value, traceback):
await self.p.execute()
del self.p
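
A minimal sketch of the pipeline context manager above, assuming a local server: commands queue inside the block and execute() runs automatically on exit.

import asyncio

from redis.asyncio import Redis
from redis.asyncio.utils import pipeline

async def main():
    r = Redis()
    async with pipeline(r) as pipe:
        pipe.set("a", 1)  # buffered, not yet sent
        pipe.get("a")
    # pipe.execute() has already run on __aexit__
    await r.aclose()

asyncio.run(main())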

View File

@@ -0,0 +1,31 @@
from typing import Iterable
class RequestTokenErr(Exception):
"""
Represents an exception during token request.
"""
def __init__(self, *args):
super().__init__(*args)
class InvalidTokenSchemaErr(Exception):
"""
Represents an exception related to invalid token schema.
"""
    def __init__(self, missing_fields: Iterable[str] = ()):
super().__init__(
"Unexpected token schema. Following fields are missing: "
+ ", ".join(missing_fields)
)
class TokenRenewalErr(Exception):
"""
Represents an exception during token renewal process.
"""
def __init__(self, *args):
super().__init__(*args)

View File

@@ -0,0 +1,28 @@
from abc import ABC, abstractmethod
from redis.auth.token import TokenInterface
"""
This interface is the facade of an identity provider
"""
class IdentityProviderInterface(ABC):
"""
    Receives a token from the identity provider.
    Receiving a token only works when the caller is authenticated.
"""
@abstractmethod
def request_token(self, force_refresh=False) -> TokenInterface:
pass
class IdentityProviderConfigInterface(ABC):
"""
Configuration class that provides a configured identity provider.
"""
@abstractmethod
def get_provider(self) -> IdentityProviderInterface:
pass
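
A hypothetical provider, for illustration only: it mints a static token valid for one hour. Real implementations would call an external identity provider; StaticIdentityProvider is not redis-py API.

from datetime import datetime, timezone

from redis.auth.idp import IdentityProviderInterface
from redis.auth.token import SimpleToken, TokenInterface

class StaticIdentityProvider(IdentityProviderInterface):
    def request_token(self, force_refresh=False) -> TokenInterface:
        now_ms = datetime.now(timezone.utc).timestamp() * 1000
        return SimpleToken(
            value="secret-token",
            expires_at_ms=now_ms + 3_600_000,  # one hour from now
            received_at_ms=now_ms,
            claims={"sub": "app-user"},
        )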

View File

@@ -0,0 +1,130 @@
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from redis.auth.err import InvalidTokenSchemaErr
class TokenInterface(ABC):
@abstractmethod
def is_expired(self) -> bool:
pass
@abstractmethod
def ttl(self) -> float:
pass
@abstractmethod
def try_get(self, key: str) -> str:
pass
@abstractmethod
def get_value(self) -> str:
pass
@abstractmethod
def get_expires_at_ms(self) -> float:
pass
@abstractmethod
def get_received_at_ms(self) -> float:
pass
class TokenResponse:
def __init__(self, token: TokenInterface):
self._token = token
def get_token(self) -> TokenInterface:
return self._token
def get_ttl_ms(self) -> float:
return self._token.get_expires_at_ms() - self._token.get_received_at_ms()
class SimpleToken(TokenInterface):
def __init__(
self, value: str, expires_at_ms: float, received_at_ms: float, claims: dict
) -> None:
self.value = value
self.expires_at = expires_at_ms
self.received_at = received_at_ms
self.claims = claims
def ttl(self) -> float:
if self.expires_at == -1:
return -1
return self.expires_at - (datetime.now(timezone.utc).timestamp() * 1000)
def is_expired(self) -> bool:
if self.expires_at == -1:
return False
return self.ttl() <= 0
def try_get(self, key: str) -> str:
return self.claims.get(key)
def get_value(self) -> str:
return self.value
def get_expires_at_ms(self) -> float:
return self.expires_at
def get_received_at_ms(self) -> float:
return self.received_at
class JWToken(TokenInterface):
REQUIRED_FIELDS = {"exp"}
def __init__(self, token: str):
try:
import jwt
except ImportError as ie:
raise ImportError(
f"The PyJWT library is required for {self.__class__.__name__}.",
) from ie
self._value = token
self._decoded = jwt.decode(
self._value,
options={"verify_signature": False},
algorithms=[jwt.get_unverified_header(self._value).get("alg")],
)
self._validate_token()
def is_expired(self) -> bool:
exp = self._decoded["exp"]
if exp == -1:
return False
return (
self._decoded["exp"] * 1000 <= datetime.now(timezone.utc).timestamp() * 1000
)
def ttl(self) -> float:
exp = self._decoded["exp"]
if exp == -1:
return -1
return (
self._decoded["exp"] * 1000 - datetime.now(timezone.utc).timestamp() * 1000
)
def try_get(self, key: str) -> str:
return self._decoded.get(key)
def get_value(self) -> str:
return self._value
def get_expires_at_ms(self) -> float:
return float(self._decoded["exp"] * 1000)
def get_received_at_ms(self) -> float:
return datetime.now(timezone.utc).timestamp() * 1000
    def _validate_token(self):
        missing_fields = self.REQUIRED_FIELDS - set(self._decoded.keys())
        if missing_fields:
            raise InvalidTokenSchemaErr(missing_fields)
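
A minimal sketch of SimpleToken's TTL accounting; all timestamps are milliseconds since the epoch, per the interface above, and the values are made up.

from datetime import datetime, timezone

from redis.auth.token import SimpleToken

now_ms = datetime.now(timezone.utc).timestamp() * 1000
token = SimpleToken("t0ken", now_ms + 5_000, now_ms, {"oid": "user-1"})
print(token.is_expired())    # False
print(round(token.ttl()))    # ~5000 milliseconds remaining
print(token.try_get("oid"))  # 'user-1'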

View File

@@ -0,0 +1,370 @@
import asyncio
import logging
import threading
from datetime import datetime, timezone
from time import sleep
from typing import Any, Awaitable, Callable, Union
from redis.auth.err import RequestTokenErr, TokenRenewalErr
from redis.auth.idp import IdentityProviderInterface
from redis.auth.token import TokenResponse
logger = logging.getLogger(__name__)
class CredentialsListener:
"""
    Listener that is notified of events related to credentials.
    Accepts both plain callbacks and awaitable callbacks.
"""
def __init__(self):
self._on_next = None
self._on_error = None
@property
def on_next(self) -> Union[Callable[[Any], None], Awaitable]:
return self._on_next
@on_next.setter
def on_next(self, callback: Union[Callable[[Any], None], Awaitable]) -> None:
self._on_next = callback
@property
def on_error(self) -> Union[Callable[[Exception], None], Awaitable]:
return self._on_error
@on_error.setter
def on_error(self, callback: Union[Callable[[Exception], None], Awaitable]) -> None:
self._on_error = callback
class RetryPolicy:
def __init__(self, max_attempts: int, delay_in_ms: float):
self.max_attempts = max_attempts
self.delay_in_ms = delay_in_ms
def get_max_attempts(self) -> int:
"""
        Number of retry attempts before an exception is raised.
:return: int
"""
return self.max_attempts
def get_delay_in_ms(self) -> float:
"""
        Delay between retries in milliseconds.
        :return: float
"""
return self.delay_in_ms
class TokenManagerConfig:
def __init__(
self,
expiration_refresh_ratio: float,
lower_refresh_bound_millis: int,
token_request_execution_timeout_in_ms: int,
retry_policy: RetryPolicy,
):
self._expiration_refresh_ratio = expiration_refresh_ratio
self._lower_refresh_bound_millis = lower_refresh_bound_millis
self._token_request_execution_timeout_in_ms = (
token_request_execution_timeout_in_ms
)
self._retry_policy = retry_policy
def get_expiration_refresh_ratio(self) -> float:
"""
Represents the ratio of a token's lifetime at which a refresh should be triggered. # noqa: E501
For example, a value of 0.75 means the token should be refreshed
when 75% of its lifetime has elapsed (or when 25% of its lifetime remains).
:return: float
"""
return self._expiration_refresh_ratio
def get_lower_refresh_bound_millis(self) -> int:
"""
        Represents the minimum time in milliseconds before token expiration
        at which a refresh should be triggered.
This value sets a fixed lower bound for when a token refresh should occur,
regardless of the token's total lifetime.
If set to 0 there will be no lower bound and the refresh will be triggered
based on the expirationRefreshRatio only.
:return: int
"""
return self._lower_refresh_bound_millis
def get_token_request_execution_timeout_in_ms(self) -> int:
"""
Represents the maximum time in milliseconds to wait
for a token request to complete.
:return: int
"""
return self._token_request_execution_timeout_in_ms
def get_retry_policy(self) -> RetryPolicy:
"""
Represents the retry policy for token requests.
:return: RetryPolicy
"""
return self._retry_policy
class TokenManager:
def __init__(
self, identity_provider: IdentityProviderInterface, config: TokenManagerConfig
):
self._idp = identity_provider
self._config = config
self._next_timer = None
self._listener = None
self._init_timer = None
self._retries = 0
def __del__(self):
logger.info("Token manager are disposed")
self.stop()
def start(
self,
listener: CredentialsListener,
skip_initial: bool = False,
) -> Callable[[], None]:
self._listener = listener
try:
loop = asyncio.get_running_loop()
except RuntimeError:
            # Run the loop in a separate thread to avoid blocking the main thread.
loop = asyncio.new_event_loop()
thread = threading.Thread(
target=_start_event_loop_in_thread, args=(loop,), daemon=True
)
thread.start()
# Event to block for initial execution.
init_event = asyncio.Event()
self._init_timer = loop.call_later(
0, self._renew_token, skip_initial, init_event
)
logger.info("Token manager started")
# Blocks in thread-safe manner.
asyncio.run_coroutine_threadsafe(init_event.wait(), loop).result()
return self.stop
async def start_async(
self,
listener: CredentialsListener,
block_for_initial: bool = False,
initial_delay_in_ms: float = 0,
skip_initial: bool = False,
) -> Callable[[], None]:
self._listener = listener
loop = asyncio.get_running_loop()
init_event = asyncio.Event()
        # Wrap the async callback with a sync wrapper so it can be
        # scheduled with loop.call_later()
wrapped = _async_to_sync_wrapper(
loop, self._renew_token_async, skip_initial, init_event
)
self._init_timer = loop.call_later(initial_delay_in_ms / 1000, wrapped)
logger.info("Token manager started")
if block_for_initial:
await init_event.wait()
return self.stop
def stop(self):
if self._init_timer is not None:
self._init_timer.cancel()
if self._next_timer is not None:
self._next_timer.cancel()
def acquire_token(self, force_refresh=False) -> TokenResponse:
try:
token = self._idp.request_token(force_refresh)
except RequestTokenErr as e:
if self._retries < self._config.get_retry_policy().get_max_attempts():
self._retries += 1
sleep(self._config.get_retry_policy().get_delay_in_ms() / 1000)
return self.acquire_token(force_refresh)
else:
raise e
self._retries = 0
return TokenResponse(token)
async def acquire_token_async(self, force_refresh=False) -> TokenResponse:
try:
token = self._idp.request_token(force_refresh)
except RequestTokenErr as e:
if self._retries < self._config.get_retry_policy().get_max_attempts():
self._retries += 1
await asyncio.sleep(
self._config.get_retry_policy().get_delay_in_ms() / 1000
)
return await self.acquire_token_async(force_refresh)
else:
raise e
self._retries = 0
return TokenResponse(token)
def _calculate_renewal_delay(self, expire_date: float, issue_date: float) -> float:
delay_for_lower_refresh = self._delay_for_lower_refresh(expire_date)
delay_for_ratio_refresh = self._delay_for_ratio_refresh(expire_date, issue_date)
delay = min(delay_for_ratio_refresh, delay_for_lower_refresh)
return 0 if delay < 0 else delay / 1000
def _delay_for_lower_refresh(self, expire_date: float):
return (
expire_date
- self._config.get_lower_refresh_bound_millis()
- (datetime.now(timezone.utc).timestamp() * 1000)
)
def _delay_for_ratio_refresh(self, expire_date: float, issue_date: float):
token_ttl = expire_date - issue_date
refresh_before = token_ttl - (
token_ttl * self._config.get_expiration_refresh_ratio()
)
return (
expire_date
- refresh_before
- (datetime.now(timezone.utc).timestamp() * 1000)
)
def _renew_token(
self, skip_initial: bool = False, init_event: asyncio.Event = None
):
"""
Task to renew token from identity provider.
Schedules renewal tasks based on token TTL.
"""
try:
token_res = self.acquire_token(force_refresh=True)
delay = self._calculate_renewal_delay(
token_res.get_token().get_expires_at_ms(),
token_res.get_token().get_received_at_ms(),
)
if token_res.get_token().is_expired():
raise TokenRenewalErr("Requested token is expired")
if self._listener.on_next is None:
logger.warning(
"No registered callback for token renewal task. Renewal cancelled"
)
return
if not skip_initial:
try:
self._listener.on_next(token_res.get_token())
except Exception as e:
raise TokenRenewalErr(e)
if delay <= 0:
return
loop = asyncio.get_running_loop()
self._next_timer = loop.call_later(delay, self._renew_token)
logger.info(f"Next token renewal scheduled in {delay} seconds")
return token_res
except Exception as e:
if self._listener.on_error is None:
raise e
self._listener.on_error(e)
finally:
if init_event:
init_event.set()
async def _renew_token_async(
self, skip_initial: bool = False, init_event: asyncio.Event = None
):
"""
Async task to renew tokens from identity provider.
Schedules renewal tasks based on token TTL.
"""
try:
token_res = await self.acquire_token_async(force_refresh=True)
delay = self._calculate_renewal_delay(
token_res.get_token().get_expires_at_ms(),
token_res.get_token().get_received_at_ms(),
)
if token_res.get_token().is_expired():
raise TokenRenewalErr("Requested token is expired")
if self._listener.on_next is None:
logger.warning(
"No registered callback for token renewal task. Renewal cancelled"
)
return
if not skip_initial:
try:
await self._listener.on_next(token_res.get_token())
except Exception as e:
raise TokenRenewalErr(e)
if delay <= 0:
return
loop = asyncio.get_running_loop()
wrapped = _async_to_sync_wrapper(loop, self._renew_token_async)
logger.info(f"Next token renewal scheduled in {delay} seconds")
loop.call_later(delay, wrapped)
except Exception as e:
if self._listener.on_error is None:
raise e
await self._listener.on_error(e)
finally:
if init_event:
init_event.set()
def _async_to_sync_wrapper(loop, coro_func, *args, **kwargs):
"""
Wraps an asynchronous function so it can be used with loop.call_later.
:param loop: The event loop in which the coroutine will be executed.
:param coro_func: The coroutine function to wrap.
:param args: Positional arguments to pass to the coroutine function.
:param kwargs: Keyword arguments to pass to the coroutine function.
:return: A regular function suitable for loop.call_later.
"""
def wrapped():
# Schedule the coroutine in the event loop
asyncio.ensure_future(coro_func(*args, **kwargs), loop=loop)
return wrapped
def _start_event_loop_in_thread(event_loop: asyncio.AbstractEventLoop):
"""
Starts event loop in a thread.
Used to be able to schedule tasks using loop.call_later.
:param event_loop:
:return:
"""
asyncio.set_event_loop(event_loop)
event_loop.run_forever()
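
A minimal sketch wiring the pieces above. StaticIdentityProvider is the hypothetical provider from the earlier idp example, and the config values are illustrative.

from redis.auth.token_manager import (
    CredentialsListener,
    RetryPolicy,
    TokenManager,
    TokenManagerConfig,
)

config = TokenManagerConfig(
    expiration_refresh_ratio=0.75,  # refresh at 75% of token lifetime
    lower_refresh_bound_millis=0,   # no fixed lower bound
    token_request_execution_timeout_in_ms=1000,
    retry_policy=RetryPolicy(max_attempts=3, delay_in_ms=100),
)
manager = TokenManager(StaticIdentityProvider(), config)

listener = CredentialsListener()
listener.on_next = lambda token: print("new token:", token.get_value())
listener.on_error = lambda exc: print("renewal failed:", exc)

stop = manager.start(listener)  # blocks until the initial token arrives
stop()                          # cancel any scheduled renewals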

View File

@@ -0,0 +1,183 @@
import random
from abc import ABC, abstractmethod
# Maximum backoff between each retry in seconds
DEFAULT_CAP = 0.512
# Minimum backoff between each retry in seconds
DEFAULT_BASE = 0.008
class AbstractBackoff(ABC):
"""Backoff interface"""
def reset(self):
"""
Reset internal state before an operation.
`reset` is called once at the beginning of
every call to `Retry.call_with_retry`
"""
pass
@abstractmethod
def compute(self, failures: int) -> float:
"""Compute backoff in seconds upon failure"""
pass
class ConstantBackoff(AbstractBackoff):
"""Constant backoff upon failure"""
def __init__(self, backoff: float) -> None:
"""`backoff`: backoff time in seconds"""
self._backoff = backoff
def __hash__(self) -> int:
return hash((self._backoff,))
def __eq__(self, other) -> bool:
if not isinstance(other, ConstantBackoff):
return NotImplemented
return self._backoff == other._backoff
def compute(self, failures: int) -> float:
return self._backoff
class NoBackoff(ConstantBackoff):
"""No backoff upon failure"""
def __init__(self) -> None:
super().__init__(0)
class ExponentialBackoff(AbstractBackoff):
"""Exponential backoff upon failure"""
def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE):
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
"""
self._cap = cap
self._base = base
def __hash__(self) -> int:
return hash((self._base, self._cap))
def __eq__(self, other) -> bool:
if not isinstance(other, ExponentialBackoff):
return NotImplemented
return self._base == other._base and self._cap == other._cap
def compute(self, failures: int) -> float:
return min(self._cap, self._base * 2**failures)
class FullJitterBackoff(AbstractBackoff):
"""Full jitter backoff upon failure"""
def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
"""
self._cap = cap
self._base = base
def __hash__(self) -> int:
return hash((self._base, self._cap))
def __eq__(self, other) -> bool:
if not isinstance(other, FullJitterBackoff):
return NotImplemented
return self._base == other._base and self._cap == other._cap
def compute(self, failures: int) -> float:
return random.uniform(0, min(self._cap, self._base * 2**failures))
class EqualJitterBackoff(AbstractBackoff):
"""Equal jitter backoff upon failure"""
def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
"""
self._cap = cap
self._base = base
def __hash__(self) -> int:
return hash((self._base, self._cap))
def __eq__(self, other) -> bool:
if not isinstance(other, EqualJitterBackoff):
return NotImplemented
return self._base == other._base and self._cap == other._cap
def compute(self, failures: int) -> float:
temp = min(self._cap, self._base * 2**failures) / 2
return temp + random.uniform(0, temp)
class DecorrelatedJitterBackoff(AbstractBackoff):
"""Decorrelated jitter backoff upon failure"""
def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
"""
self._cap = cap
self._base = base
self._previous_backoff = 0
def __hash__(self) -> int:
return hash((self._base, self._cap))
def __eq__(self, other) -> bool:
if not isinstance(other, DecorrelatedJitterBackoff):
return NotImplemented
return self._base == other._base and self._cap == other._cap
def reset(self) -> None:
self._previous_backoff = 0
def compute(self, failures: int) -> float:
max_backoff = max(self._base, self._previous_backoff * 3)
temp = random.uniform(self._base, max_backoff)
self._previous_backoff = min(self._cap, temp)
return self._previous_backoff
class ExponentialWithJitterBackoff(AbstractBackoff):
"""Exponential backoff upon failure, with jitter"""
def __init__(self, cap: float = DEFAULT_CAP, base: float = DEFAULT_BASE) -> None:
"""
`cap`: maximum backoff time in seconds
`base`: base backoff time in seconds
"""
self._cap = cap
self._base = base
def __hash__(self) -> int:
return hash((self._base, self._cap))
def __eq__(self, other) -> bool:
if not isinstance(other, ExponentialWithJitterBackoff):
return NotImplemented
return self._base == other._base and self._cap == other._cap
def compute(self, failures: int) -> float:
return min(self._cap, random.random() * self._base * 2**failures)
def default_backoff():
return EqualJitterBackoff()
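
A minimal sketch of how these policies are consumed: Retry.call_with_retry passes the consecutive failure count to compute(), which returns the sleep in seconds.

from redis.backoff import EqualJitterBackoff, ExponentialBackoff

exp = ExponentialBackoff(cap=0.512, base=0.008)
for failures in range(1, 6):
    # 0.016, 0.032, 0.064, 0.128, 0.256 (capped at 0.512)
    print(failures, exp.compute(failures))

jitter = EqualJitterBackoff()
print(jitter.compute(3))  # half deterministic, half random: in [0.032, 0.064]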

View File

@@ -0,0 +1,401 @@
from abc import ABC, abstractmethod
from collections import OrderedDict
from dataclasses import dataclass
from enum import Enum
from typing import Any, List, Optional, Union
class CacheEntryStatus(Enum):
VALID = "VALID"
IN_PROGRESS = "IN_PROGRESS"
class EvictionPolicyType(Enum):
time_based = "time_based"
frequency_based = "frequency_based"
@dataclass(frozen=True)
class CacheKey:
command: str
redis_keys: tuple
class CacheEntry:
def __init__(
self,
cache_key: CacheKey,
cache_value: bytes,
status: CacheEntryStatus,
connection_ref,
):
self.cache_key = cache_key
self.cache_value = cache_value
self.status = status
self.connection_ref = connection_ref
def __hash__(self):
return hash(
(self.cache_key, self.cache_value, self.status, self.connection_ref)
)
def __eq__(self, other):
return hash(self) == hash(other)
class EvictionPolicyInterface(ABC):
@property
@abstractmethod
def cache(self):
pass
@cache.setter
def cache(self, value):
pass
@property
@abstractmethod
def type(self) -> EvictionPolicyType:
pass
@abstractmethod
def evict_next(self) -> CacheKey:
pass
@abstractmethod
def evict_many(self, count: int) -> List[CacheKey]:
pass
@abstractmethod
def touch(self, cache_key: CacheKey) -> None:
pass
class CacheConfigurationInterface(ABC):
@abstractmethod
def get_cache_class(self):
pass
@abstractmethod
def get_max_size(self) -> int:
pass
@abstractmethod
def get_eviction_policy(self):
pass
@abstractmethod
def is_exceeds_max_size(self, count: int) -> bool:
pass
@abstractmethod
def is_allowed_to_cache(self, command: str) -> bool:
pass
class CacheInterface(ABC):
@property
@abstractmethod
def collection(self) -> OrderedDict:
pass
@property
@abstractmethod
def config(self) -> CacheConfigurationInterface:
pass
@property
@abstractmethod
def eviction_policy(self) -> EvictionPolicyInterface:
pass
@property
@abstractmethod
def size(self) -> int:
pass
@abstractmethod
def get(self, key: CacheKey) -> Union[CacheEntry, None]:
pass
@abstractmethod
def set(self, entry: CacheEntry) -> bool:
pass
@abstractmethod
def delete_by_cache_keys(self, cache_keys: List[CacheKey]) -> List[bool]:
pass
@abstractmethod
def delete_by_redis_keys(self, redis_keys: List[bytes]) -> List[bool]:
pass
@abstractmethod
def flush(self) -> int:
pass
@abstractmethod
def is_cachable(self, key: CacheKey) -> bool:
pass
class DefaultCache(CacheInterface):
def __init__(
self,
cache_config: CacheConfigurationInterface,
) -> None:
self._cache = OrderedDict()
self._cache_config = cache_config
self._eviction_policy = self._cache_config.get_eviction_policy().value()
self._eviction_policy.cache = self
@property
def collection(self) -> OrderedDict:
return self._cache
@property
def config(self) -> CacheConfigurationInterface:
return self._cache_config
@property
def eviction_policy(self) -> EvictionPolicyInterface:
return self._eviction_policy
@property
def size(self) -> int:
return len(self._cache)
def set(self, entry: CacheEntry) -> bool:
if not self.is_cachable(entry.cache_key):
return False
self._cache[entry.cache_key] = entry
self._eviction_policy.touch(entry.cache_key)
if self._cache_config.is_exceeds_max_size(len(self._cache)):
self._eviction_policy.evict_next()
return True
def get(self, key: CacheKey) -> Union[CacheEntry, None]:
entry = self._cache.get(key, None)
if entry is None:
return None
self._eviction_policy.touch(key)
return entry
def delete_by_cache_keys(self, cache_keys: List[CacheKey]) -> List[bool]:
response = []
for key in cache_keys:
if self.get(key) is not None:
self._cache.pop(key)
response.append(True)
else:
response.append(False)
return response
def delete_by_redis_keys(self, redis_keys: List[bytes]) -> List[bool]:
response = []
keys_to_delete = []
for redis_key in redis_keys:
if isinstance(redis_key, bytes):
redis_key = redis_key.decode()
for cache_key in self._cache:
if redis_key in cache_key.redis_keys:
keys_to_delete.append(cache_key)
response.append(True)
for key in keys_to_delete:
self._cache.pop(key)
return response
def flush(self) -> int:
elem_count = len(self._cache)
self._cache.clear()
return elem_count
def is_cachable(self, key: CacheKey) -> bool:
return self._cache_config.is_allowed_to_cache(key.command)
class LRUPolicy(EvictionPolicyInterface):
def __init__(self):
self.cache = None
@property
def cache(self):
return self._cache
@cache.setter
def cache(self, cache: CacheInterface):
self._cache = cache
@property
def type(self) -> EvictionPolicyType:
return EvictionPolicyType.time_based
def evict_next(self) -> CacheKey:
self._assert_cache()
popped_entry = self._cache.collection.popitem(last=False)
return popped_entry[0]
def evict_many(self, count: int) -> List[CacheKey]:
self._assert_cache()
if count > len(self._cache.collection):
raise ValueError("Evictions count is above cache size")
popped_keys = []
for _ in range(count):
popped_entry = self._cache.collection.popitem(last=False)
popped_keys.append(popped_entry[0])
return popped_keys
def touch(self, cache_key: CacheKey) -> None:
self._assert_cache()
if self._cache.collection.get(cache_key) is None:
raise ValueError("Given entry does not belong to the cache")
self._cache.collection.move_to_end(cache_key)
def _assert_cache(self):
if self.cache is None or not isinstance(self.cache, CacheInterface):
raise ValueError("Eviction policy should be associated with valid cache.")
class EvictionPolicy(Enum):
LRU = LRUPolicy
class CacheConfig(CacheConfigurationInterface):
DEFAULT_CACHE_CLASS = DefaultCache
DEFAULT_EVICTION_POLICY = EvictionPolicy.LRU
DEFAULT_MAX_SIZE = 10000
DEFAULT_ALLOW_LIST = [
"BITCOUNT",
"BITFIELD_RO",
"BITPOS",
"EXISTS",
"GEODIST",
"GEOHASH",
"GEOPOS",
"GEORADIUSBYMEMBER_RO",
"GEORADIUS_RO",
"GEOSEARCH",
"GET",
"GETBIT",
"GETRANGE",
"HEXISTS",
"HGET",
"HGETALL",
"HKEYS",
"HLEN",
"HMGET",
"HSTRLEN",
"HVALS",
"JSON.ARRINDEX",
"JSON.ARRLEN",
"JSON.GET",
"JSON.MGET",
"JSON.OBJKEYS",
"JSON.OBJLEN",
"JSON.RESP",
"JSON.STRLEN",
"JSON.TYPE",
"LCS",
"LINDEX",
"LLEN",
"LPOS",
"LRANGE",
"MGET",
"SCARD",
"SDIFF",
"SINTER",
"SINTERCARD",
"SISMEMBER",
"SMEMBERS",
"SMISMEMBER",
"SORT_RO",
"STRLEN",
"SUBSTR",
"SUNION",
"TS.GET",
"TS.INFO",
"TS.RANGE",
"TS.REVRANGE",
"TYPE",
"XLEN",
"XPENDING",
"XRANGE",
"XREAD",
"XREVRANGE",
"ZCARD",
"ZCOUNT",
"ZDIFF",
"ZINTER",
"ZINTERCARD",
"ZLEXCOUNT",
"ZMSCORE",
"ZRANGE",
"ZRANGEBYLEX",
"ZRANGEBYSCORE",
"ZRANK",
"ZREVRANGE",
"ZREVRANGEBYLEX",
"ZREVRANGEBYSCORE",
"ZREVRANK",
"ZSCORE",
"ZUNION",
]
def __init__(
self,
max_size: int = DEFAULT_MAX_SIZE,
cache_class: Any = DEFAULT_CACHE_CLASS,
eviction_policy: EvictionPolicy = DEFAULT_EVICTION_POLICY,
):
self._cache_class = cache_class
self._max_size = max_size
self._eviction_policy = eviction_policy
def get_cache_class(self):
return self._cache_class
def get_max_size(self) -> int:
return self._max_size
def get_eviction_policy(self) -> EvictionPolicy:
return self._eviction_policy
def is_exceeds_max_size(self, count: int) -> bool:
return count > self._max_size
def is_allowed_to_cache(self, command: str) -> bool:
return command in self.DEFAULT_ALLOW_LIST
class CacheFactoryInterface(ABC):
@abstractmethod
def get_cache(self) -> CacheInterface:
pass
class CacheFactory(CacheFactoryInterface):
def __init__(self, cache_config: Optional[CacheConfig] = None):
self._config = cache_config
if self._config is None:
self._config = CacheConfig()
def get_cache(self) -> CacheInterface:
cache_class = self._config.get_cache_class()
return cache_class(cache_config=self._config)
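# Usage sketch (illustrative, not part of the module): exercising DefaultCache
# through CacheFactory with a tiny max size so the LRU eviction is visible.
if __name__ == "__main__":
    cache = CacheFactory(CacheConfig(max_size=2)).get_cache()
    for name in ("a", "b", "c"):
        cache.set(
            CacheEntry(
                cache_key=CacheKey(command="GET", redis_keys=(name,)),
                cache_value=name.encode(),
                status=CacheEntryStatus.VALID,
                connection_ref=None,
            )
        )
    print(cache.size)  # 2 -- "a" was evicted when the third entry arrived
    print(cache.get(CacheKey(command="GET", redis_keys=("a",))))  # None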

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,18 @@
from .cluster import READ_COMMANDS, AsyncRedisClusterCommands, RedisClusterCommands
from .core import AsyncCoreCommands, CoreCommands
from .helpers import list_or_args
from .redismodules import AsyncRedisModuleCommands, RedisModuleCommands
from .sentinel import AsyncSentinelCommands, SentinelCommands
__all__ = [
"AsyncCoreCommands",
"AsyncRedisClusterCommands",
"AsyncRedisModuleCommands",
"AsyncSentinelCommands",
"CoreCommands",
"READ_COMMANDS",
"RedisClusterCommands",
"RedisModuleCommands",
"SentinelCommands",
"list_or_args",
]

View File

@@ -0,0 +1,253 @@
from redis._parsers.helpers import bool_ok
from ..helpers import get_protocol_version, parse_to_list
from .commands import * # noqa
from .info import BFInfo, CFInfo, CMSInfo, TDigestInfo, TopKInfo
class AbstractBloom:
"""
The client allows you to interact with RedisBloom and use all of
its functionality.
- BF for Bloom Filter
- CF for Cuckoo Filter
- CMS for Count-Min Sketch
- TOPK for TopK Data Structure
- TDIGEST for estimate rank statistics
"""
@staticmethod
def append_items(params, items):
"""Append ITEMS to params."""
params.extend(["ITEMS"])
params += items
@staticmethod
def append_error(params, error):
"""Append ERROR to params."""
if error is not None:
params.extend(["ERROR", error])
@staticmethod
def append_capacity(params, capacity):
"""Append CAPACITY to params."""
if capacity is not None:
params.extend(["CAPACITY", capacity])
@staticmethod
def append_expansion(params, expansion):
"""Append EXPANSION to params."""
if expansion is not None:
params.extend(["EXPANSION", expansion])
@staticmethod
def append_no_scale(params, noScale):
"""Append NONSCALING tag to params."""
if noScale is not None:
params.extend(["NONSCALING"])
@staticmethod
def append_weights(params, weights):
"""Append WEIGHTS to params."""
if len(weights) > 0:
params.append("WEIGHTS")
params += weights
@staticmethod
def append_no_create(params, noCreate):
"""Append NOCREATE tag to params."""
if noCreate is not None:
params.extend(["NOCREATE"])
@staticmethod
def append_items_and_increments(params, items, increments):
"""Append pairs of items and increments to params."""
for i in range(len(items)):
params.append(items[i])
params.append(increments[i])
@staticmethod
def append_values_and_weights(params, items, weights):
"""Append pairs of items and weights to params."""
for i in range(len(items)):
params.append(items[i])
params.append(weights[i])
@staticmethod
def append_max_iterations(params, max_iterations):
"""Append MAXITERATIONS to params."""
if max_iterations is not None:
params.extend(["MAXITERATIONS", max_iterations])
@staticmethod
def append_bucket_size(params, bucket_size):
"""Append BUCKETSIZE to params."""
if bucket_size is not None:
params.extend(["BUCKETSIZE", bucket_size])
class CMSBloom(CMSCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
CMS_INITBYDIM: bool_ok,
CMS_INITBYPROB: bool_ok,
# CMS_INCRBY: spaceHolder,
# CMS_QUERY: spaceHolder,
CMS_MERGE: bool_ok,
}
_RESP2_MODULE_CALLBACKS = {
CMS_INFO: CMSInfo,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = CMSCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
class TOPKBloom(TOPKCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
TOPK_RESERVE: bool_ok,
# TOPK_QUERY: spaceHolder,
# TOPK_COUNT: spaceHolder,
}
_RESP2_MODULE_CALLBACKS = {
TOPK_ADD: parse_to_list,
TOPK_INCRBY: parse_to_list,
TOPK_INFO: TopKInfo,
TOPK_LIST: parse_to_list,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = TOPKCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
class CFBloom(CFCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
CF_RESERVE: bool_ok,
# CF_ADD: spaceHolder,
# CF_ADDNX: spaceHolder,
# CF_INSERT: spaceHolder,
# CF_INSERTNX: spaceHolder,
# CF_EXISTS: spaceHolder,
# CF_DEL: spaceHolder,
# CF_COUNT: spaceHolder,
# CF_SCANDUMP: spaceHolder,
# CF_LOADCHUNK: spaceHolder,
}
_RESP2_MODULE_CALLBACKS = {
CF_INFO: CFInfo,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = CFCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
class TDigestBloom(TDigestCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
TDIGEST_CREATE: bool_ok,
# TDIGEST_RESET: bool_ok,
# TDIGEST_ADD: spaceHolder,
# TDIGEST_MERGE: spaceHolder,
}
_RESP2_MODULE_CALLBACKS = {
TDIGEST_BYRANK: parse_to_list,
TDIGEST_BYREVRANK: parse_to_list,
TDIGEST_CDF: parse_to_list,
TDIGEST_INFO: TDigestInfo,
TDIGEST_MIN: float,
TDIGEST_MAX: float,
TDIGEST_TRIMMED_MEAN: float,
TDIGEST_QUANTILE: parse_to_list,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = TDigestCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
class BFBloom(BFCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
BF_RESERVE: bool_ok,
# BF_ADD: spaceHolder,
# BF_MADD: spaceHolder,
# BF_INSERT: spaceHolder,
# BF_EXISTS: spaceHolder,
# BF_MEXISTS: spaceHolder,
# BF_SCANDUMP: spaceHolder,
# BF_LOADCHUNK: spaceHolder,
# BF_CARD: spaceHolder,
}
_RESP2_MODULE_CALLBACKS = {
BF_INFO: BFInfo,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = BFCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
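# Usage sketch (assumes a RedisBloom-enabled server on localhost:6379): the
# wrappers above are normally reached through the module helpers on the
# client, e.g. Redis.bf() returns a BFBloom bound to that client.
if __name__ == "__main__":
    import redis

    r = redis.Redis()
    bf = r.bf()
    bf.create("bf:demo", errorRate=0.01, capacity=1000)
    bf.add("bf:demo", "item-1")
    print(bf.exists("bf:demo", "item-1"))  # 1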

View File

@@ -0,0 +1,538 @@
from redis.client import NEVER_DECODE
from redis.utils import deprecated_function
BF_RESERVE = "BF.RESERVE"
BF_ADD = "BF.ADD"
BF_MADD = "BF.MADD"
BF_INSERT = "BF.INSERT"
BF_EXISTS = "BF.EXISTS"
BF_MEXISTS = "BF.MEXISTS"
BF_SCANDUMP = "BF.SCANDUMP"
BF_LOADCHUNK = "BF.LOADCHUNK"
BF_INFO = "BF.INFO"
BF_CARD = "BF.CARD"
CF_RESERVE = "CF.RESERVE"
CF_ADD = "CF.ADD"
CF_ADDNX = "CF.ADDNX"
CF_INSERT = "CF.INSERT"
CF_INSERTNX = "CF.INSERTNX"
CF_EXISTS = "CF.EXISTS"
CF_MEXISTS = "CF.MEXISTS"
CF_DEL = "CF.DEL"
CF_COUNT = "CF.COUNT"
CF_SCANDUMP = "CF.SCANDUMP"
CF_LOADCHUNK = "CF.LOADCHUNK"
CF_INFO = "CF.INFO"
CMS_INITBYDIM = "CMS.INITBYDIM"
CMS_INITBYPROB = "CMS.INITBYPROB"
CMS_INCRBY = "CMS.INCRBY"
CMS_QUERY = "CMS.QUERY"
CMS_MERGE = "CMS.MERGE"
CMS_INFO = "CMS.INFO"
TOPK_RESERVE = "TOPK.RESERVE"
TOPK_ADD = "TOPK.ADD"
TOPK_INCRBY = "TOPK.INCRBY"
TOPK_QUERY = "TOPK.QUERY"
TOPK_COUNT = "TOPK.COUNT"
TOPK_LIST = "TOPK.LIST"
TOPK_INFO = "TOPK.INFO"
TDIGEST_CREATE = "TDIGEST.CREATE"
TDIGEST_RESET = "TDIGEST.RESET"
TDIGEST_ADD = "TDIGEST.ADD"
TDIGEST_MERGE = "TDIGEST.MERGE"
TDIGEST_CDF = "TDIGEST.CDF"
TDIGEST_QUANTILE = "TDIGEST.QUANTILE"
TDIGEST_MIN = "TDIGEST.MIN"
TDIGEST_MAX = "TDIGEST.MAX"
TDIGEST_INFO = "TDIGEST.INFO"
TDIGEST_TRIMMED_MEAN = "TDIGEST.TRIMMED_MEAN"
TDIGEST_RANK = "TDIGEST.RANK"
TDIGEST_REVRANK = "TDIGEST.REVRANK"
TDIGEST_BYRANK = "TDIGEST.BYRANK"
TDIGEST_BYREVRANK = "TDIGEST.BYREVRANK"
class BFCommands:
"""Bloom Filter commands."""
def create(self, key, errorRate, capacity, expansion=None, noScale=None):
"""
Create a new Bloom Filter `key` with the desired probability of false
positives `errorRate` and the expected number of entries to be inserted
as `capacity`.
Default expansion value is 2. By default, filter is auto-scaling.
For more information see `BF.RESERVE <https://redis.io/commands/bf.reserve>`_.
""" # noqa
params = [key, errorRate, capacity]
self.append_expansion(params, expansion)
self.append_no_scale(params, noScale)
return self.execute_command(BF_RESERVE, *params)
reserve = create
def add(self, key, item):
"""
Add an `item` to a Bloom Filter `key`.
For more information see `BF.ADD <https://redis.io/commands/bf.add>`_.
""" # noqa
return self.execute_command(BF_ADD, key, item)
def madd(self, key, *items):
"""
Add multiple `items` to a Bloom Filter `key`.
For more information see `BF.MADD <https://redis.io/commands/bf.madd>`_.
""" # noqa
return self.execute_command(BF_MADD, key, *items)
def insert(
self,
key,
items,
capacity=None,
error=None,
noCreate=None,
expansion=None,
noScale=None,
):
"""
Add multiple `items` to a Bloom Filter `key`.
If `noCreate` remains `None` and `key` does not exist, a new Bloom Filter
`key` will be created with the desired probability of false positives
(`error`) and the expected number of entries to be inserted (`capacity`).
For more information see `BF.INSERT <https://redis.io/commands/bf.insert>`_.
""" # noqa
params = [key]
self.append_capacity(params, capacity)
self.append_error(params, error)
self.append_expansion(params, expansion)
self.append_no_create(params, noCreate)
self.append_no_scale(params, noScale)
self.append_items(params, items)
return self.execute_command(BF_INSERT, *params)
def exists(self, key, item):
"""
Check whether an `item` exists in Bloom Filter `key`.
For more information see `BF.EXISTS <https://redis.io/commands/bf.exists>`_.
""" # noqa
return self.execute_command(BF_EXISTS, key, item)
def mexists(self, key, *items):
"""
Check whether `items` exist in Bloom Filter `key`.
For more information see `BF.MEXISTS <https://redis.io/commands/bf.mexists>`_.
""" # noqa
return self.execute_command(BF_MEXISTS, key, *items)
def scandump(self, key, iter):
"""
Begin an incremental save of the bloom filter `key`.
This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model.
The first time this command is called, the value of `iter` should be 0.
This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
For more information see `BF.SCANDUMP <https://redis.io/commands/bf.scandump>`_.
""" # noqa
params = [key, iter]
options = {}
options[NEVER_DECODE] = []
return self.execute_command(BF_SCANDUMP, *params, **options)
def loadchunk(self, key, iter, data):
"""
Restore a filter previously saved using SCANDUMP.
See the SCANDUMP command for example usage.
This command will overwrite any bloom filter stored under key.
Ensure that the bloom filter will not be modified between invocations.
For more information see `BF.LOADCHUNK <https://redis.io/commands/bf.loadchunk>`_.
""" # noqa
return self.execute_command(BF_LOADCHUNK, key, iter, data)
def info(self, key):
"""
Return capacity, size, number of filters, number of items inserted, and expansion rate.
For more information see `BF.INFO <https://redis.io/commands/bf.info>`_.
""" # noqa
return self.execute_command(BF_INFO, key)
def card(self, key):
"""
Returns the cardinality of a Bloom filter - number of items that were added to a Bloom filter and detected as unique
(items that caused at least one bit to be set in at least one sub-filter).
For more information see `BF.CARD <https://redis.io/commands/bf.card>`_.
""" # noqa
return self.execute_command(BF_CARD, key)
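# Sketch of the SCANDUMP/LOADCHUNK pairing described above (an illustrative
# helper, not part of the module; `client` is any redis.Redis connected to a
# RedisBloom-enabled server):
def _copy_bloom_filter(client, src, dst):
    iter_ = 0
    while True:
        # scandump returns successive (iter, data) pairs until (0, None)
        iter_, data = client.bf().scandump(src, iter_)
        if iter_ == 0:
            break
        client.bf().loadchunk(dst, iter_, data)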
class CFCommands:
"""Cuckoo Filter commands."""
def create(
self, key, capacity, expansion=None, bucket_size=None, max_iterations=None
):
"""
Create a new Cuckoo Filter `key` with an initial `capacity` of items.
For more information see `CF.RESERVE <https://redis.io/commands/cf.reserve>`_.
""" # noqa
params = [key, capacity]
self.append_expansion(params, expansion)
self.append_bucket_size(params, bucket_size)
self.append_max_iterations(params, max_iterations)
return self.execute_command(CF_RESERVE, *params)
reserve = create
def add(self, key, item):
"""
Add an `item` to a Cuckoo Filter `key`.
For more information see `CF.ADD <https://redis.io/commands/cf.add>`_.
""" # noqa
return self.execute_command(CF_ADD, key, item)
def addnx(self, key, item):
"""
Add an `item` to a Cuckoo Filter `key` only if item does not yet exist.
This command might be slower than `add`.
For more information see `CF.ADDNX <https://redis.io/commands/cf.addnx>`_.
""" # noqa
return self.execute_command(CF_ADDNX, key, item)
def insert(self, key, items, capacity=None, nocreate=None):
"""
Add multiple `items` to a Cuckoo Filter `key`, allowing the filter
to be created with a custom `capacity` if it does not yet exist.
`items` must be provided as a list.
For more information see `CF.INSERT <https://redis.io/commands/cf.insert>`_.
""" # noqa
params = [key]
self.append_capacity(params, capacity)
self.append_no_create(params, nocreate)
self.append_items(params, items)
return self.execute_command(CF_INSERT, *params)
def insertnx(self, key, items, capacity=None, nocreate=None):
"""
Add multiple `items` to a Cuckoo Filter `key` only if they do not exist yet,
allowing the filter to be created with a custom `capacity` if it does not yet exist.
`items` must be provided as a list.
For more information see `CF.INSERTNX <https://redis.io/commands/cf.insertnx>`_.
""" # noqa
params = [key]
self.append_capacity(params, capacity)
self.append_no_create(params, nocreate)
self.append_items(params, items)
return self.execute_command(CF_INSERTNX, *params)
def exists(self, key, item):
"""
Check whether an `item` exists in Cuckoo Filter `key`.
For more information see `CF.EXISTS <https://redis.io/commands/cf.exists>`_.
""" # noqa
return self.execute_command(CF_EXISTS, key, item)
def mexists(self, key, *items):
"""
Check whether `items` exist in Cuckoo Filter `key`.
For more information see `CF.MEXISTS <https://redis.io/commands/cf.mexists>`_.
""" # noqa
return self.execute_command(CF_MEXISTS, key, *items)
def delete(self, key, item):
"""
Delete `item` from `key`.
For more information see `CF.DEL <https://redis.io/commands/cf.del>`_.
""" # noqa
return self.execute_command(CF_DEL, key, item)
def count(self, key, item):
"""
Return the number of times an `item` may be in the `key`.
For more information see `CF.COUNT <https://redis.io/commands/cf.count>`_.
""" # noqa
return self.execute_command(CF_COUNT, key, item)
def scandump(self, key, iter):
"""
Begin an incremental save of the Cuckoo filter `key`.
This is useful for large Cuckoo filters which cannot fit into the normal
SAVE and RESTORE model.
The first time this command is called, the value of `iter` should be 0.
This command will return successive (iter, data) pairs until
(0, NULL) to indicate completion.
For more information see `CF.SCANDUMP <https://redis.io/commands/cf.scandump>`_.
""" # noqa
return self.execute_command(CF_SCANDUMP, key, iter)
def loadchunk(self, key, iter, data):
"""
Restore a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage.
This command will overwrite any Cuckoo filter stored under key.
Ensure that the Cuckoo filter will not be modified between invocations.
For more information see `CF.LOADCHUNK <https://redis.io/commands/cf.loadchunk>`_.
""" # noqa
return self.execute_command(CF_LOADCHUNK, key, iter, data)
def info(self, key):
"""
Return size, number of buckets, number of filters, number of items inserted,
number of items deleted, bucket size, expansion rate, and max iteration.
For more information see `CF.INFO <https://redis.io/commands/cf.info>`_.
""" # noqa
return self.execute_command(CF_INFO, key)
class TOPKCommands:
"""TOP-k Filter commands."""
def reserve(self, key, k, width, depth, decay):
"""
Create a new Top-K Filter `key` that keeps track of the top `k` items,
using a sketch of `width` x `depth` counters and the given counter `decay`.
For more information see `TOPK.RESERVE <https://redis.io/commands/topk.reserve>`_.
""" # noqa
return self.execute_command(TOPK_RESERVE, key, k, width, depth, decay)
def add(self, key, *items):
"""
Add one `item` or more to a Top-K Filter `key`.
For more information see `TOPK.ADD <https://redis.io/commands/topk.add>`_.
""" # noqa
return self.execute_command(TOPK_ADD, key, *items)
def incrby(self, key, items, increments):
"""
Add/increase `items` to a Top-K Sketch `key` by ``increments``.
Both `items` and `increments` are lists.
For more information see `TOPK.INCRBY <https://redis.io/commands/topk.incrby>`_.
Example:
>>> client.topk().incrby('A', ['foo'], [1])
""" # noqa
params = [key]
self.append_items_and_increments(params, items, increments)
return self.execute_command(TOPK_INCRBY, *params)
def query(self, key, *items):
"""
Check whether one `item` or more is a Top-K item at `key`.
For more information see `TOPK.QUERY <https://redis.io/commands/topk.query>`_.
""" # noqa
return self.execute_command(TOPK_QUERY, key, *items)
@deprecated_function(version="4.4.0", reason="deprecated since redisbloom 2.4.0")
def count(self, key, *items):
"""
Return count for one `item` or more from `key`.
For more information see `TOPK.COUNT <https://redis.io/commands/topk.count>`_.
""" # noqa
return self.execute_command(TOPK_COUNT, key, *items)
def list(self, key, withcount=False):
"""
Return full list of items in Top-K list of `key`.
If `withcount` set to True, return full list of items
with probabilistic count in Top-K list of `key`.
For more information see `TOPK.LIST <https://redis.io/commands/topk.list>`_.
""" # noqa
params = [key]
if withcount:
params.append("WITHCOUNT")
return self.execute_command(TOPK_LIST, *params)
def info(self, key):
"""
Return k, width, depth and decay values of `key`.
For more information see `TOPK.INFO <https://redis.io/commands/topk.info>`_.
""" # noqa
return self.execute_command(TOPK_INFO, key)
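# Usage sketch (assumes a RedisBloom server; the wrapper is reached through
# Redis.topk()): tracking heavy hitters with the commands above.
if __name__ == "__main__":
    import redis

    tk = redis.Redis().topk()
    tk.reserve("tk:demo", 3, 8, 7, 0.9)  # k, width, depth, decay
    tk.add("tk:demo", "a", "a", "b", "c", "c", "c")
    print(tk.list("tk:demo", withcount=True))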
class TDigestCommands:
def create(self, key, compression=100):
"""
Allocate the memory and initialize the t-digest.
For more information see `TDIGEST.CREATE <https://redis.io/commands/tdigest.create>`_.
""" # noqa
return self.execute_command(TDIGEST_CREATE, key, "COMPRESSION", compression)
def reset(self, key):
"""
Reset the sketch `key` to zero - empty out the sketch and re-initialize it.
For more information see `TDIGEST.RESET <https://redis.io/commands/tdigest.reset>`_.
""" # noqa
return self.execute_command(TDIGEST_RESET, key)
def add(self, key, values):
"""
Adds one or more observations to a t-digest sketch `key`.
For more information see `TDIGEST.ADD <https://redis.io/commands/tdigest.add>`_.
""" # noqa
return self.execute_command(TDIGEST_ADD, key, *values)
def merge(self, destination_key, num_keys, *keys, compression=None, override=False):
"""
Merges all of the values from `keys` into the `destination_key` sketch.
It is mandatory to provide the `num_keys` before passing the input keys and
the other (optional) arguments.
If `destination_key` already exists its values are merged with the input keys.
If you wish to override the destination key contents use the `OVERRIDE` parameter.
For more information see `TDIGEST.MERGE <https://redis.io/commands/tdigest.merge>`_.
""" # noqa
params = [destination_key, num_keys, *keys]
if compression is not None:
params.extend(["COMPRESSION", compression])
if override:
params.append("OVERRIDE")
return self.execute_command(TDIGEST_MERGE, *params)
def min(self, key):
"""
Return minimum value from the sketch `key`. Will return DBL_MAX if the sketch is empty.
For more information see `TDIGEST.MIN <https://redis.io/commands/tdigest.min>`_.
""" # noqa
return self.execute_command(TDIGEST_MIN, key)
def max(self, key):
"""
Return maximum value from the sketch `key`. Will return DBL_MIN if the sketch is empty.
For more information see `TDIGEST.MAX <https://redis.io/commands/tdigest.max>`_.
""" # noqa
return self.execute_command(TDIGEST_MAX, key)
def quantile(self, key, quantile, *quantiles):
"""
Returns estimates of one or more cutoffs such that a specified fraction of the
observations added to this t-digest would be less than or equal to each of the
specified cutoffs. (Multiple quantiles can be returned with one call)
For more information see `TDIGEST.QUANTILE <https://redis.io/commands/tdigest.quantile>`_.
""" # noqa
return self.execute_command(TDIGEST_QUANTILE, key, quantile, *quantiles)
def cdf(self, key, value, *values):
"""
Return the fraction (as a double) of all points added which are <= value.
For more information see `TDIGEST.CDF <https://redis.io/commands/tdigest.cdf>`_.
""" # noqa
return self.execute_command(TDIGEST_CDF, key, value, *values)
def info(self, key):
"""
Return Compression, Capacity, Merged Nodes, Unmerged Nodes, Merged Weight, Unmerged Weight
and Total Compressions.
For more information see `TDIGEST.INFO <https://redis.io/commands/tdigest.info>`_.
""" # noqa
return self.execute_command(TDIGEST_INFO, key)
def trimmed_mean(self, key, low_cut_quantile, high_cut_quantile):
"""
Return mean value from the sketch, excluding observation values outside
the low and high cutoff quantiles.
For more information see `TDIGEST.TRIMMED_MEAN <https://redis.io/commands/tdigest.trimmed_mean>`_.
""" # noqa
return self.execute_command(
TDIGEST_TRIMMED_MEAN, key, low_cut_quantile, high_cut_quantile
)
def rank(self, key, value, *values):
"""
Retrieve the estimated rank of value (the number of observations in the sketch
that are smaller than value + half the number of observations that are equal to value).
For more information see `TDIGEST.RANK <https://redis.io/commands/tdigest.rank>`_.
""" # noqa
return self.execute_command(TDIGEST_RANK, key, value, *values)
def revrank(self, key, value, *values):
"""
Retrieve the estimated rank of value (the number of observations in the sketch
that are larger than value + half the number of observations that are equal to value).
For more information see `TDIGEST.REVRANK <https://redis.io/commands/tdigest.revrank>`_.
""" # noqa
return self.execute_command(TDIGEST_REVRANK, key, value, *values)
def byrank(self, key, rank, *ranks):
"""
Retrieve an estimation of the value with the given rank.
For more information see `TDIGEST.BY_RANK <https://redis.io/commands/tdigest.by_rank>`_.
""" # noqa
return self.execute_command(TDIGEST_BYRANK, key, rank, *ranks)
def byrevrank(self, key, rank, *ranks):
"""
Retrieve an estimation of the value with the given reverse rank.
For more information see `TDIGEST.BY_REVRANK <https://redis.io/commands/tdigest.by_revrank>`_.
""" # noqa
return self.execute_command(TDIGEST_BYREVRANK, key, rank, *ranks)
class CMSCommands:
"""Count-Min Sketch Commands"""
def initbydim(self, key, width, depth):
"""
Initialize a Count-Min Sketch `key` to dimensions (`width`, `depth`) specified by user.
For more information see `CMS.INITBYDIM <https://redis.io/commands/cms.initbydim>`_.
""" # noqa
return self.execute_command(CMS_INITBYDIM, key, width, depth)
def initbyprob(self, key, error, probability):
"""
Initialize a Count-Min Sketch `key` to characteristics (`error`, `probability`) specified by user.
For more information see `CMS.INITBYPROB <https://redis.io/commands/cms.initbyprob>`_.
""" # noqa
return self.execute_command(CMS_INITBYPROB, key, error, probability)
def incrby(self, key, items, increments):
"""
Add/increase `items` to a Count-Min Sketch `key` by ``increments``.
Both `items` and `increments` are lists.
For more information see `CMS.INCRBY <https://redis.io/commands/cms.incrby>`_.
Example:
>>> client.cms().incrby('A', ['foo'], [1])
""" # noqa
params = [key]
self.append_items_and_increments(params, items, increments)
return self.execute_command(CMS_INCRBY, *params)
def query(self, key, *items):
"""
Return count for an `item` from `key`. Multiple items can be queried with one call.
For more information see `CMS.QUERY <https://redis.io/commands/cms.query>`_.
""" # noqa
return self.execute_command(CMS_QUERY, key, *items)
def merge(self, destKey, numKeys, srcKeys, weights=[]):
"""
Merge `numKeys` of sketches into `destKey`. Sketches specified in `srcKeys`.
All sketches must have identical width and depth.
`Weights` can be used to multiply certain sketches. Default weight is 1.
Both `srcKeys` and `weights` are lists.
For more information see `CMS.MERGE <https://redis.io/commands/cms.merge>`_.
""" # noqa
params = [destKey, numKeys]
params += srcKeys
self.append_weights(params, weights)
return self.execute_command(CMS_MERGE, *params)
def info(self, key):
"""
Return width, depth and total count of the sketch.
For more information see `CMS.INFO <https://redis.io/commands/cms.info>`_.
""" # noqa
return self.execute_command(CMS_INFO, key)
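# Usage sketch (assumes RedisBloom's t-digest on the server; the wrapper is
# reached through Redis.tdigest()): rank statistics over a stream of values.
if __name__ == "__main__":
    import redis

    td = redis.Redis().tdigest()
    td.create("td:latency", compression=100)
    td.add("td:latency", [1.2, 2.5, 5.0, 10.0])
    print(td.quantile("td:latency", 0.5, 0.99))  # median and p99 estimates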

View File

@@ -0,0 +1,120 @@
from ..helpers import nativestr
class BFInfo:
capacity = None
size = None
filterNum = None
insertedNum = None
expansionRate = None
def __init__(self, args):
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.capacity = response["Capacity"]
self.size = response["Size"]
self.filterNum = response["Number of filters"]
self.insertedNum = response["Number of items inserted"]
self.expansionRate = response["Expansion rate"]
def get(self, item):
try:
return self.__getitem__(item)
except AttributeError:
return None
def __getitem__(self, item):
return getattr(self, item)
class CFInfo:
size = None
bucketNum = None
filterNum = None
insertedNum = None
deletedNum = None
bucketSize = None
expansionRate = None
maxIteration = None
def __init__(self, args):
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.size = response["Size"]
self.bucketNum = response["Number of buckets"]
self.filterNum = response["Number of filters"]
self.insertedNum = response["Number of items inserted"]
self.deletedNum = response["Number of items deleted"]
self.bucketSize = response["Bucket size"]
self.expansionRate = response["Expansion rate"]
self.maxIteration = response["Max iterations"]
def get(self, item):
try:
return self.__getitem__(item)
except AttributeError:
return None
def __getitem__(self, item):
return getattr(self, item)
class CMSInfo:
width = None
depth = None
count = None
def __init__(self, args):
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.width = response["width"]
self.depth = response["depth"]
self.count = response["count"]
def __getitem__(self, item):
return getattr(self, item)
class TopKInfo:
k = None
width = None
depth = None
decay = None
def __init__(self, args):
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.k = response["k"]
self.width = response["width"]
self.depth = response["depth"]
self.decay = response["decay"]
def __getitem__(self, item):
return getattr(self, item)
class TDigestInfo:
compression = None
capacity = None
merged_nodes = None
unmerged_nodes = None
merged_weight = None
unmerged_weight = None
total_compressions = None
memory_usage = None
def __init__(self, args):
response = dict(zip(map(nativestr, args[::2]), args[1::2]))
self.compression = response["Compression"]
self.capacity = response["Capacity"]
self.merged_nodes = response["Merged nodes"]
self.unmerged_nodes = response["Unmerged nodes"]
self.merged_weight = response["Merged weight"]
self.unmerged_weight = response["Unmerged weight"]
self.total_compressions = response["Total compressions"]
self.memory_usage = response["Memory usage"]
def get(self, item):
try:
return self.__getitem__(item)
except AttributeError:
return None
def __getitem__(self, item):
return getattr(self, item)
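# Parsing sketch (synthetic reply values): BFInfo above consumes the flat
# key/value array that BF.INFO returns under RESP2.
if __name__ == "__main__":
    raw = [
        b"Capacity", 1000,
        b"Size", 240,
        b"Number of filters", 1,
        b"Number of items inserted", 3,
        b"Expansion rate", 2,
    ]
    info = BFInfo(raw)
    print(info.capacity, info["size"], info.get("missing"))  # 1000 240 None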

View File

@@ -0,0 +1,919 @@
import asyncio
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
NoReturn,
Optional,
Union,
)
from redis.crc import key_slot
from redis.exceptions import RedisClusterException, RedisError
from redis.typing import (
AnyKeyT,
ClusterCommandsProtocol,
EncodableT,
KeysT,
KeyT,
PatternT,
ResponseT,
)
from .core import (
ACLCommands,
AsyncACLCommands,
AsyncDataAccessCommands,
AsyncFunctionCommands,
AsyncManagementCommands,
AsyncModuleCommands,
AsyncScriptCommands,
DataAccessCommands,
FunctionCommands,
ManagementCommands,
ModuleCommands,
PubSubCommands,
ScriptCommands,
)
from .helpers import list_or_args
from .redismodules import AsyncRedisModuleCommands, RedisModuleCommands
if TYPE_CHECKING:
from redis.asyncio.cluster import TargetNodesT
# Not complete, but covers the major ones
# https://redis.io/commands
READ_COMMANDS = frozenset(
[
"BITCOUNT",
"BITPOS",
"EVAL_RO",
"EVALSHA_RO",
"EXISTS",
"GEODIST",
"GEOHASH",
"GEOPOS",
"GEORADIUS",
"GEORADIUSBYMEMBER",
"GET",
"GETBIT",
"GETRANGE",
"HEXISTS",
"HGET",
"HGETALL",
"HKEYS",
"HLEN",
"HMGET",
"HSTRLEN",
"HVALS",
"KEYS",
"LINDEX",
"LLEN",
"LRANGE",
"MGET",
"PTTL",
"RANDOMKEY",
"SCARD",
"SDIFF",
"SINTER",
"SISMEMBER",
"SMEMBERS",
"SRANDMEMBER",
"STRLEN",
"SUNION",
"TTL",
"ZCARD",
"ZCOUNT",
"ZRANGE",
"ZSCORE",
]
)
class ClusterMultiKeyCommands(ClusterCommandsProtocol):
"""
A class containing commands that handle more than one key
"""
def _partition_keys_by_slot(self, keys: Iterable[KeyT]) -> Dict[int, List[KeyT]]:
"""Split keys into a dictionary that maps a slot to a list of keys."""
slots_to_keys = {}
for key in keys:
slot = key_slot(self.encoder.encode(key))
slots_to_keys.setdefault(slot, []).append(key)
return slots_to_keys
def _partition_pairs_by_slot(
self, mapping: Mapping[AnyKeyT, EncodableT]
) -> Dict[int, List[EncodableT]]:
"""Split pairs into a dictionary that maps a slot to a list of pairs."""
slots_to_pairs = {}
for pair in mapping.items():
slot = key_slot(self.encoder.encode(pair[0]))
slots_to_pairs.setdefault(slot, []).extend(pair)
return slots_to_pairs
def _execute_pipeline_by_slot(
self, command: str, slots_to_args: Mapping[int, Iterable[EncodableT]]
) -> List[Any]:
read_from_replicas = self.read_from_replicas and command in READ_COMMANDS
pipe = self.pipeline()
[
pipe.execute_command(
command,
*slot_args,
target_nodes=[
self.nodes_manager.get_node_from_slot(slot, read_from_replicas)
],
)
for slot, slot_args in slots_to_args.items()
]
return pipe.execute()
def _reorder_keys_by_command(
self,
keys: Iterable[KeyT],
slots_to_args: Mapping[int, Iterable[EncodableT]],
responses: Iterable[Any],
) -> List[Any]:
results = {
k: v
for slot_values, response in zip(slots_to_args.values(), responses)
for k, v in zip(slot_values, response)
}
return [results[key] for key in keys]
def mget_nonatomic(self, keys: KeysT, *args: KeyT) -> List[Optional[Any]]:
"""
Splits the keys into different slots and then calls MGET
for the keys of every slot. This operation will not be atomic
if keys belong to more than one slot.
Returns a list of values ordered identically to ``keys``
For more information see https://redis.io/commands/mget
"""
# Concatenate all keys into a list
keys = list_or_args(keys, args)
# Split keys into slots
slots_to_keys = self._partition_keys_by_slot(keys)
# Execute commands using a pipeline
res = self._execute_pipeline_by_slot("MGET", slots_to_keys)
# Reorder keys in the order the user provided & return
return self._reorder_keys_by_command(keys, slots_to_keys, res)
def mset_nonatomic(self, mapping: Mapping[AnyKeyT, EncodableT]) -> List[bool]:
"""
Sets key/values based on a mapping. Mapping is a dictionary of
key/value pairs. Both keys and values should be strings or types that
can be cast to a string via str().
Splits the keys into different slots and then calls MSET
for the keys of every slot. This operation will not be atomic
if keys belong to more than one slot.
For more information see https://redis.io/commands/mset
"""
# Partition the keys by slot
slots_to_pairs = self._partition_pairs_by_slot(mapping)
# Execute commands using a pipeline & return list of replies
return self._execute_pipeline_by_slot("MSET", slots_to_pairs)
def _split_command_across_slots(self, command: str, *keys: KeyT) -> int:
"""
Runs the given command once for the keys
of each slot. Returns the sum of the return values.
"""
# Partition the keys by slot
slots_to_keys = self._partition_keys_by_slot(keys)
# Sum up the reply from each command
return sum(self._execute_pipeline_by_slot(command, slots_to_keys))
def exists(self, *keys: KeyT) -> ResponseT:
"""
Returns the number of ``keys`` that exist in the
whole cluster. The keys are first split up into slots
and then an EXISTS command is sent for every slot.
For more information see https://redis.io/commands/exists
"""
return self._split_command_across_slots("EXISTS", *keys)
def delete(self, *keys: KeyT) -> ResponseT:
"""
Deletes the given keys in the cluster.
The keys are first split up into slots
and then a DEL command is sent for every slot.
Non-existent keys are ignored.
Returns the number of keys that were deleted.
For more information see https://redis.io/commands/del
"""
return self._split_command_across_slots("DEL", *keys)
def touch(self, *keys: KeyT) -> ResponseT:
"""
Updates the last access time of given keys across the
cluster.
The keys are first split up into slots
and then a TOUCH command is sent for every slot.
Non-existent keys are ignored.
Returns the number of keys that were touched.
For more information see https://redis.io/commands/touch
"""
return self._split_command_across_slots("TOUCH", *keys)
def unlink(self, *keys: KeyT) -> ResponseT:
"""
Remove the specified keys in a different thread.
The keys are first split up into slots
and then an UNLINK command is sent for every slot.
Non-existent keys are ignored.
Returns the number of keys that were unlinked.
For more information see https://redis.io/commands/unlink
"""
return self._split_command_across_slots("UNLINK", *keys)
class AsyncClusterMultiKeyCommands(ClusterMultiKeyCommands):
"""
A class containing commands that handle more than one key
"""
async def mget_nonatomic(self, keys: KeysT, *args: KeyT) -> List[Optional[Any]]:
"""
Splits the keys into different slots and then calls MGET
for the keys of every slot. This operation will not be atomic
if keys belong to more than one slot.
Returns a list of values ordered identically to ``keys``
For more information see https://redis.io/commands/mget
"""
# Concatenate all keys into a list
keys = list_or_args(keys, args)
# Split keys into slots
slots_to_keys = self._partition_keys_by_slot(keys)
# Execute commands using a pipeline
res = await self._execute_pipeline_by_slot("MGET", slots_to_keys)
# Reorder keys in the order the user provided & return
return self._reorder_keys_by_command(keys, slots_to_keys, res)
async def mset_nonatomic(self, mapping: Mapping[AnyKeyT, EncodableT]) -> List[bool]:
"""
Sets key/values based on a mapping. Mapping is a dictionary of
key/value pairs. Both keys and values should be strings or types that
can be cast to a string via str().
Splits the keys into different slots and then calls MSET
for the keys of every slot. This operation will not be atomic
if keys belong to more than one slot.
For more information see https://redis.io/commands/mset
"""
# Partition the keys by slot
slots_to_pairs = self._partition_pairs_by_slot(mapping)
# Execute commands using a pipeline & return list of replies
return await self._execute_pipeline_by_slot("MSET", slots_to_pairs)
async def _split_command_across_slots(self, command: str, *keys: KeyT) -> int:
"""
Runs the given command once for the keys
of each slot. Returns the sum of the return values.
"""
# Partition the keys by slot
slots_to_keys = self._partition_keys_by_slot(keys)
# Sum up the reply from each command
return sum(await self._execute_pipeline_by_slot(command, slots_to_keys))
async def _execute_pipeline_by_slot(
self, command: str, slots_to_args: Mapping[int, Iterable[EncodableT]]
) -> List[Any]:
if self._initialize:
await self.initialize()
read_from_replicas = self.read_from_replicas and command in READ_COMMANDS
pipe = self.pipeline()
[
pipe.execute_command(
command,
*slot_args,
target_nodes=[
self.nodes_manager.get_node_from_slot(slot, read_from_replicas)
],
)
for slot, slot_args in slots_to_args.items()
]
return await pipe.execute()
class ClusterManagementCommands(ManagementCommands):
"""
A class for Redis Cluster management commands
The class inherits from Redis's core ManagementCommands class and makes
the required adjustments to work in cluster mode
"""
def slaveof(self, *args, **kwargs) -> NoReturn:
"""
Make the server a replica of another instance, or promote it as master.
For more information see https://redis.io/commands/slaveof
"""
raise RedisClusterException("SLAVEOF is not supported in cluster mode")
def replicaof(self, *args, **kwargs) -> NoReturn:
"""
Make the server a replica of another instance, or promote it as master.
For more information see https://redis.io/commands/replicaof
"""
raise RedisClusterException("REPLICAOF is not supported in cluster mode")
def swapdb(self, *args, **kwargs) -> NoReturn:
"""
Swaps two Redis databases.
For more information see https://redis.io/commands/swapdb
"""
raise RedisClusterException("SWAPDB is not supported in cluster mode")
def cluster_myid(self, target_node: "TargetNodesT") -> ResponseT:
"""
Returns the node's id.
:target_node: 'ClusterNode'
The node to execute the command on
For more information check https://redis.io/commands/cluster-myid/
"""
return self.execute_command("CLUSTER MYID", target_nodes=target_node)
def cluster_addslots(
self, target_node: "TargetNodesT", *slots: EncodableT
) -> ResponseT:
"""
Assign new hash slots to receiving node. Sends to specified node.
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-addslots
"""
return self.execute_command(
"CLUSTER ADDSLOTS", *slots, target_nodes=target_node
)
def cluster_addslotsrange(
self, target_node: "TargetNodesT", *slots: EncodableT
) -> ResponseT:
"""
Similar to the CLUSTER ADDSLOTS command.
The difference between the two commands is that ADDSLOTS takes a list of slots
to assign to the node, while ADDSLOTSRANGE takes a list of slot ranges
(specified by start and end slots) to assign to the node.
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-addslotsrange
"""
return self.execute_command(
"CLUSTER ADDSLOTSRANGE", *slots, target_nodes=target_node
)
def cluster_countkeysinslot(self, slot_id: int) -> ResponseT:
"""
Return the number of local keys in the specified hash slot
Sent to the node that owns the specified slot_id
For more information see https://redis.io/commands/cluster-countkeysinslot
"""
return self.execute_command("CLUSTER COUNTKEYSINSLOT", slot_id)
def cluster_count_failure_report(self, node_id: str) -> ResponseT:
"""
Return the number of failure reports active for a given node
Sends to a random node
For more information see https://redis.io/commands/cluster-count-failure-reports
"""
return self.execute_command("CLUSTER COUNT-FAILURE-REPORTS", node_id)
def cluster_delslots(self, *slots: EncodableT) -> List[bool]:
"""
Set hash slots as unbound in the cluster.
It determines by itself which node each slot is in and sends the command there.
Returns a list of the results for each processed slot.
For more information see https://redis.io/commands/cluster-delslots
"""
return [self.execute_command("CLUSTER DELSLOTS", slot) for slot in slots]
def cluster_delslotsrange(self, *slots: EncodableT) -> ResponseT:
"""
Similar to the CLUSTER DELSLOTS command.
The difference is that CLUSTER DELSLOTS takes a list of hash slots to remove
from the node, while CLUSTER DELSLOTSRANGE takes a list of slot ranges to remove
from the node.
For more information see https://redis.io/commands/cluster-delslotsrange
"""
return self.execute_command("CLUSTER DELSLOTSRANGE", *slots)
def cluster_failover(
self, target_node: "TargetNodesT", option: Optional[str] = None
) -> ResponseT:
"""
Forces a slave to perform a manual failover of its master
Sends to specified node
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-failover
"""
if option:
if option.upper() not in ["FORCE", "TAKEOVER"]:
raise RedisError(
f"Invalid option for CLUSTER FAILOVER command: {option}"
)
else:
return self.execute_command(
"CLUSTER FAILOVER", option, target_nodes=target_node
)
else:
return self.execute_command("CLUSTER FAILOVER", target_nodes=target_node)
def cluster_info(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Provides info about Redis Cluster node state.
The command will be sent to a random node in the cluster if no target
node is specified.
For more information see https://redis.io/commands/cluster-info
"""
return self.execute_command("CLUSTER INFO", target_nodes=target_nodes)
def cluster_keyslot(self, key: str) -> ResponseT:
"""
Returns the hash slot of the specified key
Sends to random node in the cluster
For more information see https://redis.io/commands/cluster-keyslot
"""
return self.execute_command("CLUSTER KEYSLOT", key)
def cluster_meet(
self, host: str, port: int, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Force a cluster node to handshake with another node.
Sends to specified node.
For more information see https://redis.io/commands/cluster-meet
"""
return self.execute_command(
"CLUSTER MEET", host, port, target_nodes=target_nodes
)
def cluster_nodes(self) -> ResponseT:
"""
Get Cluster config for the node.
Sends to random node in the cluster
For more information see https://redis.io/commands/cluster-nodes
"""
return self.execute_command("CLUSTER NODES")
def cluster_replicate(
self, target_nodes: "TargetNodesT", node_id: str
) -> ResponseT:
"""
Reconfigure a node as a slave of the specified master node
For more information see https://redis.io/commands/cluster-replicate
"""
return self.execute_command(
"CLUSTER REPLICATE", node_id, target_nodes=target_nodes
)
def cluster_reset(
self, soft: bool = True, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Reset a Redis Cluster node
If 'soft' is True then it will send 'SOFT' argument
If 'soft' is False then it will send 'HARD' argument
For more information see https://redis.io/commands/cluster-reset
"""
return self.execute_command(
"CLUSTER RESET", b"SOFT" if soft else b"HARD", target_nodes=target_nodes
)
def cluster_save_config(
self, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Forces the node to save cluster state on disk
For more information see https://redis.io/commands/cluster-saveconfig
"""
return self.execute_command("CLUSTER SAVECONFIG", target_nodes=target_nodes)
def cluster_get_keys_in_slot(self, slot: int, num_keys: int) -> ResponseT:
"""
Returns up to ``num_keys`` key names from the specified cluster slot
For more information see https://redis.io/commands/cluster-getkeysinslot
"""
return self.execute_command("CLUSTER GETKEYSINSLOT", slot, num_keys)
def cluster_set_config_epoch(
self, epoch: int, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Set the configuration epoch in a new node
For more information see https://redis.io/commands/cluster-set-config-epoch
"""
return self.execute_command(
"CLUSTER SET-CONFIG-EPOCH", epoch, target_nodes=target_nodes
)
def cluster_setslot(
self, target_node: "TargetNodesT", node_id: str, slot_id: int, state: str
) -> ResponseT:
"""
Bind a hash slot to a specific node
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-setslot
"""
if state.upper() in ("IMPORTING", "NODE", "MIGRATING"):
return self.execute_command(
"CLUSTER SETSLOT", slot_id, state, node_id, target_nodes=target_node
)
elif state.upper() == "STABLE":
raise RedisError('For "stable" state please use cluster_setslot_stable')
else:
raise RedisError(f"Invalid slot state: {state}")
def cluster_setslot_stable(self, slot_id: int) -> ResponseT:
"""
Clears migrating / importing state from the slot.
It determines by itself which node the slot is in and sends the command there.
For more information see https://redis.io/commands/cluster-setslot
"""
return self.execute_command("CLUSTER SETSLOT", slot_id, "STABLE")
def cluster_replicas(
self, node_id: str, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Provides a list of replica nodes replicating from the specified primary
target node.
For more information see https://redis.io/commands/cluster-replicas
"""
return self.execute_command(
"CLUSTER REPLICAS", node_id, target_nodes=target_nodes
)
def cluster_slots(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Get array of Cluster slot to node mappings
For more information see https://redis.io/commands/cluster-slots
"""
return self.execute_command("CLUSTER SLOTS", target_nodes=target_nodes)
def cluster_shards(self, target_nodes=None):
"""
Returns details about the shards of the cluster.
For more information see https://redis.io/commands/cluster-shards
"""
return self.execute_command("CLUSTER SHARDS", target_nodes=target_nodes)
def cluster_myshardid(self, target_nodes=None):
"""
Returns the shard ID of the node.
For more information see https://redis.io/commands/cluster-myshardid/
"""
return self.execute_command("CLUSTER MYSHARDID", target_nodes=target_nodes)
def cluster_links(self, target_node: "TargetNodesT") -> ResponseT:
"""
Each node in a Redis Cluster maintains a pair of long-lived TCP links with each
peer in the cluster: one for sending outbound messages towards the peer and one
for receiving inbound messages from the peer.
This command outputs information of all such peer links as an array.
For more information see https://redis.io/commands/cluster-links
"""
return self.execute_command("CLUSTER LINKS", target_nodes=target_node)
def cluster_flushslots(self, target_nodes: Optional["TargetNodesT"] = None) -> None:
raise NotImplementedError(
"CLUSTER FLUSHSLOTS is intentionally not implemented in the client."
)
def cluster_bumpepoch(self, target_nodes: Optional["TargetNodesT"] = None) -> None:
raise NotImplementedError(
"CLUSTER BUMPEPOCH is intentionally not implemented in the client."
)
def readonly(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Enables read queries.
The command will be sent to the default cluster node if target_nodes is
not specified.
For more information see https://redis.io/commands/readonly
"""
if target_nodes == "replicas" or target_nodes == "all":
# read_from_replicas will only be enabled if the READONLY command
# is sent to all replicas
self.read_from_replicas = True
return self.execute_command("READONLY", target_nodes=target_nodes)
def readwrite(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Disables read queries.
The command will be sent to the default cluster node if target_nodes is
not specified.
For more information see https://redis.io/commands/readwrite
"""
# Reset read from replicas flag
self.read_from_replicas = False
return self.execute_command("READWRITE", target_nodes=target_nodes)
class AsyncClusterManagementCommands(
ClusterManagementCommands, AsyncManagementCommands
):
"""
A class for Redis Cluster management commands
The class inherits from Redis's core ManagementCommands class and makes
the required adjustments to work in cluster mode
"""
async def cluster_delslots(self, *slots: EncodableT) -> List[bool]:
"""
Set hash slots as unbound in the cluster.
It determines by it self what node the slot is in and sends it there
Returns a list of the results for each processed slot.
For more information see https://redis.io/commands/cluster-delslots
"""
return await asyncio.gather(
*(
asyncio.create_task(self.execute_command("CLUSTER DELSLOTS", slot))
for slot in slots
)
)
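# Usage sketch (illustrative; assumes an async cluster reachable through a
# seed node): the gather above lets the per-slot DELSLOTS calls run
# concurrently. asyncio is already imported at the top of this module.
if __name__ == "__main__":
    from redis.asyncio.cluster import RedisCluster as AsyncRedisCluster

    async def _demo() -> None:
        rc = AsyncRedisCluster(host="localhost", port=7000)
        print(await rc.cluster_delslots(100, 101, 102))
        await rc.aclose()

    asyncio.run(_demo())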
class ClusterDataAccessCommands(DataAccessCommands):
"""
A class for Redis Cluster Data Access Commands
The class inherits from Redis's core DataAccessCommand class and makes
the required adjustments to work in cluster mode
"""
def stralgo(
self,
algo: Literal["LCS"],
value1: KeyT,
value2: KeyT,
specific_argument: Union[Literal["strings"], Literal["keys"]] = "strings",
len: bool = False,
idx: bool = False,
minmatchlen: Optional[int] = None,
withmatchlen: bool = False,
**kwargs,
) -> ResponseT:
"""
Implements complex algorithms that operate on strings.
Right now the only algorithm implemented is the LCS algorithm
(longest common substring). However new algorithms could be
implemented in the future.
``algo`` Right now must be LCS
``value1`` and ``value2`` Can be two strings or two keys
``specific_argument`` Specifying if the arguments to the algorithm
will be keys or strings. strings is the default.
``len`` Returns just the len of the match.
``idx`` Returns the match positions in each string.
``minmatchlen`` Restrict the list of matches to the ones of a given
minimal length. Can be provided only when ``idx`` set to True.
``withmatchlen`` Returns the matches with the len of the match.
Can be provided only when ``idx`` set to True.
For more information see https://redis.io/commands/stralgo
"""
target_nodes = kwargs.pop("target_nodes", None)
if specific_argument == "strings" and target_nodes is None:
target_nodes = "default-node"
kwargs.update({"target_nodes": target_nodes})
return super().stralgo(
algo,
value1,
value2,
specific_argument,
len,
idx,
minmatchlen,
withmatchlen,
**kwargs,
)
def scan_iter(
self,
match: Optional[PatternT] = None,
count: Optional[int] = None,
_type: Optional[str] = None,
**kwargs,
) -> Iterator:
# Do the first query with cursor=0 for all nodes
cursors, data = self.scan(match=match, count=count, _type=_type, **kwargs)
yield from data
cursors = {name: cursor for name, cursor in cursors.items() if cursor != 0}
if cursors:
# Get nodes by name
nodes = {name: self.get_node(node_name=name) for name in cursors.keys()}
# Iterate over each node till its cursor is 0
kwargs.pop("target_nodes", None)
while cursors:
for name, cursor in cursors.items():
cur, data = self.scan(
cursor=cursor,
match=match,
count=count,
_type=_type,
target_nodes=nodes[name],
**kwargs,
)
yield from data
cursors[name] = cur[name]
cursors = {
name: cursor for name, cursor in cursors.items() if cursor != 0
}
class AsyncClusterDataAccessCommands(
ClusterDataAccessCommands, AsyncDataAccessCommands
):
"""
A class for Redis Cluster Data Access Commands
The class inherits from Redis's core DataAccessCommand class and makes
the required adjustments to work in cluster mode
"""
async def scan_iter(
self,
match: Optional[PatternT] = None,
count: Optional[int] = None,
_type: Optional[str] = None,
**kwargs,
) -> AsyncIterator:
# Do the first query with cursor=0 for all nodes
cursors, data = await self.scan(match=match, count=count, _type=_type, **kwargs)
for value in data:
yield value
cursors = {name: cursor for name, cursor in cursors.items() if cursor != 0}
if cursors:
# Get nodes by name
nodes = {name: self.get_node(node_name=name) for name in cursors.keys()}
# Iterate over each node till its cursor is 0
kwargs.pop("target_nodes", None)
while cursors:
for name, cursor in cursors.items():
cur, data = await self.scan(
cursor=cursor,
match=match,
count=count,
_type=_type,
target_nodes=nodes[name],
**kwargs,
)
for value in data:
yield value
cursors[name] = cur[name]
cursors = {
name: cursor for name, cursor in cursors.items() if cursor != 0
}
class RedisClusterCommands(
ClusterMultiKeyCommands,
ClusterManagementCommands,
ACLCommands,
PubSubCommands,
ClusterDataAccessCommands,
ScriptCommands,
FunctionCommands,
ModuleCommands,
RedisModuleCommands,
):
"""
A class for all Redis Cluster commands
For key-based commands, the target node(s) will be internally determined
by the keys' hash slot.
Non-key-based commands can be executed with the 'target_nodes' argument to
target specific nodes. By default, if target_nodes is not specified, the
command will be executed on the default cluster node.
:param target_nodes: can be one of the following:
- a nodes flag: ALL_NODES, PRIMARIES, REPLICAS, RANDOM
- a 'ClusterNode' instance
- a list of 'ClusterNode' instances
- a dict mapping arbitrary names to 'ClusterNode' instances
for example:
r.cluster_info(target_nodes=RedisCluster.ALL_NODES)
"""
class AsyncRedisClusterCommands(
AsyncClusterMultiKeyCommands,
AsyncClusterManagementCommands,
AsyncACLCommands,
AsyncClusterDataAccessCommands,
AsyncScriptCommands,
AsyncFunctionCommands,
AsyncModuleCommands,
AsyncRedisModuleCommands,
):
"""
A class for all Redis Cluster commands
For key-based commands, the target node(s) will be internally determined
by the keys' hash slot.
Non-key-based commands can be executed with the 'target_nodes' argument to
target specific nodes. By default, if target_nodes is not specified, the
command will be executed on the default cluster node.
:param target_nodes: can be one of the following:
- a nodes flag: ALL_NODES, PRIMARIES, REPLICAS, RANDOM
- a 'ClusterNode' instance
- a list of 'ClusterNode' instances
- a dict mapping arbitrary names to 'ClusterNode' instances
for example:
r.cluster_info(target_nodes=RedisCluster.ALL_NODES)
"""

File diff suppressed because it is too large

View File

@@ -0,0 +1,118 @@
import copy
import random
import string
from typing import List, Tuple
import redis
from redis.typing import KeysT, KeyT
def list_or_args(keys: KeysT, args: Tuple[KeyT, ...]) -> List[KeyT]:
# returns a single new list combining keys and args
try:
iter(keys)
# a string or bytes instance can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, (bytes, str)):
keys = [keys]
else:
keys = list(keys)
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys
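A quick sketch of the normalization this helper performs:
list_or_args("k1", ("k2", "k3"))  # -> ["k1", "k2", "k3"]
list_or_args(["k1", "k2"], ())    # -> ["k1", "k2"]
list_or_args(b"k1", ())           # -> [b"k1"]  (bytes are wrapped, not iterated)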
def nativestr(x):
"""Return the decoded binary string, or a string, depending on type."""
r = x.decode("utf-8", "replace") if isinstance(x, bytes) else x
if r == "null":
return
return r
def delist(x):
"""Given a list of binaries, return the stringified version."""
if x is None:
return x
return [nativestr(obj) for obj in x]
def parse_to_list(response):
"""Optimistically parse the response to a list."""
res = []
special_values = {"infinity", "nan", "-infinity"}
if response is None:
return res
for item in response:
if item is None:
res.append(None)
continue
try:
item_str = nativestr(item)
except TypeError:
res.append(None)
continue
if isinstance(item_str, str) and item_str.lower() in special_values:
res.append(item_str) # Keep as string
else:
try:
res.append(int(item))
except ValueError:
try:
res.append(float(item))
except ValueError:
res.append(item_str)
return res
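A sketch of the conversions performed, using illustrative RESP replies:
parse_to_list([b"3", b"2.5", b"nan", None, b"abc"])
# -> [3, 2.5, "nan", None, "abc"]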
def parse_list_to_dict(response):
res = {}
for i in range(0, len(response), 2):
if isinstance(response[i], list):
# "Child iterators" may not exist yet, so create it lazily
res.setdefault("Child iterators", []).append(parse_list_to_dict(response[i]))
try:
if isinstance(response[i + 1], list):
res.setdefault("Child iterators", []).append(
parse_list_to_dict(response[i + 1])
)
except IndexError:
pass
elif isinstance(response[i + 1], list):
res["Child iterators"] = [parse_list_to_dict(response[i + 1])]
else:
try:
res[response[i]] = float(response[i + 1])
except (TypeError, ValueError):
res[response[i]] = response[i + 1]
return res
def random_string(length=10):
"""
Return a random string of ``length`` lowercase ASCII letters.
"""
return "".join( # nosec
random.choice(string.ascii_lowercase) for x in range(length)
)
def decode_dict_keys(obj):
"""Decode the keys of the given dictionary with utf-8."""
newobj = copy.copy(obj)
for k in obj.keys():
if isinstance(k, bytes):
newobj[k.decode("utf-8")] = newobj[k]
newobj.pop(k)
return newobj
def get_protocol_version(client):
if isinstance(client, redis.Redis) or isinstance(client, redis.asyncio.Redis):
return client.connection_pool.connection_kwargs.get("protocol")
elif isinstance(client, redis.cluster.AbstractRedisCluster):
return client.nodes_manager.connection_kwargs.get("protocol")

View File

@@ -0,0 +1,147 @@
from json import JSONDecodeError, JSONDecoder, JSONEncoder
import redis
from ..helpers import get_protocol_version, nativestr
from .commands import JSONCommands
from .decoders import bulk_of_jsons, decode_list
class JSON(JSONCommands):
"""
Create a client for talking to json.
:param decoder: an instance of json.JSONDecoder
:param encoder: an instance of json.JSONEncoder
"""
def __init__(
self, client, version=None, decoder=JSONDecoder(), encoder=JSONEncoder()
):
"""
Create a client for talking to json.
:param decoder: an instance of json.JSONDecoder
:param encoder: an instance of json.JSONEncoder
"""
# Set the module commands' callbacks
self._MODULE_CALLBACKS = {
"JSON.ARRPOP": self._decode,
"JSON.DEBUG": self._decode,
"JSON.GET": self._decode,
"JSON.MERGE": lambda r: r and nativestr(r) == "OK",
"JSON.MGET": bulk_of_jsons(self._decode),
"JSON.MSET": lambda r: r and nativestr(r) == "OK",
"JSON.RESP": self._decode,
"JSON.SET": lambda r: r and nativestr(r) == "OK",
"JSON.TOGGLE": self._decode,
}
_RESP2_MODULE_CALLBACKS = {
"JSON.ARRAPPEND": self._decode,
"JSON.ARRINDEX": self._decode,
"JSON.ARRINSERT": self._decode,
"JSON.ARRLEN": self._decode,
"JSON.ARRTRIM": self._decode,
"JSON.CLEAR": int,
"JSON.DEL": int,
"JSON.FORGET": int,
"JSON.GET": self._decode,
"JSON.NUMINCRBY": self._decode,
"JSON.NUMMULTBY": self._decode,
"JSON.OBJKEYS": self._decode,
"JSON.STRAPPEND": self._decode,
"JSON.OBJLEN": self._decode,
"JSON.STRLEN": self._decode,
"JSON.TOGGLE": self._decode,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.execute_command = client.execute_command
self.MODULE_VERSION = version
if get_protocol_version(self.client) in ["3", 3]:
self._MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
self._MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for key, value in self._MODULE_CALLBACKS.items():
self.client.set_response_callback(key, value)
self.__encoder__ = encoder
self.__decoder__ = decoder
def _decode(self, obj):
"""Get the decoder."""
if obj is None:
return obj
try:
x = self.__decoder__.decode(obj)
if x is None:
raise TypeError
return x
except TypeError:
try:
return self.__decoder__.decode(obj.decode())
except AttributeError:
return decode_list(obj)
except (AttributeError, JSONDecodeError):
return decode_list(obj)
def _encode(self, obj):
"""Get the encoder."""
return self.__encoder__.encode(obj)
def pipeline(self, transaction=True, shard_hint=None):
"""Creates a pipeline for the JSON module, that can be used for executing
JSON commands, as well as classic core commands.
Usage example:
r = redis.Redis()
pipe = r.json().pipeline()
pipe.jsonset('foo', '.', {'hello!': 'world'})
pipe.jsonget('foo')
pipe.jsonget('notakey')
"""
if isinstance(self.client, redis.RedisCluster):
p = ClusterPipeline(
nodes_manager=self.client.nodes_manager,
commands_parser=self.client.commands_parser,
startup_nodes=self.client.nodes_manager.startup_nodes,
result_callbacks=self.client.result_callbacks,
cluster_response_callbacks=self.client.cluster_response_callbacks,
cluster_error_retry_attempts=self.client.retry.get_retries(),
read_from_replicas=self.client.read_from_replicas,
reinitialize_steps=self.client.reinitialize_steps,
lock=self.client._lock,
)
else:
p = Pipeline(
connection_pool=self.client.connection_pool,
response_callbacks=self._MODULE_CALLBACKS,
transaction=transaction,
shard_hint=shard_hint,
)
p._encode = self._encode
p._decode = self._decode
return p
class ClusterPipeline(JSONCommands, redis.cluster.ClusterPipeline):
"""Cluster pipeline for the module."""
class Pipeline(JSONCommands, redis.client.Pipeline):
"""Pipeline for the module."""

View File

@@ -0,0 +1,5 @@
from typing import List, Mapping, Union
JsonType = Union[
str, int, float, bool, None, Mapping[str, "JsonType"], List["JsonType"]
]

View File

@@ -0,0 +1,431 @@
import os
from json import JSONDecodeError, loads
from typing import Dict, List, Optional, Tuple, Union
from redis.exceptions import DataError
from redis.utils import deprecated_function
from ._util import JsonType
from .decoders import decode_dict_keys
from .path import Path
class JSONCommands:
"""json commands."""
def arrappend(
self, name: str, path: Optional[str] = Path.root_path(), *args: List[JsonType]
) -> List[Optional[int]]:
"""Append the objects ``args`` to the array under the
``path` in key ``name``.
For more information see `JSON.ARRAPPEND <https://redis.io/commands/json.arrappend>`_..
""" # noqa
pieces = [name, str(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command("JSON.ARRAPPEND", *pieces)
def arrindex(
self,
name: str,
path: str,
scalar: int,
start: Optional[int] = None,
stop: Optional[int] = None,
) -> List[Optional[int]]:
"""
Return the index of ``scalar`` in the JSON array under ``path`` at key
``name``.
The search can be limited using the optional inclusive ``start``
and exclusive ``stop`` indices.
For more information see `JSON.ARRINDEX <https://redis.io/commands/json.arrindex>`_.
""" # noqa
pieces = [name, str(path), self._encode(scalar)]
if start is not None:
pieces.append(start)
if stop is not None:
pieces.append(stop)
return self.execute_command("JSON.ARRINDEX", *pieces, keys=[name])
def arrinsert(
self, name: str, path: str, index: int, *args: List[JsonType]
) -> List[Optional[int]]:
"""Insert the objects ``args`` to the array at index ``index``
under the ``path` in key ``name``.
For more information see `JSON.ARRINSERT <https://redis.io/commands/json.arrinsert>`_.
""" # noqa
pieces = [name, str(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command("JSON.ARRINSERT", *pieces)
def arrlen(
self, name: str, path: Optional[str] = Path.root_path()
) -> List[Optional[int]]:
"""Return the length of the array JSON value under ``path``
at key``name``.
For more information see `JSON.ARRLEN <https://redis.io/commands/json.arrlen>`_.
""" # noqa
return self.execute_command("JSON.ARRLEN", name, str(path), keys=[name])
def arrpop(
self,
name: str,
path: Optional[str] = Path.root_path(),
index: Optional[int] = -1,
) -> List[Optional[str]]:
"""Pop the element at ``index`` in the array JSON value under
``path`` at key ``name``.
For more information see `JSON.ARRPOP <https://redis.io/commands/json.arrpop>`_.
""" # noqa
return self.execute_command("JSON.ARRPOP", name, str(path), index)
def arrtrim(
self, name: str, path: str, start: int, stop: int
) -> List[Optional[int]]:
"""Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``.
For more information see `JSON.ARRTRIM <https://redis.io/commands/json.arrtrim>`_.
""" # noqa
return self.execute_command("JSON.ARRTRIM", name, str(path), start, stop)
def type(self, name: str, path: Optional[str] = Path.root_path()) -> List[str]:
"""Get the type of the JSON value under ``path`` from key ``name``.
For more information see `JSON.TYPE <https://redis.io/commands/json.type>`_.
""" # noqa
return self.execute_command("JSON.TYPE", name, str(path), keys=[name])
def resp(self, name: str, path: Optional[str] = Path.root_path()) -> List:
"""Return the JSON value under ``path`` at key ``name``.
For more information see `JSON.RESP <https://redis.io/commands/json.resp>`_.
""" # noqa
return self.execute_command("JSON.RESP", name, str(path), keys=[name])
def objkeys(
self, name: str, path: Optional[str] = Path.root_path()
) -> List[Optional[List[str]]]:
"""Return the key names in the dictionary JSON value under ``path`` at
key ``name``.
For more information see `JSON.OBJKEYS <https://redis.io/commands/json.objkeys>`_.
""" # noqa
return self.execute_command("JSON.OBJKEYS", name, str(path), keys=[name])
def objlen(
self, name: str, path: Optional[str] = Path.root_path()
) -> List[Optional[int]]:
"""Return the length of the dictionary JSON value under ``path`` at key
``name``.
For more information see `JSON.OBJLEN <https://redis.io/commands/json.objlen>`_.
""" # noqa
return self.execute_command("JSON.OBJLEN", name, str(path), keys=[name])
def numincrby(self, name: str, path: str, number: int) -> str:
"""Increment the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``.
For more information see `JSON.NUMINCRBY <https://redis.io/commands/json.numincrby>`_.
""" # noqa
return self.execute_command(
"JSON.NUMINCRBY", name, str(path), self._encode(number)
)
@deprecated_function(version="4.0.0", reason="deprecated since redisjson 1.0.0")
def nummultby(self, name: str, path: str, number: int) -> str:
"""Multiply the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``.
For more information see `JSON.NUMMULTBY <https://redis.io/commands/json.nummultby>`_.
""" # noqa
return self.execute_command(
"JSON.NUMMULTBY", name, str(path), self._encode(number)
)
def clear(self, name: str, path: Optional[str] = Path.root_path()) -> int:
"""Empty arrays and objects (to have zero slots/keys without deleting the
array/object).
Return the count of cleared paths (ignoring non-array and non-object
paths).
For more information see `JSON.CLEAR <https://redis.io/commands/json.clear>`_.
""" # noqa
return self.execute_command("JSON.CLEAR", name, str(path))
def delete(self, key: str, path: Optional[str] = Path.root_path()) -> int:
"""Delete the JSON value stored at key ``key`` under ``path``.
For more information see `JSON.DEL <https://redis.io/commands/json.del>`_.
"""
return self.execute_command("JSON.DEL", key, str(path))
# forget is an alias for delete
forget = delete
def get(
self, name: str, *args, no_escape: Optional[bool] = False
) -> Optional[List[JsonType]]:
"""
Get the object stored as a JSON value at key ``name``.
``args`` is zero or more paths, and defaults to the root path.
``no_escape`` is a boolean flag that adds the noescape option, so
non-ASCII characters are returned unescaped.
For more information see `JSON.GET <https://redis.io/commands/json.get>`_.
""" # noqa
pieces = [name]
if no_escape:
pieces.append("noescape")
if len(args) == 0:
pieces.append(Path.root_path())
else:
for p in args:
pieces.append(str(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command("JSON.GET", *pieces, keys=[name])
except TypeError:
return None
def mget(self, keys: List[str], path: str) -> List[JsonType]:
"""
Get the objects stored as JSON values under ``path``. ``keys``
is a list of one or more keys.
For more information see `JSON.MGET <https://redis.io/commands/json.mget>`_.
""" # noqa
pieces = []
pieces += keys
pieces.append(str(path))
return self.execute_command("JSON.MGET", *pieces, keys=keys)
def set(
self,
name: str,
path: str,
obj: JsonType,
nx: Optional[bool] = False,
xx: Optional[bool] = False,
decode_keys: Optional[bool] = False,
) -> Optional[str]:
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``.
``nx`` if set to True, set ``value`` only if it does not exist.
``xx`` if set to True, set ``value`` only if it exists.
``decode_keys`` If set to True, the keys of ``obj`` will be decoded
with utf-8.
For the purpose of using this within a pipeline, this command is also
aliased to JSON.SET.
For more information see `JSON.SET <https://redis.io/commands/json.set>`_.
"""
if decode_keys:
obj = decode_dict_keys(obj)
pieces = [name, str(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception(
"nx and xx are mutually exclusive: use one, the "
"other or neither - but not both"
)
elif nx:
pieces.append("NX")
elif xx:
pieces.append("XX")
return self.execute_command("JSON.SET", *pieces)
def mset(self, triplets: List[Tuple[str, str, JsonType]]) -> Optional[str]:
"""
Set JSON values for one or more keys in a single call.
``triplets`` is a list of one or more (key, path, value) triplets.
For the purpose of using this within a pipeline, this command is also
aliased to JSON.MSET.
For more information see `JSON.MSET <https://redis.io/commands/json.mset>`_.
"""
pieces = []
for triplet in triplets:
pieces.extend([triplet[0], str(triplet[1]), self._encode(triplet[2])])
return self.execute_command("JSON.MSET", *pieces)
def merge(
self,
name: str,
path: str,
obj: JsonType,
decode_keys: Optional[bool] = False,
) -> Optional[str]:
"""
Merges a given JSON value into matching paths. Consequently, JSON values
at matching paths are updated, deleted, or expanded with new children.
``decode_keys`` If set to True, the keys of ``obj`` will be decoded
with utf-8.
For more information see `JSON.MERGE <https://redis.io/commands/json.merge>`_.
"""
if decode_keys:
obj = decode_dict_keys(obj)
pieces = [name, str(path), self._encode(obj)]
return self.execute_command("JSON.MERGE", *pieces)
def set_file(
self,
name: str,
path: str,
file_name: str,
nx: Optional[bool] = False,
xx: Optional[bool] = False,
decode_keys: Optional[bool] = False,
) -> Optional[str]:
"""
Set the JSON value at key ``name`` under the ``path`` to the content
of the json file ``file_name``.
``nx`` if set to True, set ``value`` only if it does not exist.
``xx`` if set to True, set ``value`` only if it exists.
``decode_keys`` If set to True, the keys of ``obj`` will be decoded
with utf-8.
"""
with open(file_name) as fp:
file_content = loads(fp.read())
return self.set(name, path, file_content, nx=nx, xx=xx, decode_keys=decode_keys)
def set_path(
self,
json_path: str,
root_folder: str,
nx: Optional[bool] = False,
xx: Optional[bool] = False,
decode_keys: Optional[bool] = False,
) -> Dict[str, bool]:
"""
Iterate over ``root_folder`` and set each JSON file to a value
under ``json_path`` with the file name as the key.
``nx`` if set to True, set ``value`` only if it does not exist.
``xx`` if set to True, set ``value`` only if it exists.
``decode_keys`` If set to True, the keys of ``obj`` will be decoded
with utf-8.
"""
set_files_result = {}
for root, dirs, files in os.walk(root_folder):
for file in files:
file_path = os.path.join(root, file)
try:
file_name = file_path.rsplit(".", 1)[0]  # strip only the final extension
self.set_file(
file_name,
json_path,
file_path,
nx=nx,
xx=xx,
decode_keys=decode_keys,
)
set_files_result[file_path] = True
except JSONDecodeError:
set_files_result[file_path] = False
return set_files_result
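A sketch of bulk-loading a folder of JSON fixtures, reusing ``r`` from above; the folder name is an assumption, and each file path minus its final extension becomes the key:
results = r.json().set_path("$", "./fixtures")
# e.g. {"./fixtures/user.json": True, "./fixtures/broken.json": False}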
def strlen(self, name: str, path: Optional[str] = None) -> List[Optional[int]]:
"""Return the length of the string JSON value under ``path`` at key
``name``.
For more information see `JSON.STRLEN <https://redis.io/commands/json.strlen>`_.
""" # noqa
pieces = [name]
if path is not None:
pieces.append(str(path))
return self.execute_command("JSON.STRLEN", *pieces, keys=[name])
def toggle(
self, name: str, path: Optional[str] = Path.root_path()
) -> Union[bool, List[Optional[int]]]:
"""Toggle boolean value under ``path`` at key ``name``.
returning the new value.
For more information see `JSON.TOGGLE <https://redis.io/commands/json.toggle>`_.
""" # noqa
return self.execute_command("JSON.TOGGLE", name, str(path))
def strappend(
self, name: str, value: str, path: Optional[str] = Path.root_path()
) -> Union[int, List[Optional[int]]]:
"""Append to the string JSON value. If two options are specified after
the key name, the path is determined to be the first. If a single
option is passed, then the root_path (i.e Path.root_path()) is used.
For more information see `JSON.STRAPPEND <https://redis.io/commands/json.strappend>`_.
""" # noqa
pieces = [name, str(path), self._encode(value)]
return self.execute_command("JSON.STRAPPEND", *pieces)
def debug(
self,
subcommand: str,
key: Optional[str] = None,
path: Optional[str] = Path.root_path(),
) -> Union[int, List[str]]:
"""Return the memory usage in bytes of a value under ``path`` from
key ``key``.
For more information see `JSON.DEBUG <https://redis.io/commands/json.debug>`_.
""" # noqa
valid_subcommands = ["MEMORY", "HELP"]
if subcommand not in valid_subcommands:
raise DataError(f"The only valid subcommands are {valid_subcommands}")
pieces = [subcommand]
if subcommand == "MEMORY":
if key is None:
raise DataError("No key specified")
pieces.append(key)
pieces.append(str(path))
return self.execute_command("JSON.DEBUG", *pieces)
@deprecated_function(
version="4.0.0", reason="redisjson-py supported this, call get directly."
)
def jsonget(self, *args, **kwargs):
return self.get(*args, **kwargs)
@deprecated_function(
version="4.0.0", reason="redisjson-py supported this, call get directly."
)
def jsonmget(self, *args, **kwargs):
return self.mget(*args, **kwargs)
@deprecated_function(
version="4.0.0", reason="redisjson-py supported this, call get directly."
)
def jsonset(self, *args, **kwargs):
return self.set(*args, **kwargs)

View File

@@ -0,0 +1,60 @@
import copy
import re
from ..helpers import nativestr
def bulk_of_jsons(d):
"""Replace serialized JSON values with objects in a
bulk array response (list).
"""
def _f(b):
for index, item in enumerate(b):
if item is not None:
b[index] = d(item)
return b
return _f
def decode_dict_keys(obj):
"""Decode the keys of the given dictionary with utf-8."""
newobj = copy.copy(obj)
for k in obj.keys():
if isinstance(k, bytes):
newobj[k.decode("utf-8")] = newobj[k]
newobj.pop(k)
return newobj
def unstring(obj):
"""
Attempt to parse a string into a native numeric type (int or float).
One can't simply call int/float in a try/except because there is a
semantic difference between (for example) 15.0 and 15.
"""
floatreg = "^\\d+\\.\\d+$"
match = re.findall(floatreg, obj)
if match != []:
return float(match[0])
intreg = "^\\d+$"
match = re.findall(intreg, obj)
if match != []:
return int(match[0])
return obj
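A sketch of the distinction the docstring describes:
unstring("15")    # -> 15 (int)
unstring("15.0")  # -> 15.0 (float)
unstring("v1.2")  # -> "v1.2" (left as a string)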
def decode_list(b):
"""
Given a non-deserializable object, make a best effort to
return a useful set of results.
"""
if isinstance(b, list):
return [nativestr(obj) for obj in b]
elif isinstance(b, bytes):
return unstring(nativestr(b))
elif isinstance(b, str):
return unstring(b)
return b

View File

@@ -0,0 +1,16 @@
class Path:
"""This class represents a path in a JSON value."""
strPath = ""
@staticmethod
def root_path():
"""Return the root path's string representation."""
return "."
def __init__(self, path):
"""Make a new path based on the string representation in `path`."""
self.strPath = path
def __repr__(self):
return self.strPath

View File

@@ -0,0 +1,101 @@
from __future__ import annotations
from json import JSONDecoder, JSONEncoder
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .bf import BFBloom, CFBloom, CMSBloom, TDigestBloom, TOPKBloom
from .json import JSON
from .search import AsyncSearch, Search
from .timeseries import TimeSeries
from .vectorset import VectorSet
class RedisModuleCommands:
"""This class contains the wrapper functions to bring supported redis
modules into the command namespace.
"""
def json(self, encoder=JSONEncoder(), decoder=JSONDecoder()) -> JSON:
"""Access the json namespace, providing support for redis json."""
from .json import JSON
jj = JSON(client=self, encoder=encoder, decoder=decoder)
return jj
def ft(self, index_name="idx") -> Search:
"""Access the search namespace, providing support for redis search."""
from .search import Search
s = Search(client=self, index_name=index_name)
return s
def ts(self) -> TimeSeries:
"""Access the timeseries namespace, providing support for
redis timeseries data.
"""
from .timeseries import TimeSeries
s = TimeSeries(client=self)
return s
def bf(self) -> BFBloom:
"""Access the bloom namespace."""
from .bf import BFBloom
bf = BFBloom(client=self)
return bf
def cf(self) -> CFBloom:
"""Access the bloom namespace."""
from .bf import CFBloom
cf = CFBloom(client=self)
return cf
def cms(self) -> CMSBloom:
"""Access the bloom namespace."""
from .bf import CMSBloom
cms = CMSBloom(client=self)
return cms
def topk(self) -> TOPKBloom:
"""Access the bloom namespace."""
from .bf import TOPKBloom
topk = TOPKBloom(client=self)
return topk
def tdigest(self) -> TDigestBloom:
"""Access the bloom namespace."""
from .bf import TDigestBloom
tdigest = TDigestBloom(client=self)
return tdigest
def vset(self) -> VectorSet:
"""Access the VectorSet commands namespace."""
from .vectorset import VectorSet
vset = VectorSet(client=self)
return vset
class AsyncRedisModuleCommands(RedisModuleCommands):
def ft(self, index_name="idx") -> AsyncSearch:
"""Access the search namespace, providing support for redis search."""
from .search import AsyncSearch
s = AsyncSearch(client=self, index_name=index_name)
return s
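A sketch of reaching these namespaces from a core client, assuming the relevant modules are loaded on the server:
import redis

r = redis.Redis()
r.json()       # RedisJSON commands
r.ft("myidx")  # search commands bound to the index "myidx"
r.ts()         # time series commands
r.bf()         # Bloom filter commands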

View File

@@ -0,0 +1,189 @@
import redis
from ...asyncio.client import Pipeline as AsyncioPipeline
from .commands import (
AGGREGATE_CMD,
CONFIG_CMD,
INFO_CMD,
PROFILE_CMD,
SEARCH_CMD,
SPELLCHECK_CMD,
SYNDUMP_CMD,
AsyncSearchCommands,
SearchCommands,
)
class Search(SearchCommands):
"""
Create a client for talking to search.
It abstracts the API of the module and lets you just use the engine.
"""
class BatchIndexer:
"""
A batch indexer allows you to automatically batch
document indexing in pipelines, flushing it every N documents.
"""
def __init__(self, client, chunk_size=1000):
self.client = client
self.execute_command = client.execute_command
self._pipeline = client.pipeline(transaction=False, shard_hint=None)
self.total = 0
self.chunk_size = chunk_size
self.current_chunk = 0
def __del__(self):
if self.current_chunk:
self.commit()
def add_document(
self,
doc_id,
nosave=False,
score=1.0,
payload=None,
replace=False,
partial=False,
no_create=False,
**fields,
):
"""
Add a document to the batch query
"""
self.client._add_document(
doc_id,
conn=self._pipeline,
nosave=nosave,
score=score,
payload=payload,
replace=replace,
partial=partial,
no_create=no_create,
**fields,
)
self.current_chunk += 1
self.total += 1
if self.current_chunk >= self.chunk_size:
self.commit()
def add_document_hash(self, doc_id, score=1.0, replace=False):
"""
Add a hash to the batch query
"""
self.client._add_document_hash(
doc_id, conn=self._pipeline, score=score, replace=replace
)
self.current_chunk += 1
self.total += 1
if self.current_chunk >= self.chunk_size:
self.commit()
def commit(self):
"""
Manually commit and flush the batch indexing query
"""
self._pipeline.execute()
self.current_chunk = 0
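A sketch of the batch indexer, assuming an existing index named ``idx``; note that ``add_document`` goes through the legacy document-indexing path:
import redis

r = redis.Redis()
search = r.ft("idx")
indexer = search.BatchIndexer(search, chunk_size=500)
for i in range(2000):
    indexer.add_document(f"doc{i}", title=f"Document {i}")
indexer.commit()  # flush the final partial chunk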
def __init__(self, client, index_name="idx"):
"""
Create a new client for the given ``index_name`` (default ``idx``),
reusing the already-established connection of the given ``client``.
"""
self._MODULE_CALLBACKS = {}
self.client = client
self.index_name = index_name
self.execute_command = client.execute_command
self._pipeline = client.pipeline
self._RESP2_MODULE_CALLBACKS = {
INFO_CMD: self._parse_info,
SEARCH_CMD: self._parse_search,
AGGREGATE_CMD: self._parse_aggregate,
PROFILE_CMD: self._parse_profile,
SPELLCHECK_CMD: self._parse_spellcheck,
CONFIG_CMD: self._parse_config_get,
SYNDUMP_CMD: self._parse_syndump,
}
def pipeline(self, transaction=True, shard_hint=None):
"""Creates a pipeline for the SEARCH module, that can be used for executing
SEARCH commands, as well as classic core commands.
"""
p = Pipeline(
connection_pool=self.client.connection_pool,
response_callbacks=self._MODULE_CALLBACKS,
transaction=transaction,
shard_hint=shard_hint,
)
p.index_name = self.index_name
return p
class AsyncSearch(Search, AsyncSearchCommands):
class BatchIndexer(Search.BatchIndexer):
"""
A batch indexer allows you to automatically batch
document indexing in pipelines, flushing it every N documents.
"""
async def add_document(
self,
doc_id,
nosave=False,
score=1.0,
payload=None,
replace=False,
partial=False,
no_create=False,
**fields,
):
"""
Add a document to the batch query
"""
self.client._add_document(
doc_id,
conn=self._pipeline,
nosave=nosave,
score=score,
payload=payload,
replace=replace,
partial=partial,
no_create=no_create,
**fields,
)
self.current_chunk += 1
self.total += 1
if self.current_chunk >= self.chunk_size:
await self.commit()
async def commit(self):
"""
Manually commit and flush the batch indexing query
"""
await self._pipeline.execute()
self.current_chunk = 0
def pipeline(self, transaction=True, shard_hint=None):
"""Creates a pipeline for the SEARCH module, that can be used for executing
SEARCH commands, as well as classic core commands.
"""
p = AsyncPipeline(
connection_pool=self.client.connection_pool,
response_callbacks=self._MODULE_CALLBACKS,
transaction=transaction,
shard_hint=shard_hint,
)
p.index_name = self.index_name
return p
class Pipeline(SearchCommands, redis.client.Pipeline):
"""Pipeline for the module."""
class AsyncPipeline(AsyncSearchCommands, AsyncioPipeline, Pipeline):
"""AsyncPipeline for the module."""

Some files were not shown because too many files have changed in this diff