76
gnx-react/venv/lib/python3.12/site-packages/celery/app/__init__.py
Normal file
@@ -0,0 +1,76 @@
|
||||
"""Celery Application."""
|
||||
from celery import _state
|
||||
from celery._state import app_or_default, disable_trace, enable_trace, pop_current_task, push_current_task
|
||||
from celery.local import Proxy
|
||||
|
||||
from .base import Celery
|
||||
from .utils import AppPickler
|
||||
|
||||
__all__ = (
|
||||
'Celery', 'AppPickler', 'app_or_default', 'default_app',
|
||||
'bugreport', 'enable_trace', 'disable_trace', 'shared_task',
|
||||
'push_current_task', 'pop_current_task',
|
||||
)
|
||||
|
||||
#: Proxy always returning the app set as default.
|
||||
default_app = Proxy(lambda: _state.default_app)
|
||||
|
||||
|
||||
def bugreport(app=None):
|
||||
"""Return information useful in bug reports."""
|
||||
return (app or _state.get_current_app()).bugreport()
|
||||
|
||||
|
||||
def shared_task(*args, **kwargs):
|
||||
"""Create shared task (decorator).
|
||||
|
||||
This can be used by library authors to create tasks that'll work
|
||||
for any app environment.
|
||||
|
||||
Returns:
|
||||
~celery.local.Proxy: A proxy that always takes the task from the
|
||||
current app's task registry.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from celery import Celery, shared_task
|
||||
>>> @shared_task
|
||||
... def add(x, y):
|
||||
... return x + y
|
||||
...
|
||||
>>> app1 = Celery(broker='amqp://')
|
||||
>>> add.app is app1
|
||||
True
|
||||
>>> app2 = Celery(broker='redis://')
|
||||
>>> add.app is app2
|
||||
True
|
||||
"""
|
||||
def create_shared_task(**options):
|
||||
|
||||
def __inner(fun):
|
||||
name = options.get('name')
|
||||
# Set as a shared task so that unfinalized apps
# and future apps will register a copy of this task.
|
||||
_state.connect_on_app_finalize(
|
||||
lambda app: app._task_from_fun(fun, **options)
|
||||
)
|
||||
|
||||
# Force all finalized apps to take this task as well.
|
||||
for app in _state._get_active_apps():
|
||||
if app.finalized:
|
||||
with app._finalize_mutex:
|
||||
app._task_from_fun(fun, **options)
|
||||
|
||||
# Return a proxy that always gets the task from the current
# app's task registry.
|
||||
def task_by_cons():
|
||||
app = _state.get_current_app()
|
||||
return app.tasks[
|
||||
name or app.gen_task_name(fun.__name__, fun.__module__)
|
||||
]
|
||||
return Proxy(task_by_cons)
|
||||
return __inner
|
||||
|
||||
if len(args) == 1 and callable(args[0]):
|
||||
return create_shared_task(**kwargs)(args[0])
|
||||
return create_shared_task(*args, **kwargs)
|
||||
614
gnx-react/venv/lib/python3.12/site-packages/celery/app/amqp.py
Normal file
@@ -0,0 +1,614 @@
|
||||
"""Sending/Receiving Messages (Kombu integration)."""
|
||||
import numbers
|
||||
from collections import namedtuple
|
||||
from collections.abc import Mapping
|
||||
from datetime import timedelta
|
||||
from weakref import WeakValueDictionary
|
||||
|
||||
from kombu import Connection, Consumer, Exchange, Producer, Queue, pools
|
||||
from kombu.common import Broadcast
|
||||
from kombu.utils.functional import maybe_list
|
||||
from kombu.utils.objects import cached_property
|
||||
|
||||
from celery import signals
|
||||
from celery.utils.nodenames import anon_nodename
|
||||
from celery.utils.saferepr import saferepr
|
||||
from celery.utils.text import indent as textindent
|
||||
from celery.utils.time import maybe_make_aware
|
||||
|
||||
from . import routes as _routes
|
||||
|
||||
__all__ = ('AMQP', 'Queues', 'task_message')
|
||||
|
||||
#: earliest date supported by time.mktime.
|
||||
INT_MIN = -2147483648
|
||||
|
||||
#: Human readable queue declaration.
|
||||
QUEUE_FORMAT = """
|
||||
.> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \
|
||||
key={0.routing_key}
|
||||
"""
|
||||
|
||||
task_message = namedtuple('task_message',
|
||||
('headers', 'properties', 'body', 'sent_event'))
|
||||
|
||||
|
||||
def utf8dict(d, encoding='utf-8'):
|
||||
return {k.decode(encoding) if isinstance(k, bytes) else k: v
|
||||
for k, v in d.items()}
|
||||
|
||||
|
||||
class Queues(dict):
|
||||
"""Queue name⇒ declaration mapping.
|
||||
|
||||
Arguments:
|
||||
queues (Iterable): Initial list/tuple or dict of queues.
|
||||
create_missing (bool): By default any unknown queues will be
|
||||
added automatically, but if this flag is disabled the occurrence
|
||||
of unknown queues in `wanted` will raise :exc:`KeyError`.
|
||||
max_priority (int): Default x-max-priority for queues with none set.
|
||||
"""
|
||||
|
||||
#: If set, this is a subset of queues to consume from.
|
||||
#: The rest of the queues are then used for routing only.
|
||||
_consume_from = None
|
||||
|
||||
def __init__(self, queues=None, default_exchange=None,
|
||||
create_missing=True, autoexchange=None,
|
||||
max_priority=None, default_routing_key=None):
|
||||
super().__init__()
|
||||
self.aliases = WeakValueDictionary()
|
||||
self.default_exchange = default_exchange
|
||||
self.default_routing_key = default_routing_key
|
||||
self.create_missing = create_missing
|
||||
self.autoexchange = Exchange if autoexchange is None else autoexchange
|
||||
self.max_priority = max_priority
|
||||
if queues is not None and not isinstance(queues, Mapping):
|
||||
queues = {q.name: q for q in queues}
|
||||
queues = queues or {}
|
||||
for name, q in queues.items():
|
||||
self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)
|
||||
|
||||
def __getitem__(self, name):
|
||||
try:
|
||||
return self.aliases[name]
|
||||
except KeyError:
|
||||
return super().__getitem__(name)
|
||||
|
||||
def __setitem__(self, name, queue):
|
||||
if self.default_exchange and not queue.exchange:
|
||||
queue.exchange = self.default_exchange
|
||||
super().__setitem__(name, queue)
|
||||
if queue.alias:
|
||||
self.aliases[queue.alias] = queue
|
||||
|
||||
def __missing__(self, name):
|
||||
if self.create_missing:
|
||||
return self.add(self.new_missing(name))
|
||||
raise KeyError(name)
|
||||
|
||||
def add(self, queue, **kwargs):
|
||||
"""Add new queue.
|
||||
|
||||
The first argument can either be a :class:`kombu.Queue` instance,
|
||||
or the name of a queue. If the former the rest of the keyword
|
||||
arguments are ignored, and options are simply taken from the queue
|
||||
instance.
|
||||
|
||||
Arguments:
|
||||
queue (kombu.Queue, str): Queue to add.
|
||||
exchange (kombu.Exchange, str):
|
||||
if queue is str, specifies exchange name.
|
||||
routing_key (str): if queue is str, specifies binding key.
|
||||
exchange_type (str): if queue is str, specifies type of exchange.
|
||||
**options (Any): Additional declaration options used when
|
||||
queue is a str.
|
||||
"""
|
||||
if not isinstance(queue, Queue):
|
||||
return self.add_compat(queue, **kwargs)
|
||||
return self._add(queue)
|
||||
|
||||
def add_compat(self, name, **options):
|
||||
# docs used to use binding_key as routing key
|
||||
options.setdefault('routing_key', options.get('binding_key'))
|
||||
if options['routing_key'] is None:
|
||||
options['routing_key'] = name
|
||||
return self._add(Queue.from_dict(name, **options))
|
||||
|
||||
def _add(self, queue):
|
||||
if queue.exchange is None or queue.exchange.name == '':
|
||||
queue.exchange = self.default_exchange
|
||||
if not queue.routing_key:
|
||||
queue.routing_key = self.default_routing_key
|
||||
if self.max_priority is not None:
|
||||
if queue.queue_arguments is None:
|
||||
queue.queue_arguments = {}
|
||||
self._set_max_priority(queue.queue_arguments)
|
||||
self[queue.name] = queue
|
||||
return queue
|
||||
|
||||
def _set_max_priority(self, args):
|
||||
if 'x-max-priority' not in args and self.max_priority is not None:
|
||||
return args.update({'x-max-priority': self.max_priority})
|
||||
|
||||
def format(self, indent=0, indent_first=True):
|
||||
"""Format routing table into string for log dumps."""
|
||||
active = self.consume_from
|
||||
if not active:
|
||||
return ''
|
||||
info = [QUEUE_FORMAT.strip().format(q)
|
||||
for _, q in sorted(active.items())]
|
||||
if indent_first:
|
||||
return textindent('\n'.join(info), indent)
|
||||
return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
|
||||
|
||||
def select_add(self, queue, **kwargs):
|
||||
"""Add new task queue that'll be consumed from.
|
||||
|
||||
The queue will be active even when a subset has been selected
|
||||
using the :option:`celery worker -Q` option.
|
||||
"""
|
||||
q = self.add(queue, **kwargs)
|
||||
if self._consume_from is not None:
|
||||
self._consume_from[q.name] = q
|
||||
return q
|
||||
|
||||
def select(self, include):
|
||||
"""Select a subset of currently defined queues to consume from.
|
||||
|
||||
Arguments:
|
||||
include (Sequence[str], str): Names of queues to consume from.
|
||||
"""
|
||||
if include:
|
||||
self._consume_from = {
|
||||
name: self[name] for name in maybe_list(include)
|
||||
}
|
||||
|
||||
def deselect(self, exclude):
|
||||
"""Deselect queues so that they won't be consumed from.
|
||||
|
||||
Arguments:
|
||||
exclude (Sequence[str], str): Names of queues to avoid
|
||||
consuming from.
|
||||
"""
|
||||
if exclude:
|
||||
exclude = maybe_list(exclude)
|
||||
if self._consume_from is None:
|
||||
# using all queues
|
||||
return self.select(k for k in self if k not in exclude)
|
||||
# using selection
|
||||
for queue in exclude:
|
||||
self._consume_from.pop(queue, None)
|
||||
|
||||
def new_missing(self, name):
|
||||
return Queue(name, self.autoexchange(name), name)
|
||||
|
||||
@property
|
||||
def consume_from(self):
|
||||
if self._consume_from is not None:
|
||||
return self._consume_from
|
||||
return self
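# Illustrative sketch (not part of the upstream module): how the Queues mapping
# defined above is typically used.  The queue names, the 'tasks' exchange and the
# selected subset are assumptions for the example only.
#
#     >>> from kombu import Exchange, Queue
#     >>> queues = Queues([Queue('default'), Queue('priority.high')],
#     ...                 default_exchange=Exchange('tasks', type='direct'))
#     >>> queues.select(['priority.high'])   # consume only from this subset
#     >>> sorted(queues.consume_from)
#     ['priority.high']
#     >>> sorted(queues)                     # the full routing table is kept
#     ['default', 'priority.high']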
|
||||
|
||||
|
||||
class AMQP:
|
||||
"""App AMQP API: app.amqp."""
|
||||
|
||||
Connection = Connection
|
||||
Consumer = Consumer
|
||||
Producer = Producer
|
||||
|
||||
#: compat alias to Connection
|
||||
BrokerConnection = Connection
|
||||
|
||||
queues_cls = Queues
|
||||
|
||||
#: Cached and prepared routing table.
|
||||
_rtable = None
|
||||
|
||||
#: Underlying producer pool instance automatically
|
||||
#: set by the :attr:`producer_pool`.
|
||||
_producer_pool = None
|
||||
|
||||
# Exchange class/function used when defining automatic queues.
|
||||
# For example, you can use ``autoexchange = lambda n: None`` to use the
|
||||
# AMQP default exchange: a shortcut to bypass routing
|
||||
# and instead send directly to the queue named in the routing key.
|
||||
autoexchange = None
|
||||
|
||||
#: Max size of positional argument representation used for
|
||||
#: logging purposes.
|
||||
argsrepr_maxsize = 1024
|
||||
|
||||
#: Max size of keyword argument representation used for logging purposes.
|
||||
kwargsrepr_maxsize = 1024
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
||||
self.task_protocols = {
|
||||
1: self.as_task_v1,
|
||||
2: self.as_task_v2,
|
||||
}
|
||||
self.app._conf.bind_to(self._handle_conf_update)
|
||||
|
||||
@cached_property
|
||||
def create_task_message(self):
|
||||
return self.task_protocols[self.app.conf.task_protocol]
|
||||
|
||||
@cached_property
|
||||
def send_task_message(self):
|
||||
return self._create_task_sender()
|
||||
|
||||
def Queues(self, queues, create_missing=None,
|
||||
autoexchange=None, max_priority=None):
|
||||
# Create new :class:`Queues` instance, using queue defaults
|
||||
# from the current configuration.
|
||||
conf = self.app.conf
|
||||
default_routing_key = conf.task_default_routing_key
|
||||
if create_missing is None:
|
||||
create_missing = conf.task_create_missing_queues
|
||||
if max_priority is None:
|
||||
max_priority = conf.task_queue_max_priority
|
||||
if not queues and conf.task_default_queue:
|
||||
queues = (Queue(conf.task_default_queue,
|
||||
exchange=self.default_exchange,
|
||||
routing_key=default_routing_key),)
|
||||
autoexchange = (self.autoexchange if autoexchange is None
|
||||
else autoexchange)
|
||||
return self.queues_cls(
|
||||
queues, self.default_exchange, create_missing,
|
||||
autoexchange, max_priority, default_routing_key,
|
||||
)
|
||||
|
||||
def Router(self, queues=None, create_missing=None):
|
||||
"""Return the current task router."""
|
||||
return _routes.Router(self.routes, queues or self.queues,
|
||||
self.app.either('task_create_missing_queues',
|
||||
create_missing), app=self.app)
|
||||
|
||||
def flush_routes(self):
|
||||
self._rtable = _routes.prepare(self.app.conf.task_routes)
|
||||
|
||||
def TaskConsumer(self, channel, queues=None, accept=None, **kw):
|
||||
if accept is None:
|
||||
accept = self.app.conf.accept_content
|
||||
return self.Consumer(
|
||||
channel, accept=accept,
|
||||
queues=queues or list(self.queues.consume_from.values()),
|
||||
**kw
|
||||
)
|
||||
|
||||
def as_task_v2(self, task_id, name, args=None, kwargs=None,
|
||||
countdown=None, eta=None, group_id=None, group_index=None,
|
||||
expires=None, retries=0, chord=None,
|
||||
callbacks=None, errbacks=None, reply_to=None,
|
||||
time_limit=None, soft_time_limit=None,
|
||||
create_sent_event=False, root_id=None, parent_id=None,
|
||||
shadow=None, chain=None, now=None, timezone=None,
|
||||
origin=None, ignore_result=False, argsrepr=None, kwargsrepr=None, stamped_headers=None,
|
||||
**options):
|
||||
|
||||
args = args or ()
|
||||
kwargs = kwargs or {}
|
||||
if not isinstance(args, (list, tuple)):
|
||||
raise TypeError('task args must be a list or tuple')
|
||||
if not isinstance(kwargs, Mapping):
|
||||
raise TypeError('task keyword arguments must be a mapping')
|
||||
if countdown: # convert countdown to ETA
|
||||
self._verify_seconds(countdown, 'countdown')
|
||||
now = now or self.app.now()
|
||||
timezone = timezone or self.app.timezone
|
||||
eta = maybe_make_aware(
|
||||
now + timedelta(seconds=countdown), tz=timezone,
|
||||
)
|
||||
if isinstance(expires, numbers.Real):
|
||||
self._verify_seconds(expires, 'expires')
|
||||
now = now or self.app.now()
|
||||
timezone = timezone or self.app.timezone
|
||||
expires = maybe_make_aware(
|
||||
now + timedelta(seconds=expires), tz=timezone,
|
||||
)
|
||||
if not isinstance(eta, str):
|
||||
eta = eta and eta.isoformat()
|
||||
# If we retry a task, `expires` will already be ISO 8601-formatted.
|
||||
if not isinstance(expires, str):
|
||||
expires = expires and expires.isoformat()
|
||||
|
||||
if argsrepr is None:
|
||||
argsrepr = saferepr(args, self.argsrepr_maxsize)
|
||||
if kwargsrepr is None:
|
||||
kwargsrepr = saferepr(kwargs, self.kwargsrepr_maxsize)
|
||||
|
||||
if not root_id: # empty root_id defaults to task_id
|
||||
root_id = task_id
|
||||
|
||||
stamps = {header: options[header] for header in stamped_headers or []}
|
||||
headers = {
|
||||
'lang': 'py',
|
||||
'task': name,
|
||||
'id': task_id,
|
||||
'shadow': shadow,
|
||||
'eta': eta,
|
||||
'expires': expires,
|
||||
'group': group_id,
|
||||
'group_index': group_index,
|
||||
'retries': retries,
|
||||
'timelimit': [time_limit, soft_time_limit],
|
||||
'root_id': root_id,
|
||||
'parent_id': parent_id,
|
||||
'argsrepr': argsrepr,
|
||||
'kwargsrepr': kwargsrepr,
|
||||
'origin': origin or anon_nodename(),
|
||||
'ignore_result': ignore_result,
|
||||
'stamped_headers': stamped_headers,
|
||||
'stamps': stamps,
|
||||
}
|
||||
|
||||
return task_message(
|
||||
headers=headers,
|
||||
properties={
|
||||
'correlation_id': task_id,
|
||||
'reply_to': reply_to or '',
|
||||
},
|
||||
body=(
|
||||
args, kwargs, {
|
||||
'callbacks': callbacks,
|
||||
'errbacks': errbacks,
|
||||
'chain': chain,
|
||||
'chord': chord,
|
||||
},
|
||||
),
|
||||
sent_event={
|
||||
'uuid': task_id,
|
||||
'root_id': root_id,
|
||||
'parent_id': parent_id,
|
||||
'name': name,
|
||||
'args': argsrepr,
|
||||
'kwargs': kwargsrepr,
|
||||
'retries': retries,
|
||||
'eta': eta,
|
||||
'expires': expires,
|
||||
} if create_sent_event else None,
|
||||
)
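# Illustrative sketch (not part of the upstream module): building a protocol-2
# message directly.  The task name 'tasks.add' and the configured ``app`` are
# assumptions for the example; normally ``app.amqp.create_task_message`` picks
# the right builder from ``task_protocol``.
#
#     >>> from uuid import uuid4
#     >>> msg = app.amqp.as_task_v2(str(uuid4()), 'tasks.add',
#     ...                           args=(2, 2), countdown=10)
#     >>> msg.headers['task'], msg.body[0]
#     ('tasks.add', (2, 2))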
|
||||
|
||||
def as_task_v1(self, task_id, name, args=None, kwargs=None,
|
||||
countdown=None, eta=None, group_id=None, group_index=None,
|
||||
expires=None, retries=0,
|
||||
chord=None, callbacks=None, errbacks=None, reply_to=None,
|
||||
time_limit=None, soft_time_limit=None,
|
||||
create_sent_event=False, root_id=None, parent_id=None,
|
||||
shadow=None, now=None, timezone=None,
|
||||
**compat_kwargs):
|
||||
args = args or ()
|
||||
kwargs = kwargs or {}
|
||||
utc = self.utc
|
||||
if not isinstance(args, (list, tuple)):
|
||||
raise TypeError('task args must be a list or tuple')
|
||||
if not isinstance(kwargs, Mapping):
|
||||
raise TypeError('task keyword arguments must be a mapping')
|
||||
if countdown: # convert countdown to ETA
|
||||
self._verify_seconds(countdown, 'countdown')
|
||||
now = now or self.app.now()
|
||||
eta = now + timedelta(seconds=countdown)
|
||||
if isinstance(expires, numbers.Real):
|
||||
self._verify_seconds(expires, 'expires')
|
||||
now = now or self.app.now()
|
||||
expires = now + timedelta(seconds=expires)
|
||||
eta = eta and eta.isoformat()
|
||||
expires = expires and expires.isoformat()
|
||||
|
||||
return task_message(
|
||||
headers={},
|
||||
properties={
|
||||
'correlation_id': task_id,
|
||||
'reply_to': reply_to or '',
|
||||
},
|
||||
body={
|
||||
'task': name,
|
||||
'id': task_id,
|
||||
'args': args,
|
||||
'kwargs': kwargs,
|
||||
'group': group_id,
|
||||
'group_index': group_index,
|
||||
'retries': retries,
|
||||
'eta': eta,
|
||||
'expires': expires,
|
||||
'utc': utc,
|
||||
'callbacks': callbacks,
|
||||
'errbacks': errbacks,
|
||||
'timelimit': (time_limit, soft_time_limit),
|
||||
'taskset': group_id,
|
||||
'chord': chord,
|
||||
},
|
||||
sent_event={
|
||||
'uuid': task_id,
|
||||
'name': name,
|
||||
'args': saferepr(args),
|
||||
'kwargs': saferepr(kwargs),
|
||||
'retries': retries,
|
||||
'eta': eta,
|
||||
'expires': expires,
|
||||
} if create_sent_event else None,
|
||||
)
|
||||
|
||||
def _verify_seconds(self, s, what):
|
||||
if s < INT_MIN:
|
||||
raise ValueError(f'{what} is out of range: {s!r}')
|
||||
return s
|
||||
|
||||
def _create_task_sender(self):
|
||||
default_retry = self.app.conf.task_publish_retry
|
||||
default_policy = self.app.conf.task_publish_retry_policy
|
||||
default_delivery_mode = self.app.conf.task_default_delivery_mode
|
||||
default_queue = self.default_queue
|
||||
queues = self.queues
|
||||
send_before_publish = signals.before_task_publish.send
|
||||
before_receivers = signals.before_task_publish.receivers
|
||||
send_after_publish = signals.after_task_publish.send
|
||||
after_receivers = signals.after_task_publish.receivers
|
||||
|
||||
send_task_sent = signals.task_sent.send # XXX compat
|
||||
sent_receivers = signals.task_sent.receivers
|
||||
|
||||
default_evd = self._event_dispatcher
|
||||
default_exchange = self.default_exchange
|
||||
|
||||
default_rkey = self.app.conf.task_default_routing_key
|
||||
default_serializer = self.app.conf.task_serializer
|
||||
default_compressor = self.app.conf.task_compression
|
||||
|
||||
def send_task_message(producer, name, message,
|
||||
exchange=None, routing_key=None, queue=None,
|
||||
event_dispatcher=None,
|
||||
retry=None, retry_policy=None,
|
||||
serializer=None, delivery_mode=None,
|
||||
compression=None, declare=None,
|
||||
headers=None, exchange_type=None, **kwargs):
|
||||
retry = default_retry if retry is None else retry
|
||||
headers2, properties, body, sent_event = message
|
||||
if headers:
|
||||
headers2.update(headers)
|
||||
if kwargs:
|
||||
properties.update(kwargs)
|
||||
|
||||
qname = queue
|
||||
if queue is None and exchange is None:
|
||||
queue = default_queue
|
||||
if queue is not None:
|
||||
if isinstance(queue, str):
|
||||
qname, queue = queue, queues[queue]
|
||||
else:
|
||||
qname = queue.name
|
||||
|
||||
if delivery_mode is None:
|
||||
try:
|
||||
delivery_mode = queue.exchange.delivery_mode
|
||||
except AttributeError:
|
||||
pass
|
||||
delivery_mode = delivery_mode or default_delivery_mode
|
||||
|
||||
if exchange_type is None:
|
||||
try:
|
||||
exchange_type = queue.exchange.type
|
||||
except AttributeError:
|
||||
exchange_type = 'direct'
|
||||
|
||||
# convert to anon-exchange when the exchange isn't set and the exchange type is direct.
|
||||
if (not exchange or not routing_key) and exchange_type == 'direct':
|
||||
exchange, routing_key = '', qname
|
||||
elif exchange is None:
|
||||
# not topic exchange, and exchange not undefined
|
||||
exchange = queue.exchange.name or default_exchange
|
||||
routing_key = routing_key or queue.routing_key or default_rkey
|
||||
if declare is None and queue and not isinstance(queue, Broadcast):
|
||||
declare = [queue]
|
||||
|
||||
# merge default and custom policy
|
||||
retry = default_retry if retry is None else retry
|
||||
_rp = (dict(default_policy, **retry_policy) if retry_policy
|
||||
else default_policy)
|
||||
|
||||
if before_receivers:
|
||||
send_before_publish(
|
||||
sender=name, body=body,
|
||||
exchange=exchange, routing_key=routing_key,
|
||||
declare=declare, headers=headers2,
|
||||
properties=properties, retry_policy=retry_policy,
|
||||
)
|
||||
ret = producer.publish(
|
||||
body,
|
||||
exchange=exchange,
|
||||
routing_key=routing_key,
|
||||
serializer=serializer or default_serializer,
|
||||
compression=compression or default_compressor,
|
||||
retry=retry, retry_policy=_rp,
|
||||
delivery_mode=delivery_mode, declare=declare,
|
||||
headers=headers2,
|
||||
**properties
|
||||
)
|
||||
if after_receivers:
|
||||
send_after_publish(sender=name, body=body, headers=headers2,
|
||||
exchange=exchange, routing_key=routing_key)
|
||||
if sent_receivers: # XXX deprecated
|
||||
if isinstance(body, tuple): # protocol version 2
|
||||
send_task_sent(
|
||||
sender=name, task_id=headers2['id'], task=name,
|
||||
args=body[0], kwargs=body[1],
|
||||
eta=headers2['eta'], taskset=headers2['group'],
|
||||
)
|
||||
else: # protocol version 1
|
||||
send_task_sent(
|
||||
sender=name, task_id=body['id'], task=name,
|
||||
args=body['args'], kwargs=body['kwargs'],
|
||||
eta=body['eta'], taskset=body['taskset'],
|
||||
)
|
||||
if sent_event:
|
||||
evd = event_dispatcher or default_evd
|
||||
exname = exchange
|
||||
if isinstance(exname, Exchange):
|
||||
exname = exname.name
|
||||
sent_event.update({
|
||||
'queue': qname,
|
||||
'exchange': exname,
|
||||
'routing_key': routing_key,
|
||||
})
|
||||
evd.publish('task-sent', sent_event,
|
||||
producer, retry=retry, retry_policy=retry_policy)
|
||||
return ret
|
||||
return send_task_message
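# Illustrative sketch (not part of the upstream module): the closure returned
# above is what e.g. ``app.send_task``/``Task.apply_async`` end up calling, so
# a publish such as (task name is an assumption for the example):
#
#     >>> app.send_task('tasks.add', (2, 2), countdown=10)
#
# builds the message via ``create_task_message`` and publishes it through
# ``send_task_message`` using the routing/retry defaults captured here.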
|
||||
|
||||
@cached_property
|
||||
def default_queue(self):
|
||||
return self.queues[self.app.conf.task_default_queue]
|
||||
|
||||
@cached_property
|
||||
def queues(self):
|
||||
"""Queue name⇒ declaration mapping."""
|
||||
return self.Queues(self.app.conf.task_queues)
|
||||
|
||||
@queues.setter
|
||||
def queues(self, queues):
|
||||
return self.Queues(queues)
|
||||
|
||||
@property
|
||||
def routes(self):
|
||||
if self._rtable is None:
|
||||
self.flush_routes()
|
||||
return self._rtable
|
||||
|
||||
@cached_property
|
||||
def router(self):
|
||||
return self.Router()
|
||||
|
||||
@router.setter
|
||||
def router(self, value):
|
||||
return value
|
||||
|
||||
@property
|
||||
def producer_pool(self):
|
||||
if self._producer_pool is None:
|
||||
self._producer_pool = pools.producers[
|
||||
self.app.connection_for_write()]
|
||||
self._producer_pool.limit = self.app.pool.limit
|
||||
return self._producer_pool
|
||||
publisher_pool = producer_pool # compat alias
|
||||
|
||||
@cached_property
|
||||
def default_exchange(self):
|
||||
return Exchange(self.app.conf.task_default_exchange,
|
||||
self.app.conf.task_default_exchange_type)
|
||||
|
||||
@cached_property
|
||||
def utc(self):
|
||||
return self.app.conf.enable_utc
|
||||
|
||||
@cached_property
|
||||
def _event_dispatcher(self):
|
||||
# We call Dispatcher.publish with a custom producer
|
||||
# so don't need the dispatcher to be enabled.
|
||||
return self.app.events.Dispatcher(enabled=False)
|
||||
|
||||
def _handle_conf_update(self, *args, **kwargs):
|
||||
if ('task_routes' in kwargs or 'task_routes' in args):
|
||||
self.flush_routes()
|
||||
self.router = self.Router()
|
||||
return
|
||||
52
gnx-react/venv/lib/python3.12/site-packages/celery/app/annotations.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""Task Annotations.
|
||||
|
||||
Annotations is a nice term for monkey-patching task classes
|
||||
in the configuration.
|
||||
|
||||
This prepares and performs the annotations in the
|
||||
:setting:`task_annotations` setting.
|
||||
"""
|
||||
from celery.utils.functional import firstmethod, mlazy
|
||||
from celery.utils.imports import instantiate
|
||||
|
||||
_first_match = firstmethod('annotate')
|
||||
_first_match_any = firstmethod('annotate_any')
|
||||
|
||||
__all__ = ('MapAnnotation', 'prepare', 'resolve_all')
|
||||
|
||||
|
||||
class MapAnnotation(dict):
|
||||
"""Annotation map: task_name => attributes."""
|
||||
|
||||
def annotate_any(self):
|
||||
try:
|
||||
return dict(self['*'])
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def annotate(self, task):
|
||||
try:
|
||||
return dict(self[task.name])
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
def prepare(annotations):
|
||||
"""Expand the :setting:`task_annotations` setting."""
|
||||
def expand_annotation(annotation):
|
||||
if isinstance(annotation, dict):
|
||||
return MapAnnotation(annotation)
|
||||
elif isinstance(annotation, str):
|
||||
return mlazy(instantiate, annotation)
|
||||
return annotation
|
||||
|
||||
if annotations is None:
|
||||
return ()
|
||||
elif not isinstance(annotations, (list, tuple)):
|
||||
annotations = (annotations,)
|
||||
return [expand_annotation(anno) for anno in annotations]
|
||||
|
||||
|
||||
def resolve_all(anno, task):
|
||||
"""Resolve all pending annotations."""
|
||||
return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x)
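# Illustrative sketch (not part of the upstream module): the shape of the
# :setting:`task_annotations` setting that feeds ``prepare()``.  The task name
# and rate limit are assumptions for the example only.
#
#     app.conf.task_annotations = {'tasks.add': {'rate_limit': '10/s'}}
#
# or, to annotate every task, use the ``'*'`` key handled by
# ``MapAnnotation.annotate_any()``:
#
#     app.conf.task_annotations = {'*': {'rate_limit': '10/s'}}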
|
||||
66
gnx-react/venv/lib/python3.12/site-packages/celery/app/autoretry.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""Tasks auto-retry functionality."""
|
||||
from vine.utils import wraps
|
||||
|
||||
from celery.exceptions import Ignore, Retry
|
||||
from celery.utils.time import get_exponential_backoff_interval
|
||||
|
||||
|
||||
def add_autoretry_behaviour(task, **options):
|
||||
"""Wrap task's `run` method with auto-retry functionality."""
|
||||
autoretry_for = tuple(
|
||||
options.get('autoretry_for',
|
||||
getattr(task, 'autoretry_for', ()))
|
||||
)
|
||||
dont_autoretry_for = tuple(
|
||||
options.get('dont_autoretry_for',
|
||||
getattr(task, 'dont_autoretry_for', ()))
|
||||
)
|
||||
retry_kwargs = options.get(
|
||||
'retry_kwargs', getattr(task, 'retry_kwargs', {})
|
||||
)
|
||||
retry_backoff = float(
|
||||
options.get('retry_backoff',
|
||||
getattr(task, 'retry_backoff', False))
|
||||
)
|
||||
retry_backoff_max = int(
|
||||
options.get('retry_backoff_max',
|
||||
getattr(task, 'retry_backoff_max', 600))
|
||||
)
|
||||
retry_jitter = options.get(
|
||||
'retry_jitter', getattr(task, 'retry_jitter', True)
|
||||
)
|
||||
|
||||
if autoretry_for and not hasattr(task, '_orig_run'):
|
||||
|
||||
@wraps(task.run)
|
||||
def run(*args, **kwargs):
|
||||
try:
|
||||
return task._orig_run(*args, **kwargs)
|
||||
except Ignore:
|
||||
# If an Ignore signal occurs, the task shouldn't be retried,
# even if it matches the autoretry_for list
|
||||
raise
|
||||
except Retry:
|
||||
raise
|
||||
except dont_autoretry_for:
|
||||
raise
|
||||
except autoretry_for as exc:
|
||||
if retry_backoff:
|
||||
retry_kwargs['countdown'] = \
|
||||
get_exponential_backoff_interval(
|
||||
factor=int(max(1.0, retry_backoff)),
|
||||
retries=task.request.retries,
|
||||
maximum=retry_backoff_max,
|
||||
full_jitter=retry_jitter)
|
||||
# Override max_retries
|
||||
if hasattr(task, 'override_max_retries'):
|
||||
retry_kwargs['max_retries'] = getattr(task,
|
||||
'override_max_retries',
|
||||
task.max_retries)
|
||||
ret = task.retry(exc=exc, **retry_kwargs)
|
||||
# Stop propagation
|
||||
if hasattr(task, 'override_max_retries'):
|
||||
delattr(task, 'override_max_retries')
|
||||
raise ret
|
||||
|
||||
task._orig_run, task.run = task.run, run
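# Illustrative sketch (not part of the upstream module): the options read above
# are normally passed to the task decorator.  The exception type and the numbers
# are assumptions for the example only.
#
#     @app.task(autoretry_for=(ConnectionError,), retry_backoff=2,
#               retry_backoff_max=60, retry_jitter=False,
#               retry_kwargs={'max_retries': 5})
#     def fetch(url):
#         ...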
|
||||
68
gnx-react/venv/lib/python3.12/site-packages/celery/app/backends.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""Backend selection."""
|
||||
import sys
|
||||
import types
|
||||
|
||||
from celery._state import current_app
|
||||
from celery.exceptions import ImproperlyConfigured, reraise
|
||||
from celery.utils.imports import load_extension_class_names, symbol_by_name
|
||||
|
||||
__all__ = ('by_name', 'by_url')
|
||||
|
||||
UNKNOWN_BACKEND = """
|
||||
Unknown result backend: {0!r}. Did you spell that correctly? ({1!r})
|
||||
"""
|
||||
|
||||
BACKEND_ALIASES = {
|
||||
'rpc': 'celery.backends.rpc.RPCBackend',
|
||||
'cache': 'celery.backends.cache:CacheBackend',
|
||||
'redis': 'celery.backends.redis:RedisBackend',
|
||||
'rediss': 'celery.backends.redis:RedisBackend',
|
||||
'sentinel': 'celery.backends.redis:SentinelBackend',
|
||||
'mongodb': 'celery.backends.mongodb:MongoBackend',
|
||||
'db': 'celery.backends.database:DatabaseBackend',
|
||||
'database': 'celery.backends.database:DatabaseBackend',
|
||||
'elasticsearch': 'celery.backends.elasticsearch:ElasticsearchBackend',
|
||||
'cassandra': 'celery.backends.cassandra:CassandraBackend',
|
||||
'couchbase': 'celery.backends.couchbase:CouchbaseBackend',
|
||||
'couchdb': 'celery.backends.couchdb:CouchBackend',
|
||||
'cosmosdbsql': 'celery.backends.cosmosdbsql:CosmosDBSQLBackend',
|
||||
'riak': 'celery.backends.riak:RiakBackend',
|
||||
'file': 'celery.backends.filesystem:FilesystemBackend',
|
||||
'disabled': 'celery.backends.base:DisabledBackend',
|
||||
'consul': 'celery.backends.consul:ConsulBackend',
|
||||
'dynamodb': 'celery.backends.dynamodb:DynamoDBBackend',
|
||||
'azureblockblob': 'celery.backends.azureblockblob:AzureBlockBlobBackend',
|
||||
'arangodb': 'celery.backends.arangodb:ArangoDbBackend',
|
||||
's3': 'celery.backends.s3:S3Backend',
|
||||
}
|
||||
|
||||
|
||||
def by_name(backend=None, loader=None,
|
||||
extension_namespace='celery.result_backends'):
|
||||
"""Get backend class by name/alias."""
|
||||
backend = backend or 'disabled'
|
||||
loader = loader or current_app.loader
|
||||
aliases = dict(BACKEND_ALIASES, **loader.override_backends)
|
||||
aliases.update(load_extension_class_names(extension_namespace))
|
||||
try:
|
||||
cls = symbol_by_name(backend, aliases)
|
||||
except ValueError as exc:
|
||||
reraise(ImproperlyConfigured, ImproperlyConfigured(
|
||||
UNKNOWN_BACKEND.strip().format(backend, exc)), sys.exc_info()[2])
|
||||
if isinstance(cls, types.ModuleType):
|
||||
raise ImproperlyConfigured(UNKNOWN_BACKEND.strip().format(
|
||||
backend, 'is a Python module, not a backend class.'))
|
||||
return cls
|
||||
|
||||
|
||||
def by_url(backend=None, loader=None):
|
||||
"""Get backend class by URL."""
|
||||
url = None
|
||||
if backend and '://' in backend:
|
||||
url = backend
|
||||
scheme, _, _ = url.partition('://')
|
||||
if '+' in scheme:
|
||||
backend, url = url.split('+', 1)
|
||||
else:
|
||||
backend = scheme
|
||||
return by_name(backend, loader), url
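# Illustrative sketch (not part of the upstream module): how ``by_url`` splits a
# result-backend URL.  The URLs are assumptions for the example, and the resolved
# class depends on the installed extras.
#
#     >>> by_url('redis://localhost:6379/1')
#     (<class 'celery.backends.redis.RedisBackend'>, 'redis://localhost:6379/1')
#     >>> by_url('db+postgresql://scott:tiger@localhost/mydb')[1]
#     'postgresql://scott:tiger@localhost/mydb'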
|
||||
1366
gnx-react/venv/lib/python3.12/site-packages/celery/app/base.py
Normal file
1366
gnx-react/venv/lib/python3.12/site-packages/celery/app/base.py
Normal file
File diff suppressed because it is too large
187
gnx-react/venv/lib/python3.12/site-packages/celery/app/builtins.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""Built-in Tasks.
|
||||
|
||||
The built-in tasks are always available in all app instances.
|
||||
"""
|
||||
from celery._state import connect_on_app_finalize
|
||||
from celery.utils.log import get_logger
|
||||
|
||||
__all__ = ()
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_backend_cleanup_task(app):
|
||||
"""Task used to clean up expired results.
|
||||
|
||||
If the configured backend requires periodic cleanup this task is also
|
||||
automatically configured to run every day at 4am (requires
|
||||
:program:`celery beat` to be running).
|
||||
"""
|
||||
@app.task(name='celery.backend_cleanup', shared=False, lazy=False)
|
||||
def backend_cleanup():
|
||||
app.backend.cleanup()
|
||||
return backend_cleanup
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_accumulate_task(app):
|
||||
"""Task used by Task.replace when replacing task with group."""
|
||||
@app.task(bind=True, name='celery.accumulate', shared=False, lazy=False)
|
||||
def accumulate(self, *args, **kwargs):
|
||||
index = kwargs.get('index')
|
||||
return args[index] if index is not None else args
|
||||
return accumulate
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_unlock_chord_task(app):
|
||||
"""Task used by result backends without native chord support.
|
||||
|
||||
Will join the chord by creating a task chain that polls the header
for completion.
|
||||
"""
|
||||
from celery.canvas import maybe_signature
|
||||
from celery.exceptions import ChordError
|
||||
from celery.result import allow_join_result, result_from_tuple
|
||||
|
||||
@app.task(name='celery.chord_unlock', max_retries=None, shared=False,
|
||||
default_retry_delay=app.conf.result_chord_retry_interval, ignore_result=True, lazy=False, bind=True)
|
||||
def unlock_chord(self, group_id, callback, interval=None,
|
||||
max_retries=None, result=None,
|
||||
Result=app.AsyncResult, GroupResult=app.GroupResult,
|
||||
result_from_tuple=result_from_tuple, **kwargs):
|
||||
if interval is None:
|
||||
interval = self.default_retry_delay
|
||||
|
||||
# check if the task group is ready, and if so apply the callback.
|
||||
callback = maybe_signature(callback, app)
|
||||
deps = GroupResult(
|
||||
group_id,
|
||||
[result_from_tuple(r, app=app) for r in result],
|
||||
app=app,
|
||||
)
|
||||
j = deps.join_native if deps.supports_native_join else deps.join
|
||||
|
||||
try:
|
||||
ready = deps.ready()
|
||||
except Exception as exc:
|
||||
raise self.retry(
|
||||
exc=exc, countdown=interval, max_retries=max_retries,
|
||||
)
|
||||
else:
|
||||
if not ready:
|
||||
raise self.retry(countdown=interval, max_retries=max_retries)
|
||||
|
||||
callback = maybe_signature(callback, app=app)
|
||||
try:
|
||||
with allow_join_result():
|
||||
ret = j(
|
||||
timeout=app.conf.result_chord_join_timeout,
|
||||
propagate=True,
|
||||
)
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
try:
|
||||
culprit = next(deps._failed_join_report())
|
||||
reason = f'Dependency {culprit.id} raised {exc!r}'
|
||||
except StopIteration:
|
||||
reason = repr(exc)
|
||||
logger.exception('Chord %r raised: %r', group_id, exc)
|
||||
app.backend.chord_error_from_stack(callback, ChordError(reason))
|
||||
else:
|
||||
try:
|
||||
callback.delay(ret)
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
logger.exception('Chord %r raised: %r', group_id, exc)
|
||||
app.backend.chord_error_from_stack(
|
||||
callback,
|
||||
exc=ChordError(f'Callback error: {exc!r}'),
|
||||
)
|
||||
return unlock_chord
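# Illustrative sketch (not part of the upstream module): ``celery.chord_unlock``
# is scheduled automatically when a chord runs on a backend without native join
# support, e.g. (the ``add`` and ``tsum`` task names are assumptions):
#
#     >>> from celery import chord
#     >>> chord([add.s(i, i) for i in range(10)])(tsum.s())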
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_map_task(app):
|
||||
from celery.canvas import signature
|
||||
|
||||
@app.task(name='celery.map', shared=False, lazy=False)
|
||||
def xmap(task, it):
|
||||
task = signature(task, app=app).type
|
||||
return [task(item) for item in it]
|
||||
return xmap
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_starmap_task(app):
|
||||
from celery.canvas import signature
|
||||
|
||||
@app.task(name='celery.starmap', shared=False, lazy=False)
|
||||
def xstarmap(task, it):
|
||||
task = signature(task, app=app).type
|
||||
return [task(*item) for item in it]
|
||||
return xstarmap
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_chunk_task(app):
|
||||
from celery.canvas import chunks as _chunks
|
||||
|
||||
@app.task(name='celery.chunks', shared=False, lazy=False)
|
||||
def chunks(task, it, n):
|
||||
return _chunks.apply_chunks(task, it, n)
|
||||
return chunks
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_group_task(app):
|
||||
"""No longer used, but here for backwards compatibility."""
|
||||
from celery.canvas import maybe_signature
|
||||
from celery.result import result_from_tuple
|
||||
|
||||
@app.task(name='celery.group', bind=True, shared=False, lazy=False)
|
||||
def group(self, tasks, result, group_id, partial_args, add_to_parent=True):
|
||||
app = self.app
|
||||
result = result_from_tuple(result, app)
|
||||
# any partial args are added to all tasks in the group
|
||||
taskit = (maybe_signature(task, app=app).clone(partial_args)
|
||||
for i, task in enumerate(tasks))
|
||||
with app.producer_or_acquire() as producer:
|
||||
[stask.apply_async(group_id=group_id, producer=producer,
|
||||
add_to_parent=False) for stask in taskit]
|
||||
parent = app.current_worker_task
|
||||
if add_to_parent and parent:
|
||||
parent.add_trail(result)
|
||||
return result
|
||||
return group
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_chain_task(app):
|
||||
"""No longer used, but here for backwards compatibility."""
|
||||
@app.task(name='celery.chain', shared=False, lazy=False)
|
||||
def chain(*args, **kwargs):
|
||||
raise NotImplementedError('chain is not a real task')
|
||||
return chain
|
||||
|
||||
|
||||
@connect_on_app_finalize
|
||||
def add_chord_task(app):
|
||||
"""No longer used, but here for backwards compatibility."""
|
||||
from celery import chord as _chord
|
||||
from celery import group
|
||||
from celery.canvas import maybe_signature
|
||||
|
||||
@app.task(name='celery.chord', bind=True, ignore_result=False,
|
||||
shared=False, lazy=False)
|
||||
def chord(self, header, body, partial_args=(), interval=None,
|
||||
countdown=1, max_retries=None, eager=False, **kwargs):
|
||||
app = self.app
|
||||
# - convert back to group if serialized
|
||||
tasks = header.tasks if isinstance(header, group) else header
|
||||
header = group([
|
||||
maybe_signature(s, app=app) for s in tasks
|
||||
], app=self.app)
|
||||
body = maybe_signature(body, app=app)
|
||||
ch = _chord(header, body)
|
||||
return ch.run(header, body, partial_args, app, interval,
|
||||
countdown, max_retries, **kwargs)
|
||||
return chord
|
||||
779
gnx-react/venv/lib/python3.12/site-packages/celery/app/control.py
Normal file
@@ -0,0 +1,779 @@
|
||||
"""Worker Remote Control Client.
|
||||
|
||||
Client for worker remote control commands.
|
||||
Server implementation is in :mod:`celery.worker.control`.
|
||||
There are two types of remote control commands:
|
||||
|
||||
* Inspect commands: Have no side effects; they usually just return some value
found in the worker, like the list of currently registered tasks or the list of active tasks.
Commands are accessible via the :class:`Inspect` class.
|
||||
|
||||
* Control commands: Perform side effects, like adding a new queue to consume from.
Commands are accessible via the :class:`Control` class.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
from billiard.common import TERM_SIGNAME
|
||||
from kombu.matcher import match
|
||||
from kombu.pidbox import Mailbox
|
||||
from kombu.utils.compat import register_after_fork
|
||||
from kombu.utils.functional import lazy
|
||||
from kombu.utils.objects import cached_property
|
||||
|
||||
from celery.exceptions import DuplicateNodenameWarning
|
||||
from celery.utils.log import get_logger
|
||||
from celery.utils.text import pluralize
|
||||
|
||||
__all__ = ('Inspect', 'Control', 'flatten_reply')
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
W_DUPNODE = """\
|
||||
Received multiple replies from node {0}: {1}.
|
||||
Please make sure you give each node a unique nodename using
|
||||
the celery worker `-n` option.\
|
||||
"""
|
||||
|
||||
|
||||
def flatten_reply(reply):
|
||||
"""Flatten node replies.
|
||||
|
||||
Convert from a list of replies in this format::
|
||||
|
||||
[{'a@example.com': reply},
|
||||
{'b@example.com': reply}]
|
||||
|
||||
into this format::
|
||||
|
||||
{'a@example.com': reply,
|
||||
'b@example.com': reply}
|
||||
"""
|
||||
nodes, dupes = {}, set()
|
||||
for item in reply:
|
||||
[dupes.add(name) for name in item if name in nodes]
|
||||
nodes.update(item)
|
||||
if dupes:
|
||||
warnings.warn(DuplicateNodenameWarning(
|
||||
W_DUPNODE.format(
|
||||
pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
|
||||
),
|
||||
))
|
||||
return nodes
|
||||
|
||||
|
||||
def _after_fork_cleanup_control(control):
|
||||
try:
|
||||
control._after_fork()
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
logger.info('after fork raised exception: %r', exc, exc_info=1)
|
||||
|
||||
|
||||
class Inspect:
|
||||
"""API for inspecting workers.
|
||||
|
||||
This class provides a proxy for accessing the Inspect API of workers. The API is
defined in :py:mod:`celery.worker.control`.
|
||||
"""
|
||||
|
||||
app = None
|
||||
|
||||
def __init__(self, destination=None, timeout=1.0, callback=None,
|
||||
connection=None, app=None, limit=None, pattern=None,
|
||||
matcher=None):
|
||||
self.app = app or self.app
|
||||
self.destination = destination
|
||||
self.timeout = timeout
|
||||
self.callback = callback
|
||||
self.connection = connection
|
||||
self.limit = limit
|
||||
self.pattern = pattern
|
||||
self.matcher = matcher
|
||||
|
||||
def _prepare(self, reply):
|
||||
if reply:
|
||||
by_node = flatten_reply(reply)
|
||||
if (self.destination and
|
||||
not isinstance(self.destination, (list, tuple))):
|
||||
return by_node.get(self.destination)
|
||||
if self.pattern:
|
||||
pattern = self.pattern
|
||||
matcher = self.matcher
|
||||
return {node: reply for node, reply in by_node.items()
|
||||
if match(node, pattern, matcher)}
|
||||
return by_node
|
||||
|
||||
def _request(self, command, **kwargs):
|
||||
return self._prepare(self.app.control.broadcast(
|
||||
command,
|
||||
arguments=kwargs,
|
||||
destination=self.destination,
|
||||
callback=self.callback,
|
||||
connection=self.connection,
|
||||
limit=self.limit,
|
||||
timeout=self.timeout, reply=True,
|
||||
pattern=self.pattern, matcher=self.matcher,
|
||||
))
|
||||
|
||||
def report(self):
|
||||
"""Return human readable report for each worker.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: {'ok': REPORT_STRING}}``.
|
||||
"""
|
||||
return self._request('report')
|
||||
|
||||
def clock(self):
|
||||
"""Get the Clock value on workers.
|
||||
|
||||
>>> app.control.inspect().clock()
|
||||
{'celery@node1': {'clock': 12}}
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: CLOCK_VALUE}``.
|
||||
"""
|
||||
return self._request('clock')
|
||||
|
||||
def active(self, safe=None):
|
||||
"""Return list of tasks currently executed by workers.
|
||||
|
||||
Arguments:
|
||||
safe (Boolean): Set to True to disable deserialization.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``.
|
||||
|
||||
See Also:
|
||||
For ``TASK_INFO`` details see :func:`query_task` return value.
|
||||
|
||||
"""
|
||||
return self._request('active', safe=safe)
|
||||
|
||||
def scheduled(self, safe=None):
|
||||
"""Return list of scheduled tasks with details.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: [TASK_SCHEDULED_INFO,...]}``.
|
||||
|
||||
Here is the list of ``TASK_SCHEDULED_INFO`` fields:
|
||||
|
||||
* ``eta`` - scheduled time for task execution, as a string in ISO 8601 format
|
||||
* ``priority`` - priority of the task
|
||||
* ``request`` - field containing ``TASK_INFO`` value.
|
||||
|
||||
See Also:
|
||||
For more details about ``TASK_INFO`` see :func:`query_task` return value.
|
||||
"""
|
||||
return self._request('scheduled')
|
||||
|
||||
def reserved(self, safe=None):
|
||||
"""Return list of currently reserved tasks, not including scheduled/active.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``.
|
||||
|
||||
See Also:
|
||||
For ``TASK_INFO`` details see :func:`query_task` return value.
|
||||
"""
|
||||
return self._request('reserved')
|
||||
|
||||
def stats(self):
|
||||
"""Return statistics of worker.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: STAT_INFO}``.
|
||||
|
||||
Here is the list of ``STAT_INFO`` fields:
|
||||
|
||||
* ``broker`` - Section for broker information.
|
||||
* ``connect_timeout`` - Timeout in seconds (int/float) for establishing a new connection.
|
||||
* ``heartbeat`` - Current heartbeat value (set by client).
|
||||
* ``hostname`` - Node name of the remote broker.
|
||||
* ``insist`` - No longer used.
|
||||
* ``login_method`` - Login method used to connect to the broker.
|
||||
* ``port`` - Port of the remote broker.
|
||||
* ``ssl`` - SSL enabled/disabled.
|
||||
* ``transport`` - Name of transport used (e.g., amqp or redis)
|
||||
* ``transport_options`` - Options passed to transport.
|
||||
* ``uri_prefix`` - Some transports expect the host name to be a URL.
E.g. ``redis+socket:///tmp/redis.sock``.
In this example the URI prefix will be ``redis``.
|
||||
* ``userid`` - User id used to connect to the broker with.
|
||||
* ``virtual_host`` - Virtual host used.
|
||||
* ``clock`` - Value of the worker's logical clock. This is a positive integer
and should be increasing every time you receive statistics.
|
||||
* ``uptime`` - Number of seconds since the worker controller was started.
|
||||
* ``pid`` - Process id of the worker instance (Main process).
|
||||
* ``pool`` - Pool-specific section.
|
||||
* ``max-concurrency`` - Max number of processes/threads/green threads.
|
||||
* ``max-tasks-per-child`` - Max number of tasks a thread may execute before being recycled.
|
||||
* ``processes`` - List of PIDs (or thread ids).
|
||||
* ``put-guarded-by-semaphore`` - Internal
|
||||
* ``timeouts`` - Default values for time limits.
|
||||
* ``writes`` - Specific to the prefork pool, this shows the distribution
|
||||
of writes to each process in the pool when using async I/O.
|
||||
* ``prefetch_count`` - Current prefetch count value for the task consumer.
|
||||
* ``rusage`` - System usage statistics. The fields available may be different on your platform.
|
||||
From :manpage:`getrusage(2)`:
|
||||
|
||||
* ``stime`` - Time spent in operating system code on behalf of this process.
|
||||
* ``utime`` - Time spent executing user instructions.
|
||||
* ``maxrss`` - The maximum resident size used by this process (in kilobytes).
|
||||
* ``idrss`` - Amount of non-shared memory used for data (in kilobytes times
|
||||
ticks of execution)
|
||||
* ``isrss`` - Amount of non-shared memory used for stack space
|
||||
(in kilobytes times ticks of execution)
|
||||
* ``ixrss`` - Amount of memory shared with other processes
|
||||
(in kilobytes times ticks of execution).
|
||||
* ``inblock`` - Number of times the file system had to read from the disk
|
||||
on behalf of this process.
|
||||
* ``oublock`` - Number of times the file system had to write to disk
|
||||
on behalf of this process.
|
||||
* ``majflt`` - Number of page faults that were serviced by doing I/O.
|
||||
* ``minflt`` - Number of page faults that were serviced without doing I/O.
|
||||
* ``msgrcv`` - Number of IPC messages received.
|
||||
* ``msgsnd`` - Number of IPC messages sent.
|
||||
* ``nvcsw`` - Number of times this process voluntarily invoked a context switch.
|
||||
* ``nivcsw`` - Number of times an involuntary context switch took place.
|
||||
* ``nsignals`` - Number of signals received.
|
||||
* ``nswap`` - The number of times this process was swapped entirely
|
||||
out of memory.
|
||||
* ``total`` - Map of task names and the total number of tasks with that type
|
||||
the worker has accepted since start-up.
|
||||
"""
|
||||
return self._request('stats')
|
||||
|
||||
def revoked(self):
|
||||
"""Return list of revoked tasks.
|
||||
|
||||
>>> app.control.inspect().revoked()
|
||||
{'celery@node1': ['16f527de-1c72-47a6-b477-c472b92fef7a']}
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: [TASK_ID, ...]}``.
|
||||
"""
|
||||
return self._request('revoked')
|
||||
|
||||
def registered(self, *taskinfoitems):
|
||||
"""Return all registered tasks per worker.
|
||||
|
||||
>>> app.control.inspect().registered()
|
||||
{'celery@node1': ['task1', 'task2']}
|
||||
>>> app.control.inspect().registered('serializer', 'max_retries')
|
||||
{'celery@node1': ['task_foo [serializer=json max_retries=3]', 'task_bar [serializer=json max_retries=3]']}
|
||||
|
||||
Arguments:
|
||||
taskinfoitems (Sequence[str]): List of :class:`~celery.app.task.Task`
|
||||
attributes to include.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: [TASK1_INFO, ...]}``.
|
||||
"""
|
||||
return self._request('registered', taskinfoitems=taskinfoitems)
|
||||
registered_tasks = registered
|
||||
|
||||
def ping(self, destination=None):
|
||||
"""Ping all (or specific) workers.
|
||||
|
||||
>>> app.control.inspect().ping()
|
||||
{'celery@node1': {'ok': 'pong'}, 'celery@node2': {'ok': 'pong'}}
|
||||
>>> app.control.inspect().ping(destination=['celery@node1'])
|
||||
{'celery@node1': {'ok': 'pong'}}
|
||||
|
||||
Arguments:
|
||||
destination (List): If set, a list of the hosts to send the
|
||||
command to, when empty broadcast to all workers.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: {'ok': 'pong'}}``.
|
||||
|
||||
See Also:
|
||||
:meth:`broadcast` for supported keyword arguments.
|
||||
"""
|
||||
if destination:
|
||||
self.destination = destination
|
||||
return self._request('ping')
|
||||
|
||||
def active_queues(self):
|
||||
"""Return information about queues from which worker consumes tasks.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: [QUEUE_INFO, QUEUE_INFO,...]}``.
|
||||
|
||||
Here is the list of ``QUEUE_INFO`` fields:
|
||||
|
||||
* ``name``
|
||||
* ``exchange``
|
||||
* ``name``
|
||||
* ``type``
|
||||
* ``arguments``
|
||||
* ``durable``
|
||||
* ``passive``
|
||||
* ``auto_delete``
|
||||
* ``delivery_mode``
|
||||
* ``no_declare``
|
||||
* ``routing_key``
|
||||
* ``queue_arguments``
|
||||
* ``binding_arguments``
|
||||
* ``consumer_arguments``
|
||||
* ``durable``
|
||||
* ``exclusive``
|
||||
* ``auto_delete``
|
||||
* ``no_ack``
|
||||
* ``alias``
|
||||
* ``bindings``
|
||||
* ``no_declare``
|
||||
* ``expires``
|
||||
* ``message_ttl``
|
||||
* ``max_length``
|
||||
* ``max_length_bytes``
|
||||
* ``max_priority``
|
||||
|
||||
See Also:
|
||||
See the RabbitMQ/AMQP documentation for more details about
|
||||
``queue_info`` fields.
|
||||
Note:
|
||||
The ``queue_info`` fields are RabbitMQ/AMQP oriented.
Not all fields apply to other transports.
|
||||
"""
|
||||
return self._request('active_queues')
|
||||
|
||||
def query_task(self, *ids):
|
||||
"""Return detail of tasks currently executed by workers.
|
||||
|
||||
Arguments:
|
||||
*ids (str): IDs of tasks to be queried.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: {TASK_ID: [STATE, TASK_INFO]}}``.
|
||||
|
||||
Here is the list of ``TASK_INFO`` fields:
|
||||
* ``id`` - ID of the task
|
||||
* ``name`` - Name of the task
|
||||
* ``args`` - Positional arguments passed to the task
|
||||
* ``kwargs`` - Keyword arguments passed to the task
|
||||
* ``type`` - Type of the task
|
||||
* ``hostname`` - Hostname of the worker processing the task
|
||||
* ``time_start`` - Time of processing start
|
||||
* ``acknowledged`` - True when task was acknowledged to broker
|
||||
* ``delivery_info`` - Dictionary containing delivery information
|
||||
* ``exchange`` - Name of exchange where task was published
|
||||
* ``routing_key`` - Routing key used when task was published
|
||||
* ``priority`` - Priority used when task was published
|
||||
* ``redelivered`` - True if the task was redelivered
|
||||
* ``worker_pid`` - PID of the worker processing the task
|
||||
|
||||
"""
|
||||
# the signature used to be unary: query_task(ids=[id1, id2])
# we need this to preserve backward compatibility.
|
||||
if len(ids) == 1 and isinstance(ids[0], (list, tuple)):
|
||||
ids = ids[0]
|
||||
return self._request('query_task', ids=ids)
|
||||
|
||||
def conf(self, with_defaults=False):
|
||||
"""Return configuration of each worker.
|
||||
|
||||
Arguments:
|
||||
with_defaults (bool): if set to True, the method also returns
configuration options with default values.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{HOSTNAME: WORKER_CONFIGURATION}``.
|
||||
|
||||
See Also:
|
||||
``WORKER_CONFIGURATION`` is a dictionary containing current configuration options.
|
||||
See :ref:`configuration` for possible values.
|
||||
"""
|
||||
return self._request('conf', with_defaults=with_defaults)
|
||||
|
||||
def hello(self, from_node, revoked=None):
|
||||
return self._request('hello', from_node=from_node, revoked=revoked)
|
||||
|
||||
def memsample(self):
|
||||
"""Return sample current RSS memory usage.
|
||||
|
||||
Note:
|
||||
Requires the psutil library.
|
||||
"""
|
||||
return self._request('memsample')
|
||||
|
||||
def memdump(self, samples=10):
|
||||
"""Dump statistics of previous memsample requests.
|
||||
|
||||
Note:
|
||||
Requires the psutil library.
|
||||
"""
|
||||
return self._request('memdump', samples=samples)
|
||||
|
||||
def objgraph(self, type='Request', n=200, max_depth=10):
|
||||
"""Create graph of uncollected objects (memory-leak debugging).
|
||||
|
||||
Arguments:
|
||||
n (int): Max number of objects to graph.
|
||||
max_depth (int): Traverse at most n levels deep.
|
||||
type (str): Name of object to graph. Default is ``"Request"``.
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary ``{'filename': FILENAME}``
|
||||
|
||||
Note:
|
||||
Requires the objgraph library.
|
||||
"""
|
||||
return self._request('objgraph', num=n, max_depth=max_depth, type=type)
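# Illustrative sketch (not part of the upstream module): the Inspect API is
# normally reached through ``app.control.inspect()``.  The node name is an
# assumption for the example only.
#
#     >>> i = app.control.inspect(['celery@node1'])   # limit to one node
#     >>> i.active()
#     >>> i.registered()
#     >>> i.stats()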
|
||||
|
||||
|
||||
class Control:
|
||||
"""Worker remote control client."""
|
||||
|
||||
Mailbox = Mailbox
|
||||
|
||||
def __init__(self, app=None):
|
||||
self.app = app
|
||||
self.mailbox = self.Mailbox(
|
||||
app.conf.control_exchange,
|
||||
type='fanout',
|
||||
accept=app.conf.accept_content,
|
||||
serializer=app.conf.task_serializer,
|
||||
producer_pool=lazy(lambda: self.app.amqp.producer_pool),
|
||||
queue_ttl=app.conf.control_queue_ttl,
|
||||
reply_queue_ttl=app.conf.control_queue_ttl,
|
||||
queue_expires=app.conf.control_queue_expires,
|
||||
reply_queue_expires=app.conf.control_queue_expires,
|
||||
)
|
||||
register_after_fork(self, _after_fork_cleanup_control)
|
||||
|
||||
def _after_fork(self):
|
||||
del self.mailbox.producer_pool
|
||||
|
||||
@cached_property
|
||||
def inspect(self):
|
||||
"""Create new :class:`Inspect` instance."""
|
||||
return self.app.subclass_with_self(Inspect, reverse='control.inspect')
|
||||
|
||||
def purge(self, connection=None):
|
||||
"""Discard all waiting tasks.
|
||||
|
||||
This will ignore all tasks waiting for execution, and they will
|
||||
be deleted from the messaging server.
|
||||
|
||||
Arguments:
|
||||
connection (kombu.Connection): Optional specific connection
|
||||
instance to use. If not provided a connection will
|
||||
be acquired from the connection pool.
|
||||
|
||||
Returns:
|
||||
int: the number of tasks discarded.
|
||||
"""
|
||||
with self.app.connection_or_acquire(connection) as conn:
|
||||
return self.app.amqp.TaskConsumer(conn).purge()
|
||||
discard_all = purge
|
||||
|
||||
def election(self, id, topic, action=None, connection=None):
|
||||
self.broadcast(
|
||||
'election', connection=connection, destination=None,
|
||||
arguments={
|
||||
'id': id, 'topic': topic, 'action': action,
|
||||
},
|
||||
)
|
||||
|
||||
def revoke(self, task_id, destination=None, terminate=False,
|
||||
signal=TERM_SIGNAME, **kwargs):
|
||||
"""Tell all (or specific) workers to revoke a task by id (or list of ids).
|
||||
|
||||
If a task is revoked, the workers will ignore the task and
|
||||
not execute it after all.
|
||||
|
||||
Arguments:
|
||||
task_id (Union(str, list)): Id of the task to revoke
|
||||
(or list of ids).
|
||||
terminate (bool): Also terminate the process currently working
|
||||
on the task (if any).
|
||||
signal (str): Name of signal to send to process if terminate.
|
||||
Default is TERM.
|
||||
|
||||
See Also:
|
||||
:meth:`broadcast` for supported keyword arguments.
|
||||
"""
|
||||
return self.broadcast('revoke', destination=destination, arguments={
|
||||
'task_id': task_id,
|
||||
'terminate': terminate,
|
||||
'signal': signal,
|
||||
}, **kwargs)
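# Illustrative sketch (not part of the upstream module): revoking a task by id.
# The task id is an assumption for the example only.
#
#     >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed')
#     >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed',
#     ...                    terminate=True, signal='SIGKILL')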
|
||||
|
||||
def revoke_by_stamped_headers(self, headers, destination=None, terminate=False,
|
||||
signal=TERM_SIGNAME, **kwargs):
|
||||
"""
|
||||
Tell all (or specific) workers to revoke a task by headers.
|
||||
|
||||
If a task is revoked, the workers will ignore the task and
|
||||
not execute it after all.
|
||||
|
||||
Arguments:
|
||||
headers (dict[str, Union(str, list)]): Headers to match when revoking tasks.
|
||||
terminate (bool): Also terminate the process currently working
|
||||
on the task (if any).
|
||||
signal (str): Name of signal to send to process if terminate.
|
||||
Default is TERM.
|
||||
|
||||
See Also:
|
||||
:meth:`broadcast` for supported keyword arguments.
|
||||
"""
|
||||
result = self.broadcast('revoke_by_stamped_headers', destination=destination, arguments={
|
||||
'headers': headers,
|
||||
'terminate': terminate,
|
||||
'signal': signal,
|
||||
}, **kwargs)
|
||||
|
||||
task_ids = set()
|
||||
if result:
|
||||
for host in result:
|
||||
for response in host.values():
|
||||
task_ids.update(response['ok'])
|
||||
|
||||
if task_ids:
|
||||
return self.revoke(list(task_ids), destination=destination, terminate=terminate, signal=signal, **kwargs)
|
||||
else:
|
||||
return result
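    # Illustrative usage sketch (assumes tasks were published carrying a
    # stamped header, here hypothetically named ``'mystamp'``): every task
    # whose header matches is collected and then revoked by id:
    #
    #   >>> app.control.revoke_by_stamped_headers({'mystamp': ['order-1']},
    #   ...                                        terminate=True)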
|
||||
|
||||
def terminate(self, task_id,
|
||||
destination=None, signal=TERM_SIGNAME, **kwargs):
|
||||
"""Tell all (or specific) workers to terminate a task by id (or list of ids).
|
||||
|
||||
See Also:
|
||||
This is just a shortcut to :meth:`revoke` with the terminate
|
||||
argument enabled.
|
||||
"""
|
||||
return self.revoke(
|
||||
task_id,
|
||||
destination=destination, terminate=True, signal=signal, **kwargs)
|
||||
|
||||
def ping(self, destination=None, timeout=1.0, **kwargs):
|
||||
"""Ping all (or specific) workers.
|
||||
|
||||
>>> app.control.ping()
|
||||
[{'celery@node1': {'ok': 'pong'}}, {'celery@node2': {'ok': 'pong'}}]
|
||||
>>> app.control.ping(destination=['celery@node2'])
|
||||
[{'celery@node2': {'ok': 'pong'}}]
|
||||
|
||||
Returns:
|
||||
List[Dict]: List of ``{HOSTNAME: {'ok': 'pong'}}`` dictionaries.
|
||||
|
||||
See Also:
|
||||
:meth:`broadcast` for supported keyword arguments.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'ping', reply=True, arguments={}, destination=destination,
|
||||
timeout=timeout, **kwargs)
|
||||
|
||||
def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
|
||||
"""Tell workers to set a new rate limit for task by type.
|
||||
|
||||
Arguments:
|
||||
task_name (str): Name of task to change rate limit for.
|
||||
rate_limit (int, str): The rate limit as tasks per second,
|
||||
or a rate limit string (`'100/m'`, etc.
|
||||
see :attr:`celery.app.task.Task.rate_limit` for
|
||||
more information).
|
||||
|
||||
See Also:
|
||||
:meth:`broadcast` for supported keyword arguments.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'rate_limit',
|
||||
destination=destination,
|
||||
arguments={
|
||||
'task_name': task_name,
|
||||
'rate_limit': rate_limit,
|
||||
},
|
||||
**kwargs)
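    # Illustrative usage sketch (assumes a task registered as ``'tasks.add'``):
    # the limit is either tasks per second or a rate-limit string:
    #
    #   >>> app.control.rate_limit('tasks.add', '10/m')   # ten per minute
    #   >>> app.control.rate_limit('tasks.add', 5)        # five per second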
|
||||
|
||||
def add_consumer(self, queue,
|
||||
exchange=None, exchange_type='direct', routing_key=None,
|
||||
options=None, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to start consuming from a new queue.
|
||||
|
||||
Only the queue name is required as if only the queue is specified
|
||||
then the exchange/routing key will be set to the same name (
|
||||
like automatic queues do).
|
||||
|
||||
Note:
|
||||
This command does not respect the default queue/exchange
|
||||
options in the configuration.
|
||||
|
||||
Arguments:
|
||||
queue (str): Name of queue to start consuming from.
|
||||
exchange (str): Optional name of exchange.
|
||||
exchange_type (str): Type of exchange (defaults to 'direct')
|
||||
command to, when empty broadcast to all workers.
|
||||
routing_key (str): Optional routing key.
|
||||
options (Dict): Additional options as supported
|
||||
by :meth:`kombu.entity.Queue.from_dict`.
|
||||
|
||||
See Also:
|
||||
:meth:`broadcast` for supported keyword arguments.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'add_consumer',
|
||||
destination=destination,
|
||||
arguments=dict({
|
||||
'queue': queue,
|
||||
'exchange': exchange,
|
||||
'exchange_type': exchange_type,
|
||||
'routing_key': routing_key,
|
||||
}, **options or {}),
|
||||
**kwargs
|
||||
)
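    # Illustrative usage sketch: telling one worker to also consume from a
    # hypothetical ``'priority.high'`` queue bound to a topic exchange:
    #
    #   >>> app.control.add_consumer(
    #   ...     'priority.high', exchange='media', exchange_type='topic',
    #   ...     routing_key='media.#', destination=['celery@node1'])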
|
||||
|
||||
def cancel_consumer(self, queue, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to stop consuming from ``queue``.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'cancel_consumer', destination=destination,
|
||||
arguments={'queue': queue}, **kwargs)
|
||||
|
||||
def time_limit(self, task_name, soft=None, hard=None,
|
||||
destination=None, **kwargs):
|
||||
"""Tell workers to set time limits for a task by type.
|
||||
|
||||
Arguments:
|
||||
task_name (str): Name of task to change time limits for.
|
||||
soft (float): New soft time limit (in seconds).
|
||||
hard (float): New hard time limit (in seconds).
|
||||
**kwargs (Any): arguments passed on to :meth:`broadcast`.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'time_limit',
|
||||
arguments={
|
||||
'task_name': task_name,
|
||||
'hard': hard,
|
||||
'soft': soft,
|
||||
},
|
||||
destination=destination,
|
||||
**kwargs)
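    # Illustrative usage sketch (assumes a task registered as
    # ``'tasks.crawl'``): give it a one-minute soft and a ninety-second hard
    # time limit on all workers:
    #
    #   >>> app.control.time_limit('tasks.crawl', soft=60, hard=90, reply=True)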
|
||||
|
||||
def enable_events(self, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to enable events.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'enable_events', arguments={}, destination=destination, **kwargs)
|
||||
|
||||
def disable_events(self, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to disable events.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'disable_events', arguments={}, destination=destination, **kwargs)
|
||||
|
||||
def pool_grow(self, n=1, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to grow the pool by ``n``.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'pool_grow', arguments={'n': n}, destination=destination, **kwargs)
|
||||
|
||||
def pool_shrink(self, n=1, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to shrink the pool by ``n``.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'pool_shrink', arguments={'n': n},
|
||||
destination=destination, **kwargs)
|
||||
|
||||
def autoscale(self, max, min, destination=None, **kwargs):
|
||||
"""Change worker(s) autoscale setting.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
"""
|
||||
return self.broadcast(
|
||||
'autoscale', arguments={'max': max, 'min': min},
|
||||
destination=destination, **kwargs)
|
||||
|
||||
def shutdown(self, destination=None, **kwargs):
|
||||
"""Shutdown worker(s).
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`
|
||||
"""
|
||||
return self.broadcast(
|
||||
'shutdown', arguments={}, destination=destination, **kwargs)
|
||||
|
||||
def pool_restart(self, modules=None, reload=False, reloader=None,
|
||||
destination=None, **kwargs):
|
||||
"""Restart the execution pools of all or specific workers.
|
||||
|
||||
Keyword Arguments:
|
||||
modules (Sequence[str]): List of modules to reload.
|
||||
reload (bool): Flag to enable module reloading. Default is False.
|
||||
reloader (Any): Function to reload a module.
|
||||
destination (Sequence[str]): List of worker names to send this
|
||||
command to.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`
|
||||
"""
|
||||
return self.broadcast(
|
||||
'pool_restart',
|
||||
arguments={
|
||||
'modules': modules,
|
||||
'reload': reload,
|
||||
'reloader': reloader,
|
||||
},
|
||||
destination=destination, **kwargs)
|
||||
|
||||
def heartbeat(self, destination=None, **kwargs):
|
||||
"""Tell worker(s) to send a heartbeat immediately.
|
||||
|
||||
See Also:
|
||||
Supports the same arguments as :meth:`broadcast`
|
||||
"""
|
||||
return self.broadcast(
|
||||
'heartbeat', arguments={}, destination=destination, **kwargs)
|
||||
|
||||
def broadcast(self, command, arguments=None, destination=None,
|
||||
connection=None, reply=False, timeout=1.0, limit=None,
|
||||
callback=None, channel=None, pattern=None, matcher=None,
|
||||
**extra_kwargs):
|
||||
"""Broadcast a control command to the celery workers.
|
||||
|
||||
Arguments:
|
||||
command (str): Name of command to send.
|
||||
arguments (Dict): Keyword arguments for the command.
|
||||
            destination (List): If set, a list of the hosts to send the
                command to; when empty, broadcast to all workers.
|
||||
connection (kombu.Connection): Custom broker connection to use,
|
||||
if not set, a connection will be acquired from the pool.
|
||||
reply (bool): Wait for and return the reply.
|
||||
timeout (float): Timeout in seconds to wait for the reply.
|
||||
limit (int): Limit number of replies.
|
||||
callback (Callable): Callback called immediately for
|
||||
each reply received.
|
||||
pattern (str): Custom pattern string to match
|
||||
matcher (Callable): Custom matcher to run the pattern to match
|
||||
"""
|
||||
with self.app.connection_or_acquire(connection) as conn:
|
||||
arguments = dict(arguments or {}, **extra_kwargs)
|
||||
if pattern and matcher:
|
||||
# tests pass easier without requiring pattern/matcher to
|
||||
# always be sent in
|
||||
return self.mailbox(conn)._broadcast(
|
||||
command, arguments, destination, reply, timeout,
|
||||
limit, callback, channel=channel,
|
||||
pattern=pattern, matcher=matcher,
|
||||
)
|
||||
else:
|
||||
return self.mailbox(conn)._broadcast(
|
||||
command, arguments, destination, reply, timeout,
|
||||
limit, callback, channel=channel,
|
||||
)
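    # Illustrative usage sketch: the named methods above are thin wrappers
    # around :meth:`broadcast`, so any control command can also be issued
    # directly:
    #
    #   >>> app.control.broadcast('ping', reply=True, limit=1, timeout=2.0)
    #   [{'celery@node1': {'ok': 'pong'}}]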
|
||||
@@ -0,0 +1,414 @@
|
||||
"""Configuration introspection and defaults."""
|
||||
from collections import deque, namedtuple
|
||||
from datetime import timedelta
|
||||
|
||||
from celery.utils.functional import memoize
|
||||
from celery.utils.serialization import strtobool
|
||||
|
||||
__all__ = ('Option', 'NAMESPACES', 'flatten', 'find')
|
||||
|
||||
|
||||
DEFAULT_POOL = 'prefork'
|
||||
|
||||
DEFAULT_ACCEPT_CONTENT = ('json',)
|
||||
DEFAULT_PROCESS_LOG_FMT = """
|
||||
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
|
||||
""".strip()
|
||||
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
|
||||
%(task_name)s[%(task_id)s]: %(message)s"""
|
||||
|
||||
DEFAULT_SECURITY_DIGEST = 'sha256'
|
||||
|
||||
|
||||
OLD_NS = {'celery_{0}'}
|
||||
OLD_NS_BEAT = {'celerybeat_{0}'}
|
||||
OLD_NS_WORKER = {'celeryd_{0}'}
|
||||
|
||||
searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))
|
||||
|
||||
|
||||
def Namespace(__old__=None, **options):
|
||||
if __old__ is not None:
|
||||
for key, opt in options.items():
|
||||
if not opt.old:
|
||||
opt.old = {o.format(key) for o in __old__}
|
||||
return options
|
||||
|
||||
|
||||
def old_ns(ns):
|
||||
return {f'{ns}_{{0}}'}
|
||||
|
||||
|
||||
class Option:
|
||||
"""Describes a Celery configuration option."""
|
||||
|
||||
alt = None
|
||||
deprecate_by = None
|
||||
remove_by = None
|
||||
old = set()
|
||||
typemap = {'string': str, 'int': int, 'float': float, 'any': lambda v: v,
|
||||
'bool': strtobool, 'dict': dict, 'tuple': tuple}
|
||||
|
||||
def __init__(self, default=None, *args, **kwargs):
|
||||
self.default = default
|
||||
self.type = kwargs.get('type') or 'string'
|
||||
for attr, value in kwargs.items():
|
||||
setattr(self, attr, value)
|
||||
|
||||
def to_python(self, value):
|
||||
return self.typemap[self.type](value)
|
||||
|
||||
def __repr__(self):
|
||||
return '<Option: type->{} default->{!r}>'.format(self.type,
|
||||
self.default)
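    # Illustrative sketch, not part of this module: an Option coerces raw
    # values (typically strings read from old-style settings or the
    # environment) into its declared type:
    #
    #   >>> Option(10, type='int').to_python('42')
    #   42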
|
||||
|
||||
|
||||
NAMESPACES = Namespace(
|
||||
accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS),
|
||||
result_accept_content=Option(None, type='list'),
|
||||
enable_utc=Option(True, type='bool'),
|
||||
imports=Option((), type='tuple', old=OLD_NS),
|
||||
include=Option((), type='tuple', old=OLD_NS),
|
||||
timezone=Option(type='string', old=OLD_NS),
|
||||
beat=Namespace(
|
||||
__old__=OLD_NS_BEAT,
|
||||
|
||||
max_loop_interval=Option(0, type='float'),
|
||||
schedule=Option({}, type='dict'),
|
||||
scheduler=Option('celery.beat:PersistentScheduler'),
|
||||
schedule_filename=Option('celerybeat-schedule'),
|
||||
sync_every=Option(0, type='int'),
|
||||
cron_starting_deadline=Option(None, type=int)
|
||||
),
|
||||
broker=Namespace(
|
||||
url=Option(None, type='string'),
|
||||
read_url=Option(None, type='string'),
|
||||
write_url=Option(None, type='string'),
|
||||
transport=Option(type='string'),
|
||||
transport_options=Option({}, type='dict'),
|
||||
connection_timeout=Option(4, type='float'),
|
||||
connection_retry=Option(True, type='bool'),
|
||||
connection_retry_on_startup=Option(None, type='bool'),
|
||||
connection_max_retries=Option(100, type='int'),
|
||||
channel_error_retry=Option(False, type='bool'),
|
||||
failover_strategy=Option(None, type='string'),
|
||||
heartbeat=Option(120, type='int'),
|
||||
heartbeat_checkrate=Option(3.0, type='int'),
|
||||
login_method=Option(None, type='string'),
|
||||
pool_limit=Option(10, type='int'),
|
||||
use_ssl=Option(False, type='bool'),
|
||||
|
||||
host=Option(type='string'),
|
||||
port=Option(type='int'),
|
||||
user=Option(type='string'),
|
||||
password=Option(type='string'),
|
||||
vhost=Option(type='string'),
|
||||
),
|
||||
cache=Namespace(
|
||||
__old__=old_ns('celery_cache'),
|
||||
|
||||
backend=Option(),
|
||||
backend_options=Option({}, type='dict'),
|
||||
),
|
||||
cassandra=Namespace(
|
||||
entry_ttl=Option(type='float'),
|
||||
keyspace=Option(type='string'),
|
||||
port=Option(type='string'),
|
||||
read_consistency=Option(type='string'),
|
||||
servers=Option(type='list'),
|
||||
bundle_path=Option(type='string'),
|
||||
table=Option(type='string'),
|
||||
write_consistency=Option(type='string'),
|
||||
auth_provider=Option(type='string'),
|
||||
auth_kwargs=Option(type='string'),
|
||||
options=Option({}, type='dict'),
|
||||
),
|
||||
s3=Namespace(
|
||||
access_key_id=Option(type='string'),
|
||||
secret_access_key=Option(type='string'),
|
||||
bucket=Option(type='string'),
|
||||
base_path=Option(type='string'),
|
||||
endpoint_url=Option(type='string'),
|
||||
region=Option(type='string'),
|
||||
),
|
||||
azureblockblob=Namespace(
|
||||
container_name=Option('celery', type='string'),
|
||||
retry_initial_backoff_sec=Option(2, type='int'),
|
||||
retry_increment_base=Option(2, type='int'),
|
||||
retry_max_attempts=Option(3, type='int'),
|
||||
base_path=Option('', type='string'),
|
||||
connection_timeout=Option(20, type='int'),
|
||||
read_timeout=Option(120, type='int'),
|
||||
),
|
||||
control=Namespace(
|
||||
queue_ttl=Option(300.0, type='float'),
|
||||
queue_expires=Option(10.0, type='float'),
|
||||
exchange=Option('celery', type='string'),
|
||||
),
|
||||
couchbase=Namespace(
|
||||
__old__=old_ns('celery_couchbase'),
|
||||
|
||||
backend_settings=Option(None, type='dict'),
|
||||
),
|
||||
arangodb=Namespace(
|
||||
__old__=old_ns('celery_arangodb'),
|
||||
backend_settings=Option(None, type='dict')
|
||||
),
|
||||
mongodb=Namespace(
|
||||
__old__=old_ns('celery_mongodb'),
|
||||
|
||||
backend_settings=Option(type='dict'),
|
||||
),
|
||||
cosmosdbsql=Namespace(
|
||||
database_name=Option('celerydb', type='string'),
|
||||
collection_name=Option('celerycol', type='string'),
|
||||
consistency_level=Option('Session', type='string'),
|
||||
max_retry_attempts=Option(9, type='int'),
|
||||
max_retry_wait_time=Option(30, type='int'),
|
||||
),
|
||||
event=Namespace(
|
||||
__old__=old_ns('celery_event'),
|
||||
|
||||
queue_expires=Option(60.0, type='float'),
|
||||
queue_ttl=Option(5.0, type='float'),
|
||||
queue_prefix=Option('celeryev'),
|
||||
serializer=Option('json'),
|
||||
exchange=Option('celeryev', type='string'),
|
||||
),
|
||||
redis=Namespace(
|
||||
__old__=old_ns('celery_redis'),
|
||||
|
||||
backend_use_ssl=Option(type='dict'),
|
||||
db=Option(type='int'),
|
||||
host=Option(type='string'),
|
||||
max_connections=Option(type='int'),
|
||||
username=Option(type='string'),
|
||||
password=Option(type='string'),
|
||||
port=Option(type='int'),
|
||||
socket_timeout=Option(120.0, type='float'),
|
||||
socket_connect_timeout=Option(None, type='float'),
|
||||
retry_on_timeout=Option(False, type='bool'),
|
||||
socket_keepalive=Option(False, type='bool'),
|
||||
),
|
||||
result=Namespace(
|
||||
__old__=old_ns('celery_result'),
|
||||
|
||||
backend=Option(type='string'),
|
||||
cache_max=Option(
|
||||
-1,
|
||||
type='int', old={'celery_max_cached_results'},
|
||||
),
|
||||
compression=Option(type='str'),
|
||||
exchange=Option('celeryresults'),
|
||||
exchange_type=Option('direct'),
|
||||
expires=Option(
|
||||
timedelta(days=1),
|
||||
type='float', old={'celery_task_result_expires'},
|
||||
),
|
||||
persistent=Option(None, type='bool'),
|
||||
extended=Option(False, type='bool'),
|
||||
serializer=Option('json'),
|
||||
backend_transport_options=Option({}, type='dict'),
|
||||
chord_retry_interval=Option(1.0, type='float'),
|
||||
chord_join_timeout=Option(3.0, type='float'),
|
||||
backend_max_sleep_between_retries_ms=Option(10000, type='int'),
|
||||
backend_max_retries=Option(float("inf"), type='float'),
|
||||
backend_base_sleep_between_retries_ms=Option(10, type='int'),
|
||||
backend_always_retry=Option(False, type='bool'),
|
||||
),
|
||||
elasticsearch=Namespace(
|
||||
__old__=old_ns('celery_elasticsearch'),
|
||||
|
||||
retry_on_timeout=Option(type='bool'),
|
||||
max_retries=Option(type='int'),
|
||||
timeout=Option(type='float'),
|
||||
save_meta_as_text=Option(True, type='bool'),
|
||||
),
|
||||
security=Namespace(
|
||||
__old__=old_ns('celery_security'),
|
||||
|
||||
certificate=Option(type='string'),
|
||||
cert_store=Option(type='string'),
|
||||
key=Option(type='string'),
|
||||
key_password=Option(type='bytes'),
|
||||
digest=Option(DEFAULT_SECURITY_DIGEST, type='string'),
|
||||
),
|
||||
database=Namespace(
|
||||
url=Option(old={'celery_result_dburi'}),
|
||||
engine_options=Option(
|
||||
type='dict', old={'celery_result_engine_options'},
|
||||
),
|
||||
short_lived_sessions=Option(
|
||||
False, type='bool', old={'celery_result_db_short_lived_sessions'},
|
||||
),
|
||||
table_schemas=Option(type='dict'),
|
||||
table_names=Option(type='dict', old={'celery_result_db_tablenames'}),
|
||||
),
|
||||
task=Namespace(
|
||||
__old__=OLD_NS,
|
||||
acks_late=Option(False, type='bool'),
|
||||
acks_on_failure_or_timeout=Option(True, type='bool'),
|
||||
always_eager=Option(False, type='bool'),
|
||||
annotations=Option(type='any'),
|
||||
compression=Option(type='string', old={'celery_message_compression'}),
|
||||
create_missing_queues=Option(True, type='bool'),
|
||||
inherit_parent_priority=Option(False, type='bool'),
|
||||
default_delivery_mode=Option(2, type='string'),
|
||||
default_queue=Option('celery'),
|
||||
default_exchange=Option(None, type='string'), # taken from queue
|
||||
default_exchange_type=Option('direct'),
|
||||
default_routing_key=Option(None, type='string'), # taken from queue
|
||||
default_rate_limit=Option(type='string'),
|
||||
default_priority=Option(None, type='string'),
|
||||
eager_propagates=Option(
|
||||
False, type='bool', old={'celery_eager_propagates_exceptions'},
|
||||
),
|
||||
ignore_result=Option(False, type='bool'),
|
||||
store_eager_result=Option(False, type='bool'),
|
||||
protocol=Option(2, type='int', old={'celery_task_protocol'}),
|
||||
publish_retry=Option(
|
||||
True, type='bool', old={'celery_task_publish_retry'},
|
||||
),
|
||||
publish_retry_policy=Option(
|
||||
{'max_retries': 3,
|
||||
'interval_start': 0,
|
||||
'interval_max': 1,
|
||||
'interval_step': 0.2},
|
||||
type='dict', old={'celery_task_publish_retry_policy'},
|
||||
),
|
||||
queues=Option(type='dict'),
|
||||
queue_max_priority=Option(None, type='int'),
|
||||
reject_on_worker_lost=Option(type='bool'),
|
||||
remote_tracebacks=Option(False, type='bool'),
|
||||
routes=Option(type='any'),
|
||||
send_sent_event=Option(
|
||||
False, type='bool', old={'celery_send_task_sent_event'},
|
||||
),
|
||||
serializer=Option('json', old={'celery_task_serializer'}),
|
||||
soft_time_limit=Option(
|
||||
type='float', old={'celeryd_task_soft_time_limit'},
|
||||
),
|
||||
time_limit=Option(
|
||||
type='float', old={'celeryd_task_time_limit'},
|
||||
),
|
||||
store_errors_even_if_ignored=Option(False, type='bool'),
|
||||
track_started=Option(False, type='bool'),
|
||||
allow_error_cb_on_chord_header=Option(False, type='bool'),
|
||||
),
|
||||
worker=Namespace(
|
||||
__old__=OLD_NS_WORKER,
|
||||
agent=Option(None, type='string'),
|
||||
autoscaler=Option('celery.worker.autoscale:Autoscaler'),
|
||||
cancel_long_running_tasks_on_connection_loss=Option(
|
||||
False, type='bool'
|
||||
),
|
||||
concurrency=Option(None, type='int'),
|
||||
consumer=Option('celery.worker.consumer:Consumer', type='string'),
|
||||
direct=Option(False, type='bool', old={'celery_worker_direct'}),
|
||||
disable_rate_limits=Option(
|
||||
False, type='bool', old={'celery_disable_rate_limits'},
|
||||
),
|
||||
deduplicate_successful_tasks=Option(
|
||||
False, type='bool'
|
||||
),
|
||||
enable_remote_control=Option(
|
||||
True, type='bool', old={'celery_enable_remote_control'},
|
||||
),
|
||||
hijack_root_logger=Option(True, type='bool'),
|
||||
log_color=Option(type='bool'),
|
||||
log_format=Option(DEFAULT_PROCESS_LOG_FMT),
|
||||
lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}),
|
||||
max_memory_per_child=Option(type='int'),
|
||||
max_tasks_per_child=Option(type='int'),
|
||||
pool=Option(DEFAULT_POOL),
|
||||
pool_putlocks=Option(True, type='bool'),
|
||||
pool_restarts=Option(False, type='bool'),
|
||||
proc_alive_timeout=Option(4.0, type='float'),
|
||||
prefetch_multiplier=Option(4, type='int'),
|
||||
redirect_stdouts=Option(
|
||||
True, type='bool', old={'celery_redirect_stdouts'},
|
||||
),
|
||||
redirect_stdouts_level=Option(
|
||||
'WARNING', old={'celery_redirect_stdouts_level'},
|
||||
),
|
||||
send_task_events=Option(
|
||||
False, type='bool', old={'celery_send_events'},
|
||||
),
|
||||
state_db=Option(),
|
||||
task_log_format=Option(DEFAULT_TASK_LOG_FMT),
|
||||
timer=Option(type='string'),
|
||||
timer_precision=Option(1.0, type='float'),
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def _flatten_keys(ns, key, opt):
|
||||
return [(ns + key, opt)]
|
||||
|
||||
|
||||
def _to_compat(ns, key, opt):
|
||||
if opt.old:
|
||||
return [
|
||||
(oldkey.format(key).upper(), ns + key, opt)
|
||||
for oldkey in opt.old
|
||||
]
|
||||
return [((ns + key).upper(), ns + key, opt)]
|
||||
|
||||
|
||||
def flatten(d, root='', keyfilter=_flatten_keys):
|
||||
"""Flatten settings."""
|
||||
stack = deque([(root, d)])
|
||||
while stack:
|
||||
ns, options = stack.popleft()
|
||||
for key, opt in options.items():
|
||||
if isinstance(opt, dict):
|
||||
stack.append((ns + key + '_', opt))
|
||||
else:
|
||||
yield from keyfilter(ns, key, opt)
|
||||
|
||||
|
||||
DEFAULTS = {
|
||||
key: opt.default for key, opt in flatten(NAMESPACES)
|
||||
}
|
||||
__compat = list(flatten(NAMESPACES, keyfilter=_to_compat))
|
||||
_OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat}
|
||||
_TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat}
|
||||
_TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat}
|
||||
__compat = None
|
||||
|
||||
SETTING_KEYS = set(DEFAULTS.keys())
|
||||
_OLD_SETTING_KEYS = set(_TO_NEW_KEY.keys())
|
||||
|
||||
|
||||
def find_deprecated_settings(source): # pragma: no cover
|
||||
from celery.utils import deprecated
|
||||
for name, opt in flatten(NAMESPACES):
|
||||
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
|
||||
deprecated.warn(description=f'The {name!r} setting',
|
||||
deprecation=opt.deprecate_by,
|
||||
removal=opt.remove_by,
|
||||
alternative=f'Use the {opt.alt} instead')
|
||||
return source
|
||||
|
||||
|
||||
@memoize(maxsize=None)
|
||||
def find(name, namespace='celery'):
|
||||
"""Find setting by name."""
|
||||
# - Try specified name-space first.
|
||||
namespace = namespace.lower()
|
||||
try:
|
||||
return searchresult(
|
||||
namespace, name.lower(), NAMESPACES[namespace][name.lower()],
|
||||
)
|
||||
except KeyError:
|
||||
# - Try all the other namespaces.
|
||||
for ns, opts in NAMESPACES.items():
|
||||
if ns.lower() == name.lower():
|
||||
return searchresult(None, ns, opts)
|
||||
elif isinstance(opts, dict):
|
||||
try:
|
||||
return searchresult(ns, name.lower(), opts[name.lower()])
|
||||
except KeyError:
|
||||
pass
|
||||
# - See if name is a qualname last.
|
||||
return searchresult(None, name.lower(), DEFAULTS[name.lower()])
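# Illustrative sketch, not part of this module: ``find()`` resolves a bare
# setting name to its namespace and Option, and ``DEFAULTS`` holds the
# flattened default values:
#
#   >>> find('socket_timeout', namespace='redis')
#   searchresult(namespace='redis', key='socket_timeout', type=<Option: ...>)
#   >>> DEFAULTS['worker_prefetch_multiplier']
#   4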
|
||||
@@ -0,0 +1,40 @@
|
||||
"""Implementation for the app.events shortcuts."""
|
||||
from contextlib import contextmanager
|
||||
|
||||
from kombu.utils.objects import cached_property
|
||||
|
||||
|
||||
class Events:
|
||||
"""Implements app.events."""
|
||||
|
||||
receiver_cls = 'celery.events.receiver:EventReceiver'
|
||||
dispatcher_cls = 'celery.events.dispatcher:EventDispatcher'
|
||||
state_cls = 'celery.events.state:State'
|
||||
|
||||
def __init__(self, app=None):
|
||||
self.app = app
|
||||
|
||||
@cached_property
|
||||
def Receiver(self):
|
||||
return self.app.subclass_with_self(
|
||||
self.receiver_cls, reverse='events.Receiver')
|
||||
|
||||
@cached_property
|
||||
def Dispatcher(self):
|
||||
return self.app.subclass_with_self(
|
||||
self.dispatcher_cls, reverse='events.Dispatcher')
|
||||
|
||||
@cached_property
|
||||
def State(self):
|
||||
return self.app.subclass_with_self(
|
||||
self.state_cls, reverse='events.State')
|
||||
|
||||
@contextmanager
|
||||
def default_dispatcher(self, hostname=None, enabled=True,
|
||||
buffer_while_offline=False):
|
||||
with self.app.amqp.producer_pool.acquire(block=True) as prod:
|
||||
# pylint: disable=too-many-function-args
|
||||
# This is a property pylint...
|
||||
with self.Dispatcher(prod.connection, hostname, enabled,
|
||||
prod.channel, buffer_while_offline) as d:
|
||||
yield d
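    # Illustrative usage sketch (assumes a configured ``app``): the context
    # manager yields a dispatcher bound to a pooled producer, so custom
    # events can be sent without managing connections by hand:
    #
    #   with app.events.default_dispatcher() as dispatcher:
    #       dispatcher.send('task-custom-event', info='something happened')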
|
||||
247
gnx-react/venv/lib/python3.12/site-packages/celery/app/log.py
Normal file
247
gnx-react/venv/lib/python3.12/site-packages/celery/app/log.py
Normal file
@@ -0,0 +1,247 @@
|
||||
"""Logging configuration.
|
||||
|
||||
The Celery instances logging section: ``Celery.log``.
|
||||
|
||||
Sets up logging for the worker and other programs,
|
||||
redirects standard outs, colors log output, applies logging-related
compatibility fixes, and so on.
|
||||
"""
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
from logging.handlers import WatchedFileHandler
|
||||
|
||||
from kombu.utils.encoding import set_default_encoding_file
|
||||
|
||||
from celery import signals
|
||||
from celery._state import get_current_task
|
||||
from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
|
||||
from celery.local import class_property
|
||||
from celery.utils.log import (ColorFormatter, LoggingProxy, get_logger, get_multiprocessing_logger, mlevel,
|
||||
reset_multiprocessing_logger)
|
||||
from celery.utils.nodenames import node_format
|
||||
from celery.utils.term import colored
|
||||
|
||||
__all__ = ('TaskFormatter', 'Logging')
|
||||
|
||||
MP_LOG = os.environ.get('MP_LOG', False)
|
||||
|
||||
|
||||
class TaskFormatter(ColorFormatter):
|
||||
"""Formatter for tasks, adding the task name and id."""
|
||||
|
||||
def format(self, record):
|
||||
task = get_current_task()
|
||||
if task and task.request:
|
||||
record.__dict__.update(task_id=task.request.id,
|
||||
task_name=task.name)
|
||||
else:
|
||||
record.__dict__.setdefault('task_name', '???')
|
||||
record.__dict__.setdefault('task_id', '???')
|
||||
return super().format(record)
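    # Illustrative sketch, not part of this module: attaching the formatter
    # to a handler makes ``%(task_name)s`` and ``%(task_id)s`` usable in the
    # format string; outside of a task context they render as ``???``:
    #
    #   handler = logging.StreamHandler()
    #   handler.setFormatter(TaskFormatter(
    #       '[%(asctime)s: %(levelname)s] %(task_name)s[%(task_id)s] %(message)s'))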
|
||||
|
||||
|
||||
class Logging:
|
||||
"""Application logging setup (app.log)."""
|
||||
|
||||
#: The logging subsystem is only configured once per process.
|
||||
#: setup_logging_subsystem sets this flag, and subsequent calls
|
||||
#: will do nothing.
|
||||
_setup = False
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
||||
self.loglevel = mlevel(logging.WARN)
|
||||
self.format = self.app.conf.worker_log_format
|
||||
self.task_format = self.app.conf.worker_task_log_format
|
||||
self.colorize = self.app.conf.worker_log_color
|
||||
|
||||
def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
|
||||
redirect_level='WARNING', colorize=None, hostname=None):
|
||||
loglevel = mlevel(loglevel)
|
||||
handled = self.setup_logging_subsystem(
|
||||
loglevel, logfile, colorize=colorize, hostname=hostname,
|
||||
)
|
||||
if not handled and redirect_stdouts:
|
||||
self.redirect_stdouts(redirect_level)
|
||||
os.environ.update(
|
||||
CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
|
||||
CELERY_LOG_FILE=str(logfile) if logfile else '',
|
||||
)
|
||||
warnings.filterwarnings('always', category=CDeprecationWarning)
|
||||
warnings.filterwarnings('always', category=CPendingDeprecationWarning)
|
||||
logging.captureWarnings(True)
|
||||
return handled
|
||||
|
||||
def redirect_stdouts(self, loglevel=None, name='celery.redirected'):
|
||||
self.redirect_stdouts_to_logger(
|
||||
get_logger(name), loglevel=loglevel
|
||||
)
|
||||
os.environ.update(
|
||||
CELERY_LOG_REDIRECT='1',
|
||||
CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''),
|
||||
)
|
||||
|
||||
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
|
||||
colorize=None, hostname=None, **kwargs):
|
||||
if self.already_setup:
|
||||
return
|
||||
if logfile and hostname:
|
||||
logfile = node_format(logfile, hostname)
|
||||
Logging._setup = True
|
||||
loglevel = mlevel(loglevel or self.loglevel)
|
||||
format = format or self.format
|
||||
colorize = self.supports_color(colorize, logfile)
|
||||
reset_multiprocessing_logger()
|
||||
receivers = signals.setup_logging.send(
|
||||
sender=None, loglevel=loglevel, logfile=logfile,
|
||||
format=format, colorize=colorize,
|
||||
)
|
||||
|
||||
if not receivers:
|
||||
root = logging.getLogger()
|
||||
|
||||
if self.app.conf.worker_hijack_root_logger:
|
||||
root.handlers = []
|
||||
get_logger('celery').handlers = []
|
||||
get_logger('celery.task').handlers = []
|
||||
get_logger('celery.redirected').handlers = []
|
||||
|
||||
# Configure root logger
|
||||
self._configure_logger(
|
||||
root, logfile, loglevel, format, colorize, **kwargs
|
||||
)
|
||||
|
||||
# Configure the multiprocessing logger
|
||||
self._configure_logger(
|
||||
get_multiprocessing_logger(),
|
||||
logfile, loglevel if MP_LOG else logging.ERROR,
|
||||
format, colorize, **kwargs
|
||||
)
|
||||
|
||||
signals.after_setup_logger.send(
|
||||
sender=None, logger=root,
|
||||
loglevel=loglevel, logfile=logfile,
|
||||
format=format, colorize=colorize,
|
||||
)
|
||||
|
||||
# then setup the root task logger.
|
||||
self.setup_task_loggers(loglevel, logfile, colorize=colorize)
|
||||
|
||||
try:
|
||||
stream = logging.getLogger().handlers[0].stream
|
||||
except (AttributeError, IndexError):
|
||||
pass
|
||||
else:
|
||||
set_default_encoding_file(stream)
|
||||
|
||||
# This is a hack for multiprocessing's fork+exec, so that
|
||||
# logging before Process.run works.
|
||||
logfile_name = logfile if isinstance(logfile, str) else ''
|
||||
os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
|
||||
_MP_FORK_LOGFILE_=logfile_name,
|
||||
_MP_FORK_LOGFORMAT_=format)
|
||||
return receivers
|
||||
|
||||
def _configure_logger(self, logger, logfile, loglevel,
|
||||
format, colorize, **kwargs):
|
||||
if logger is not None:
|
||||
self.setup_handlers(logger, logfile, format,
|
||||
colorize, **kwargs)
|
||||
if loglevel:
|
||||
logger.setLevel(loglevel)
|
||||
|
||||
def setup_task_loggers(self, loglevel=None, logfile=None, format=None,
|
||||
colorize=None, propagate=False, **kwargs):
|
||||
"""Setup the task logger.
|
||||
|
||||
If `logfile` is not specified, then `sys.stderr` is used.
|
||||
|
||||
Will return the base task logger object.
|
||||
"""
|
||||
loglevel = mlevel(loglevel or self.loglevel)
|
||||
format = format or self.task_format
|
||||
colorize = self.supports_color(colorize, logfile)
|
||||
|
||||
logger = self.setup_handlers(
|
||||
get_logger('celery.task'),
|
||||
logfile, format, colorize,
|
||||
formatter=TaskFormatter, **kwargs
|
||||
)
|
||||
logger.setLevel(loglevel)
|
||||
# this is an int for some reason, better to not question why.
|
||||
logger.propagate = int(propagate)
|
||||
signals.after_setup_task_logger.send(
|
||||
sender=None, logger=logger,
|
||||
loglevel=loglevel, logfile=logfile,
|
||||
format=format, colorize=colorize,
|
||||
)
|
||||
return logger
|
||||
|
||||
def redirect_stdouts_to_logger(self, logger, loglevel=None,
|
||||
stdout=True, stderr=True):
|
||||
"""Redirect :class:`sys.stdout` and :class:`sys.stderr` to logger.
|
||||
|
||||
Arguments:
|
||||
logger (logging.Logger): Logger instance to redirect to.
|
||||
loglevel (int, str): The loglevel redirected message
|
||||
will be logged as.
|
||||
"""
|
||||
proxy = LoggingProxy(logger, loglevel)
|
||||
if stdout:
|
||||
sys.stdout = proxy
|
||||
if stderr:
|
||||
sys.stderr = proxy
|
||||
return proxy
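    # Illustrative usage sketch (assumes an already configured logger):
    # after redirection, plain ``print()`` output ends up in the log instead
    # of the real stdout:
    #
    #   app.log.redirect_stdouts_to_logger(
    #       logging.getLogger('celery.redirected'), loglevel=logging.WARNING)
    #   print('this line is now logged')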
|
||||
|
||||
def supports_color(self, colorize=None, logfile=None):
|
||||
colorize = self.colorize if colorize is None else colorize
|
||||
if self.app.IS_WINDOWS:
|
||||
# Windows does not support ANSI color codes.
|
||||
return False
|
||||
if colorize or colorize is None:
|
||||
# Only use color if there's no active log file
|
||||
# and stderr is an actual terminal.
|
||||
return logfile is None and sys.stderr.isatty()
|
||||
return colorize
|
||||
|
||||
def colored(self, logfile=None, enabled=None):
|
||||
return colored(enabled=self.supports_color(enabled, logfile))
|
||||
|
||||
def setup_handlers(self, logger, logfile, format, colorize,
|
||||
formatter=ColorFormatter, **kwargs):
|
||||
if self._is_configured(logger):
|
||||
return logger
|
||||
handler = self._detect_handler(logfile)
|
||||
handler.setFormatter(formatter(format, use_color=colorize))
|
||||
logger.addHandler(handler)
|
||||
return logger
|
||||
|
||||
def _detect_handler(self, logfile=None):
|
||||
"""Create handler from filename, an open stream or `None` (stderr)."""
|
||||
logfile = sys.__stderr__ if logfile is None else logfile
|
||||
if hasattr(logfile, 'write'):
|
||||
return logging.StreamHandler(logfile)
|
||||
return WatchedFileHandler(logfile, encoding='utf-8')
|
||||
|
||||
def _has_handler(self, logger):
|
||||
return any(
|
||||
not isinstance(h, logging.NullHandler)
|
||||
for h in logger.handlers or []
|
||||
)
|
||||
|
||||
def _is_configured(self, logger):
|
||||
return self._has_handler(logger) and not getattr(
|
||||
logger, '_rudimentary_setup', False)
|
||||
|
||||
def get_default_logger(self, name='celery', **kwargs):
|
||||
return get_logger(name)
|
||||
|
||||
@class_property
|
||||
def already_setup(self):
|
||||
return self._setup
|
||||
|
||||
@already_setup.setter
|
||||
def already_setup(self, was_setup):
|
||||
self._setup = was_setup
|
||||
@@ -0,0 +1,68 @@
|
||||
"""Registry of available tasks."""
|
||||
import inspect
|
||||
from importlib import import_module
|
||||
|
||||
from celery._state import get_current_app
|
||||
from celery.app.autoretry import add_autoretry_behaviour
|
||||
from celery.exceptions import InvalidTaskError, NotRegistered
|
||||
|
||||
__all__ = ('TaskRegistry',)
|
||||
|
||||
|
||||
class TaskRegistry(dict):
|
||||
"""Map of registered tasks."""
|
||||
|
||||
NotRegistered = NotRegistered
|
||||
|
||||
def __missing__(self, key):
|
||||
raise self.NotRegistered(key)
|
||||
|
||||
def register(self, task):
|
||||
"""Register a task in the task registry.
|
||||
|
||||
The task will be automatically instantiated if not already an
|
||||
instance. Name must be configured prior to registration.
|
||||
"""
|
||||
if task.name is None:
|
||||
raise InvalidTaskError(
|
||||
'Task class {!r} must specify .name attribute'.format(
|
||||
type(task).__name__))
|
||||
task = inspect.isclass(task) and task() or task
|
||||
add_autoretry_behaviour(task)
|
||||
self[task.name] = task
|
||||
|
||||
def unregister(self, name):
|
||||
"""Unregister task by name.
|
||||
|
||||
Arguments:
|
||||
name (str): name of the task to unregister, or a
|
||||
:class:`celery.app.task.Task` with a valid `name` attribute.
|
||||
|
||||
Raises:
|
||||
celery.exceptions.NotRegistered: if the task is not registered.
|
||||
"""
|
||||
try:
|
||||
self.pop(getattr(name, 'name', name))
|
||||
except KeyError:
|
||||
raise self.NotRegistered(name)
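    # Illustrative sketch, not part of this module: the registry behaves like
    # a dict keyed by task name; with a hypothetical ``add`` task defined in
    # ``tasks.py`` (registration is normally done by the task decorator):
    #
    #   >>> app.tasks['tasks.add']
    #   <@task: tasks.add of proj at 0x...>
    #   >>> app.tasks.unregister('tasks.add')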
|
||||
|
||||
# -- these methods are irrelevant now and will be removed in 4.0
|
||||
def regular(self):
|
||||
return self.filter_types('regular')
|
||||
|
||||
def periodic(self):
|
||||
return self.filter_types('periodic')
|
||||
|
||||
def filter_types(self, type):
|
||||
return {name: task for name, task in self.items()
|
||||
if getattr(task, 'type', 'regular') == type}
|
||||
|
||||
|
||||
def _unpickle_task(name):
|
||||
return get_current_app().tasks[name]
|
||||
|
||||
|
||||
def _unpickle_task_v2(name, module=None):
|
||||
if module:
|
||||
import_module(module)
|
||||
return get_current_app().tasks[name]
|
||||
136
gnx-react/venv/lib/python3.12/site-packages/celery/app/routes.py
Normal file
136
gnx-react/venv/lib/python3.12/site-packages/celery/app/routes.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""Task Routing.
|
||||
|
||||
Contains utilities for working with task routers, (:setting:`task_routes`).
|
||||
"""
|
||||
import fnmatch
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from collections.abc import Mapping
|
||||
|
||||
from kombu import Queue
|
||||
|
||||
from celery.exceptions import QueueNotFound
|
||||
from celery.utils.collections import lpmerge
|
||||
from celery.utils.functional import maybe_evaluate, mlazy
|
||||
from celery.utils.imports import symbol_by_name
|
||||
|
||||
try:
|
||||
Pattern = re._pattern_type
|
||||
except AttributeError: # pragma: no cover
|
||||
# for support Python 3.7
|
||||
Pattern = re.Pattern
|
||||
|
||||
__all__ = ('MapRoute', 'Router', 'prepare')
|
||||
|
||||
|
||||
class MapRoute:
|
||||
"""Creates a router out of a :class:`dict`."""
|
||||
|
||||
def __init__(self, map):
|
||||
map = map.items() if isinstance(map, Mapping) else map
|
||||
self.map = {}
|
||||
self.patterns = OrderedDict()
|
||||
for k, v in map:
|
||||
if isinstance(k, Pattern):
|
||||
self.patterns[k] = v
|
||||
elif '*' in k:
|
||||
self.patterns[re.compile(fnmatch.translate(k))] = v
|
||||
else:
|
||||
self.map[k] = v
|
||||
|
||||
def __call__(self, name, *args, **kwargs):
|
||||
try:
|
||||
return dict(self.map[name])
|
||||
except KeyError:
|
||||
pass
|
||||
except ValueError:
|
||||
return {'queue': self.map[name]}
|
||||
for regex, route in self.patterns.items():
|
||||
if regex.match(name):
|
||||
try:
|
||||
return dict(route)
|
||||
except ValueError:
|
||||
return {'queue': route}
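    # Illustrative sketch, not part of this module: a MapRoute built from a
    # dict supports exact names, glob patterns and compiled regexes:
    #
    #   >>> router = MapRoute({'feed.tasks.*': {'queue': 'feeds'},
    #   ...                    'video.encode': 'media'})
    #   >>> router('feed.tasks.import_feed')
    #   {'queue': 'feeds'}
    #   >>> router('video.encode')
    #   {'queue': 'media'}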
|
||||
|
||||
|
||||
class Router:
|
||||
"""Route tasks based on the :setting:`task_routes` setting."""
|
||||
|
||||
def __init__(self, routes=None, queues=None,
|
||||
create_missing=False, app=None):
|
||||
self.app = app
|
||||
self.queues = {} if queues is None else queues
|
||||
self.routes = [] if routes is None else routes
|
||||
self.create_missing = create_missing
|
||||
|
||||
def route(self, options, name, args=(), kwargs=None, task_type=None):
|
||||
kwargs = {} if not kwargs else kwargs
|
||||
options = self.expand_destination(options) # expands 'queue'
|
||||
if self.routes:
|
||||
route = self.lookup_route(name, args, kwargs, options, task_type)
|
||||
if route: # expands 'queue' in route.
|
||||
return lpmerge(self.expand_destination(route), options)
|
||||
if 'queue' not in options:
|
||||
options = lpmerge(self.expand_destination(
|
||||
self.app.conf.task_default_queue), options)
|
||||
return options
|
||||
|
||||
def expand_destination(self, route):
|
||||
# Route can be a queue name: convenient for direct exchanges.
|
||||
if isinstance(route, str):
|
||||
queue, route = route, {}
|
||||
else:
|
||||
# can use defaults from configured queue, but override specific
|
||||
# things (like the routing_key): great for topic exchanges.
|
||||
queue = route.pop('queue', None)
|
||||
|
||||
if queue:
|
||||
if isinstance(queue, Queue):
|
||||
route['queue'] = queue
|
||||
else:
|
||||
try:
|
||||
route['queue'] = self.queues[queue]
|
||||
except KeyError:
|
||||
raise QueueNotFound(
|
||||
f'Queue {queue!r} missing from task_queues')
|
||||
return route
|
||||
|
||||
def lookup_route(self, name,
|
||||
args=None, kwargs=None, options=None, task_type=None):
|
||||
query = self.query_router
|
||||
for router in self.routes:
|
||||
route = query(router, name, args, kwargs, options, task_type)
|
||||
if route is not None:
|
||||
return route
|
||||
|
||||
def query_router(self, router, task, args, kwargs, options, task_type):
|
||||
router = maybe_evaluate(router)
|
||||
if hasattr(router, 'route_for_task'):
|
||||
# pre 4.0 router class
|
||||
return router.route_for_task(task, args, kwargs)
|
||||
return router(task, args, kwargs, options, task=task_type)
|
||||
|
||||
|
||||
def expand_router_string(router):
|
||||
router = symbol_by_name(router)
|
||||
if hasattr(router, 'route_for_task'):
|
||||
# need to instantiate pre 4.0 router classes
|
||||
router = router()
|
||||
return router
|
||||
|
||||
|
||||
def prepare(routes):
|
||||
"""Expand the :setting:`task_routes` setting."""
|
||||
|
||||
def expand_route(route):
|
||||
if isinstance(route, (Mapping, list, tuple)):
|
||||
return MapRoute(route)
|
||||
if isinstance(route, str):
|
||||
return mlazy(expand_router_string, route)
|
||||
return route
|
||||
|
||||
if routes is None:
|
||||
return ()
|
||||
if not isinstance(routes, (list, tuple)):
|
||||
routes = (routes,)
|
||||
return [expand_route(route) for route in routes]
|
||||
1144
gnx-react/venv/lib/python3.12/site-packages/celery/app/task.py
Normal file
1144
gnx-react/venv/lib/python3.12/site-packages/celery/app/task.py
Normal file
File diff suppressed because it is too large
763
gnx-react/venv/lib/python3.12/site-packages/celery/app/trace.py
Normal file
763
gnx-react/venv/lib/python3.12/site-packages/celery/app/trace.py
Normal file
@@ -0,0 +1,763 @@
|
||||
"""Trace task execution.
|
||||
|
||||
This module defines how the task execution is traced:
|
||||
errors are recorded, handlers are applied and so on.
|
||||
"""
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from collections import namedtuple
|
||||
from typing import Any, Callable, Dict, FrozenSet, Optional, Sequence, Tuple, Type, Union
|
||||
from warnings import warn
|
||||
|
||||
from billiard.einfo import ExceptionInfo, ExceptionWithTraceback
|
||||
from kombu.exceptions import EncodeError
|
||||
from kombu.serialization import loads as loads_message
|
||||
from kombu.serialization import prepare_accept_content
|
||||
from kombu.utils.encoding import safe_repr, safe_str
|
||||
|
||||
import celery
|
||||
import celery.loaders.app
|
||||
from celery import current_app, group, signals, states
|
||||
from celery._state import _task_stack
|
||||
from celery.app.task import Context
|
||||
from celery.app.task import Task as BaseTask
|
||||
from celery.exceptions import BackendGetMetaError, Ignore, InvalidTaskError, Reject, Retry
|
||||
from celery.result import AsyncResult
|
||||
from celery.utils.log import get_logger
|
||||
from celery.utils.nodenames import gethostname
|
||||
from celery.utils.objects import mro_lookup
|
||||
from celery.utils.saferepr import saferepr
|
||||
from celery.utils.serialization import get_pickleable_etype, get_pickleable_exception, get_pickled_exception
|
||||
|
||||
# ## ---
|
||||
# This is the heart of the worker, the inner loop so to speak.
|
||||
# It used to be split up into nice little classes and methods,
|
||||
# but in the end it only resulted in bad performance and horrible tracebacks,
|
||||
# so instead we now use one closure per task class.
|
||||
|
||||
# pylint: disable=redefined-outer-name
|
||||
# We cache globals and attribute lookups, so disable this warning.
|
||||
# pylint: disable=broad-except
|
||||
# We know what we're doing...
|
||||
|
||||
|
||||
__all__ = (
|
||||
'TraceInfo', 'build_tracer', 'trace_task',
|
||||
'setup_worker_optimizations', 'reset_worker_optimizations',
|
||||
)
|
||||
|
||||
from celery.worker.state import successful_requests
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
#: Format string used to log task receipt.
|
||||
LOG_RECEIVED = """\
|
||||
Task %(name)s[%(id)s] received\
|
||||
"""
|
||||
|
||||
#: Format string used to log task success.
|
||||
LOG_SUCCESS = """\
|
||||
Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
|
||||
"""
|
||||
|
||||
#: Format string used to log task failure.
|
||||
LOG_FAILURE = """\
|
||||
Task %(name)s[%(id)s] %(description)s: %(exc)s\
|
||||
"""
|
||||
|
||||
#: Format string used to log task internal error.
|
||||
LOG_INTERNAL_ERROR = """\
|
||||
Task %(name)s[%(id)s] %(description)s: %(exc)s\
|
||||
"""
|
||||
|
||||
#: Format string used to log task ignored.
|
||||
LOG_IGNORED = """\
|
||||
Task %(name)s[%(id)s] %(description)s\
|
||||
"""
|
||||
|
||||
#: Format string used to log task rejected.
|
||||
LOG_REJECTED = """\
|
||||
Task %(name)s[%(id)s] %(exc)s\
|
||||
"""
|
||||
|
||||
#: Format string used to log task retry.
|
||||
LOG_RETRY = """\
|
||||
Task %(name)s[%(id)s] retry: %(exc)s\
|
||||
"""
|
||||
|
||||
log_policy_t = namedtuple(
|
||||
'log_policy_t',
|
||||
('format', 'description', 'severity', 'traceback', 'mail'),
|
||||
)
|
||||
|
||||
log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1)
|
||||
log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0)
|
||||
log_policy_internal = log_policy_t(
|
||||
LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1,
|
||||
)
|
||||
log_policy_expected = log_policy_t(
|
||||
LOG_FAILURE, 'raised expected', logging.INFO, 0, 0,
|
||||
)
|
||||
log_policy_unexpected = log_policy_t(
|
||||
LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1,
|
||||
)
|
||||
|
||||
send_prerun = signals.task_prerun.send
|
||||
send_postrun = signals.task_postrun.send
|
||||
send_success = signals.task_success.send
|
||||
STARTED = states.STARTED
|
||||
SUCCESS = states.SUCCESS
|
||||
IGNORED = states.IGNORED
|
||||
REJECTED = states.REJECTED
|
||||
RETRY = states.RETRY
|
||||
FAILURE = states.FAILURE
|
||||
EXCEPTION_STATES = states.EXCEPTION_STATES
|
||||
IGNORE_STATES = frozenset({IGNORED, RETRY, REJECTED})
|
||||
|
||||
#: set by :func:`setup_worker_optimizations`
|
||||
_localized = []
|
||||
_patched = {}
|
||||
|
||||
trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
|
||||
|
||||
|
||||
def info(fmt, context):
|
||||
"""Log 'fmt % context' with severity 'INFO'.
|
||||
|
||||
'context' is also passed in extra with key 'data' for custom handlers.
|
||||
"""
|
||||
logger.info(fmt, context, extra={'data': context})
|
||||
|
||||
|
||||
def task_has_custom(task, attr):
|
||||
"""Return true if the task overrides ``attr``."""
|
||||
return mro_lookup(task.__class__, attr, stop={BaseTask, object},
|
||||
monkey_patched=['celery.app.task'])
|
||||
|
||||
|
||||
def get_log_policy(task, einfo, exc):
|
||||
if isinstance(exc, Reject):
|
||||
return log_policy_reject
|
||||
elif isinstance(exc, Ignore):
|
||||
return log_policy_ignore
|
||||
elif einfo.internal:
|
||||
return log_policy_internal
|
||||
else:
|
||||
if task.throws and isinstance(exc, task.throws):
|
||||
return log_policy_expected
|
||||
return log_policy_unexpected
|
||||
|
||||
|
||||
def get_task_name(request, default):
|
||||
"""Use 'shadow' in request for the task name if applicable."""
|
||||
# request.shadow could be None or an empty string.
|
||||
# If so, we should use default.
|
||||
return getattr(request, 'shadow', None) or default
|
||||
|
||||
|
||||
class TraceInfo:
|
||||
"""Information about task execution."""
|
||||
|
||||
__slots__ = ('state', 'retval')
|
||||
|
||||
def __init__(self, state, retval=None):
|
||||
self.state = state
|
||||
self.retval = retval
|
||||
|
||||
def handle_error_state(self, task, req,
|
||||
eager=False, call_errbacks=True):
|
||||
if task.ignore_result:
|
||||
store_errors = task.store_errors_even_if_ignored
|
||||
elif eager and task.store_eager_result:
|
||||
store_errors = True
|
||||
else:
|
||||
store_errors = not eager
|
||||
|
||||
return {
|
||||
RETRY: self.handle_retry,
|
||||
FAILURE: self.handle_failure,
|
||||
}[self.state](task, req,
|
||||
store_errors=store_errors,
|
||||
call_errbacks=call_errbacks)
|
||||
|
||||
def handle_reject(self, task, req, **kwargs):
|
||||
self._log_error(task, req, ExceptionInfo())
|
||||
|
||||
def handle_ignore(self, task, req, **kwargs):
|
||||
self._log_error(task, req, ExceptionInfo())
|
||||
|
||||
def handle_retry(self, task, req, store_errors=True, **kwargs):
|
||||
"""Handle retry exception."""
|
||||
        # The exception raised is the Retry semi-predicate,
        # and its ``exc`` attribute is the original exception raised (if any).
|
||||
type_, _, tb = sys.exc_info()
|
||||
try:
|
||||
reason = self.retval
|
||||
einfo = ExceptionInfo((type_, reason, tb))
|
||||
if store_errors:
|
||||
task.backend.mark_as_retry(
|
||||
req.id, reason.exc, einfo.traceback, request=req,
|
||||
)
|
||||
task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
|
||||
signals.task_retry.send(sender=task, request=req,
|
||||
reason=reason, einfo=einfo)
|
||||
info(LOG_RETRY, {
|
||||
'id': req.id,
|
||||
'name': get_task_name(req, task.name),
|
||||
'exc': str(reason),
|
||||
})
|
||||
return einfo
|
||||
finally:
|
||||
del tb
|
||||
|
||||
def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
|
||||
"""Handle exception."""
|
||||
orig_exc = self.retval
|
||||
|
||||
exc = get_pickleable_exception(orig_exc)
|
||||
if exc.__traceback__ is None:
|
||||
# `get_pickleable_exception` may have created a new exception without
|
||||
# a traceback.
|
||||
_, _, exc.__traceback__ = sys.exc_info()
|
||||
|
||||
exc_type = get_pickleable_etype(type(orig_exc))
|
||||
|
||||
# make sure we only send pickleable exceptions back to parent.
|
||||
einfo = ExceptionInfo(exc_info=(exc_type, exc, exc.__traceback__))
|
||||
|
||||
task.backend.mark_as_failure(
|
||||
req.id, exc, einfo.traceback,
|
||||
request=req, store_result=store_errors,
|
||||
call_errbacks=call_errbacks,
|
||||
)
|
||||
|
||||
task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
|
||||
signals.task_failure.send(sender=task, task_id=req.id,
|
||||
exception=exc, args=req.args,
|
||||
kwargs=req.kwargs,
|
||||
traceback=exc.__traceback__,
|
||||
einfo=einfo)
|
||||
self._log_error(task, req, einfo)
|
||||
return einfo
|
||||
|
||||
def _log_error(self, task, req, einfo):
|
||||
eobj = einfo.exception = get_pickled_exception(einfo.exception)
|
||||
if isinstance(eobj, ExceptionWithTraceback):
|
||||
eobj = einfo.exception = eobj.exc
|
||||
exception, traceback, exc_info, sargs, skwargs = (
|
||||
safe_repr(eobj),
|
||||
safe_str(einfo.traceback),
|
||||
einfo.exc_info,
|
||||
req.get('argsrepr') or safe_repr(req.args),
|
||||
req.get('kwargsrepr') or safe_repr(req.kwargs),
|
||||
)
|
||||
policy = get_log_policy(task, einfo, eobj)
|
||||
|
||||
context = {
|
||||
'hostname': req.hostname,
|
||||
'id': req.id,
|
||||
'name': get_task_name(req, task.name),
|
||||
'exc': exception,
|
||||
'traceback': traceback,
|
||||
'args': sargs,
|
||||
'kwargs': skwargs,
|
||||
'description': policy.description,
|
||||
'internal': einfo.internal,
|
||||
}
|
||||
|
||||
logger.log(policy.severity, policy.format.strip(), context,
|
||||
exc_info=exc_info if policy.traceback else None,
|
||||
extra={'data': context})
|
||||
|
||||
|
||||
def traceback_clear(exc=None):
    # Clear traceback frames to release their local variables; einfo may
    # still keep a reference to the Traceback object itself, and ``exc``
    # lets the caller clear the traceback attached to a specific exception.
|
||||
tb = None
|
||||
if exc is not None:
|
||||
if hasattr(exc, '__traceback__'):
|
||||
tb = exc.__traceback__
|
||||
else:
|
||||
_, _, tb = sys.exc_info()
|
||||
else:
|
||||
_, _, tb = sys.exc_info()
|
||||
|
||||
while tb is not None:
|
||||
try:
|
||||
tb.tb_frame.clear()
|
||||
tb.tb_frame.f_locals
|
||||
except RuntimeError:
|
||||
# Ignore the exception raised if the frame is still executing.
|
||||
pass
|
||||
tb = tb.tb_next
|
||||
|
||||
|
||||
def build_tracer(
|
||||
name: str,
|
||||
task: Union[celery.Task, celery.local.PromiseProxy],
|
||||
loader: Optional[celery.loaders.app.AppLoader] = None,
|
||||
hostname: Optional[str] = None,
|
||||
store_errors: bool = True,
|
||||
Info: Type[TraceInfo] = TraceInfo,
|
||||
eager: bool = False,
|
||||
propagate: bool = False,
|
||||
app: Optional[celery.Celery] = None,
|
||||
monotonic: Callable[[], int] = time.monotonic,
|
||||
trace_ok_t: Type[trace_ok_t] = trace_ok_t,
|
||||
IGNORE_STATES: FrozenSet[str] = IGNORE_STATES) -> \
|
||||
Callable[[str, Tuple[Any, ...], Dict[str, Any], Any], trace_ok_t]:
|
||||
"""Return a function that traces task execution.
|
||||
|
||||
Catches all exceptions and updates result backend with the
|
||||
state and result.
|
||||
|
||||
If the call was successful, it saves the result to the task result
|
||||
backend, and sets the task status to `"SUCCESS"`.
|
||||
|
||||
If the call raises :exc:`~@Retry`, it extracts
|
||||
the original exception, uses that as the result and sets the task state
|
||||
to `"RETRY"`.
|
||||
|
||||
If the call results in an exception, it saves the exception as the task
|
||||
result, and sets the task state to `"FAILURE"`.
|
||||
|
||||
Return a function that takes the following arguments:
|
||||
|
||||
:param uuid: The id of the task.
|
||||
:param args: List of positional args to pass on to the function.
|
||||
:param kwargs: Keyword arguments mapping to pass on to the function.
|
||||
:keyword request: Request dict.
|
||||
|
||||
"""
|
||||
|
||||
# pylint: disable=too-many-statements
|
||||
|
||||
# If the task doesn't define a custom __call__ method
|
||||
# we optimize it away by simply calling the run method directly,
|
||||
# saving the extra method call and a line less in the stack trace.
|
||||
fun = task if task_has_custom(task, '__call__') else task.run
|
||||
|
||||
loader = loader or app.loader
|
||||
ignore_result = task.ignore_result
|
||||
track_started = task.track_started
|
||||
track_started = not eager and (task.track_started and not ignore_result)
|
||||
|
||||
# #6476
|
||||
if eager and not ignore_result and task.store_eager_result:
|
||||
publish_result = True
|
||||
else:
|
||||
publish_result = not eager and not ignore_result
|
||||
|
||||
deduplicate_successful_tasks = ((app.conf.task_acks_late or task.acks_late)
|
||||
and app.conf.worker_deduplicate_successful_tasks
|
||||
and app.backend.persistent)
|
||||
|
||||
hostname = hostname or gethostname()
|
||||
inherit_parent_priority = app.conf.task_inherit_parent_priority
|
||||
|
||||
loader_task_init = loader.on_task_init
|
||||
loader_cleanup = loader.on_process_cleanup
|
||||
|
||||
task_before_start = None
|
||||
task_on_success = None
|
||||
task_after_return = None
|
||||
if task_has_custom(task, 'before_start'):
|
||||
task_before_start = task.before_start
|
||||
if task_has_custom(task, 'on_success'):
|
||||
task_on_success = task.on_success
|
||||
if task_has_custom(task, 'after_return'):
|
||||
task_after_return = task.after_return
|
||||
|
||||
pid = os.getpid()
|
||||
|
||||
request_stack = task.request_stack
|
||||
push_request = request_stack.push
|
||||
pop_request = request_stack.pop
|
||||
push_task = _task_stack.push
|
||||
pop_task = _task_stack.pop
|
||||
_does_info = logger.isEnabledFor(logging.INFO)
|
||||
resultrepr_maxsize = task.resultrepr_maxsize
|
||||
|
||||
prerun_receivers = signals.task_prerun.receivers
|
||||
postrun_receivers = signals.task_postrun.receivers
|
||||
success_receivers = signals.task_success.receivers
|
||||
|
||||
from celery import canvas
|
||||
signature = canvas.maybe_signature # maybe_ does not clone if already
|
||||
|
||||
    def on_error(
            request: celery.app.task.Context,
            exc: Union[Exception, Type[Exception]],
            state: str = FAILURE,
            call_errbacks: bool = True) -> Tuple[Info, Any, Any, Any]:
        """Handle any errors raised by a `Task`'s execution."""
        if propagate:
            raise
        I = Info(state, exc)
        R = I.handle_error_state(
            task, request, eager=eager, call_errbacks=call_errbacks,
        )
        return I, R, I.state, I.retval

    def trace_task(
            uuid: str,
            args: Sequence[Any],
            kwargs: Dict[str, Any],
            request: Optional[Dict[str, Any]] = None) -> trace_ok_t:
        """Execute and trace a `Task`."""

        # R - is the possibly prepared return value.
        # I - is the Info object.
        # T - runtime
        # Rstr - textual representation of return value
        # retval - is the always unmodified return value.
        # state - is the resulting task state.

        # This function is very long because we've unrolled all the calls
        # for performance reasons, and because the function is so long
        # we want the main variables (I, and R) to stand out visually from
        # the rest of the variables, so breaking PEP8 is worth it ;)
        R = I = T = Rstr = retval = state = None
        task_request = None
        time_start = monotonic()
        try:
            try:
                kwargs.items
            except AttributeError:
                raise InvalidTaskError(
                    'Task keyword arguments is not a mapping')

            task_request = Context(request or {}, args=args,
                                   called_directly=False, kwargs=kwargs)

            redelivered = (task_request.delivery_info
                           and task_request.delivery_info.get('redelivered', False))
            if deduplicate_successful_tasks and redelivered:
                if task_request.id in successful_requests:
                    return trace_ok_t(R, I, T, Rstr)
                r = AsyncResult(task_request.id, app=app)

                try:
                    state = r.state
                except BackendGetMetaError:
                    pass
                else:
                    if state == SUCCESS:
                        info(LOG_IGNORED, {
                            'id': task_request.id,
                            'name': get_task_name(task_request, name),
                            'description': 'Task already completed successfully.'
                        })
                        return trace_ok_t(R, I, T, Rstr)

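            # Make the task and its request visible on the current task and
            # request stacks for the duration of the run; both are popped
            # again in the ``finally`` block below.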
            push_task(task)
            root_id = task_request.root_id or uuid
            task_priority = task_request.delivery_info.get('priority') if \
                inherit_parent_priority else None
            push_request(task_request)
            try:
                # -*- PRE -*-
                if prerun_receivers:
                    send_prerun(sender=task, task_id=uuid, task=task,
                                args=args, kwargs=kwargs)
                loader_task_init(uuid, task)
                if track_started:
                    task.backend.store_result(
                        uuid, {'pid': pid, 'hostname': hostname}, STARTED,
                        request=task_request,
                    )

                # -*- TRACE -*-
                try:
                    if task_before_start:
                        task_before_start(uuid, args, kwargs)

                    R = retval = fun(*args, **kwargs)
                    state = SUCCESS
                except Reject as exc:
                    I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_reject(task, task_request)
                    traceback_clear(exc)
                except Ignore as exc:
                    I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_ignore(task, task_request)
                    traceback_clear(exc)
                except Retry as exc:
                    I, R, state, retval = on_error(
                        task_request, exc, RETRY, call_errbacks=False)
                    traceback_clear(exc)
                except Exception as exc:
                    I, R, state, retval = on_error(task_request, exc)
                    traceback_clear(exc)
                except BaseException:
                    raise
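                # Only reached when the task body returned without raising:
                # apply callbacks/chains, store the result, then emit the
                # success signal and log line.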
                else:
                    try:
                        # callback tasks must be applied before the result is
                        # stored, so that result.children is populated.

                        # groups are called inline and will store trail
                        # separately, so need to call them separately
                        # so that the trail's not added multiple times :(
                        # (Issue #1936)
                        callbacks = task.request.callbacks
                        if callbacks:
                            if len(task.request.callbacks) > 1:
                                sigs, groups = [], []
                                for sig in callbacks:
                                    sig = signature(sig, app=app)
                                    if isinstance(sig, group):
                                        groups.append(sig)
                                    else:
                                        sigs.append(sig)
                                for group_ in groups:
                                    group_.apply_async(
                                        (retval,),
                                        parent_id=uuid, root_id=root_id,
                                        priority=task_priority
                                    )
                                if sigs:
                                    group(sigs, app=app).apply_async(
                                        (retval,),
                                        parent_id=uuid, root_id=root_id,
                                        priority=task_priority
                                    )
                            else:
                                signature(callbacks[0], app=app).apply_async(
                                    (retval,), parent_id=uuid, root_id=root_id,
                                    priority=task_priority
                                )

                        # execute first task in chain
                        chain = task_request.chain
                        if chain:
                            _chsig = signature(chain.pop(), app=app)
                            _chsig.apply_async(
                                (retval,), chain=chain,
                                parent_id=uuid, root_id=root_id,
                                priority=task_priority
                            )
                        task.backend.mark_as_done(
                            uuid, retval, task_request, publish_result,
                        )
                    except EncodeError as exc:
                        I, R, state, retval = on_error(task_request, exc)
                    else:
                        Rstr = saferepr(R, resultrepr_maxsize)
                        T = monotonic() - time_start
                        if task_on_success:
                            task_on_success(retval, uuid, args, kwargs)
                        if success_receivers:
                            send_success(sender=task, result=retval)
                        if _does_info:
                            info(LOG_SUCCESS, {
                                'id': uuid,
                                'name': get_task_name(task_request, name),
                                'return_value': Rstr,
                                'runtime': T,
                                'args': task_request.get('argsrepr') or safe_repr(args),
                                'kwargs': task_request.get('kwargsrepr') or safe_repr(kwargs),
                            })

                # -* POST *-
                if state not in IGNORE_STATES:
                    if task_after_return:
                        task_after_return(
                            state, retval, uuid, args, kwargs, None,
                        )
            finally:
                try:
                    if postrun_receivers:
                        send_postrun(sender=task, task_id=uuid, task=task,
                                     args=args, kwargs=kwargs,
                                     retval=retval, state=state)
                finally:
                    pop_task()
                    pop_request()
                    if not eager:
                        try:
                            task.backend.process_cleanup()
                            loader_cleanup()
                        except (KeyboardInterrupt, SystemExit, MemoryError):
                            raise
                        except Exception as exc:
                            logger.error('Process cleanup failed: %r', exc,
                                         exc_info=True)
        except MemoryError:
            raise
        except Exception as exc:
            _signal_internal_error(task, uuid, args, kwargs, request, exc)
            if eager:
                raise
            R = report_internal_error(task, exc)
            if task_request is not None:
                I, _, _, _ = on_error(task_request, exc)
        return trace_ok_t(R, I, T, Rstr)

    return trace_task


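# Illustrative only: roughly how the worker consumes ``build_tracer`` (the
# real call sites live in the worker's request/strategy code; the variable
# names here are assumptions for the example):
#
#     tracer = build_tracer(task.name, task, app=app, hostname=hostname)
#     retval, info, runtime, retstr = tracer(task_id, args, kwargs, request_dict)
#
# ``info`` is None when the task succeeded and an error-info object otherwise.
# ``trace_task`` below is the safe wrapper that builds and caches such a
# tracer on ``task.__trace__`` the first time a task is executed.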
def trace_task(task, uuid, args, kwargs, request=None, **opts):
    """Trace task execution."""
    request = {} if not request else request
    try:
        if task.__trace__ is None:
            task.__trace__ = build_tracer(task.name, task, **opts)
        return task.__trace__(uuid, args, kwargs, request)
    except Exception as exc:
        _signal_internal_error(task, uuid, args, kwargs, request, exc)
        return trace_ok_t(report_internal_error(task, exc), TraceInfo(FAILURE, exc), 0.0, None)


def _signal_internal_error(task, uuid, args, kwargs, request, exc):
    """Send a special `internal_error` signal to the app for outside body errors."""
    try:
        _, _, tb = sys.exc_info()
        einfo = ExceptionInfo()
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        signals.task_internal_error.send(
            sender=task,
            task_id=uuid,
            args=args,
            kwargs=kwargs,
            request=request,
            exception=exc,
            traceback=tb,
            einfo=einfo,
        )
    finally:
        del tb


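# Entry point for traced task messages: decode the body unless the protocol
# already delivered it as ``(args, kwargs, embed)``, merge everything into the
# request dict, run the task through ``trace_task`` and return
# ``(1, R, runtime)`` on failure or ``(0, repr(result), runtime)`` on success.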
def trace_task_ret(name, uuid, request, body, content_type,
                   content_encoding, loads=loads_message, app=None,
                   **extra_request):
    app = app or current_app._get_current_object()
    embed = None
    if content_type:
        accept = prepare_accept_content(app.conf.accept_content)
        args, kwargs, embed = loads(
            body, content_type, content_encoding, accept=accept,
        )
    else:
        args, kwargs, embed = body
    hostname = gethostname()
    request.update({
        'args': args, 'kwargs': kwargs,
        'hostname': hostname, 'is_eager': False,
    }, **embed or {})
    R, I, T, Rstr = trace_task(app.tasks[name],
                               uuid, args, kwargs, request, app=app)
    return (1, R, T) if I else (0, Rstr, T)


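# Optimized variant installed by ``setup_worker_optimizations()``: the task
# registry, accepted content types and hostname come from the module-level
# ``_localized`` shortcut instead of per-call lookups, and the prebuilt
# ``__trace__`` function is invoked directly.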
def fast_trace_task(task, uuid, request, body, content_type,
                    content_encoding, loads=loads_message, _loc=None,
                    hostname=None, **_):
    _loc = _localized if not _loc else _loc
    embed = None
    tasks, accept, hostname = _loc
    if content_type:
        args, kwargs, embed = loads(
            body, content_type, content_encoding, accept=accept,
        )
    else:
        args, kwargs, embed = body
    request.update({
        'args': args, 'kwargs': kwargs,
        'hostname': hostname, 'is_eager': False,
    }, **embed or {})
    R, I, T, Rstr = tasks[task].__trace__(
        uuid, args, kwargs, request,
    )
    return (1, R, T) if I else (0, Rstr, T)


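# Convert an exception raised outside of the task body into a pickleable
# ExceptionInfo, emit a RuntimeWarning with the traceback, and return the
# info object to the caller.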
def report_internal_error(task, exc):
    _type, _value, _tb = sys.exc_info()
    try:
        _value = task.backend.prepare_exception(exc, 'pickle')
        exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
        warn(RuntimeWarning(
            'Exception raised outside body: {!r}:\n{}'.format(
                exc, exc_info.traceback)))
        return exc_info
    finally:
        del _tb


def setup_worker_optimizations(app, hostname=None):
    """Setup worker related optimizations."""
    hostname = hostname or gethostname()

    # make sure custom Task.__call__ methods that calls super
    # won't mess up the request/task stack.
    _install_stack_protection()

    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app. So for the worker
    # we set this to always return our app. This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    app.set_current()
    app.set_default()

    # evaluate all task classes by finalizing the app.
    app.finalize()

    # set fast shortcut to task registry
    _localized[:] = [
        app._tasks,
        prepare_accept_content(app.conf.accept_content),
        hostname,
    ]

    app.use_fast_trace_task = True


def reset_worker_optimizations(app=current_app):
    """Reset previously configured optimizations."""
    try:
        delattr(BaseTask, '_stackprotected')
    except AttributeError:
        pass
    try:
        BaseTask.__call__ = _patched.pop('BaseTask.__call__')
    except KeyError:
        pass
    app.use_fast_trace_task = False


def _install_stack_protection():
    # Patches BaseTask.__call__ in the worker to handle the edge case
    # where people override it and also call super.
    #
    # - The worker optimizes away BaseTask.__call__ and instead
    #   calls task.run directly.
    # - so with the addition of current_task and the request stack
    #   BaseTask.__call__ now pushes to those stacks so that
    #   they work when tasks are called directly.
    #
    # The worker only optimizes away __call__ in the case
    # where it hasn't been overridden, so the request/task stack
    # will blow if a custom task class defines __call__ and also
    # calls super().
    if not getattr(BaseTask, '_stackprotected', False):
        _patched['BaseTask.__call__'] = orig = BaseTask.__call__

        def __protected_call__(self, *args, **kwargs):
            stack = self.request_stack
            req = stack.top
            if req and not req._protected and \
                    len(stack) == 1 and not req.called_directly:
                req._protected = 1
                return self.run(*args, **kwargs)
            return orig(self, *args, **kwargs)
        BaseTask.__call__ = __protected_call__
        BaseTask._stackprotected = True
415
gnx-react/venv/lib/python3.12/site-packages/celery/app/utils.py
Normal file
@@ -0,0 +1,415 @@
"""App utilities: Compat settings, bug-report tool, pickling apps."""
|
||||
import os
|
||||
import platform as _platform
|
||||
import re
|
||||
from collections import namedtuple
|
||||
from collections.abc import Mapping
|
||||
from copy import deepcopy
|
||||
from types import ModuleType
|
||||
|
||||
from kombu.utils.url import maybe_sanitize_url
|
||||
|
||||
from celery.exceptions import ImproperlyConfigured
|
||||
from celery.platforms import pyimplementation
|
||||
from celery.utils.collections import ConfigurationView
|
||||
from celery.utils.imports import import_from_cwd, qualname, symbol_by_name
|
||||
from celery.utils.text import pretty
|
||||
|
||||
from .defaults import _OLD_DEFAULTS, _OLD_SETTING_KEYS, _TO_NEW_KEY, _TO_OLD_KEY, DEFAULTS, SETTING_KEYS, find
|
||||
|
||||
__all__ = (
|
||||
'Settings', 'appstr', 'bugreport',
|
||||
'filter_hidden_settings', 'find_app',
|
||||
)
|
||||
|
||||
#: Format used to generate bug-report information.
|
||||
BUGREPORT_INFO = """
|
||||
software -> celery:{celery_v} kombu:{kombu_v} py:{py_v}
|
||||
billiard:{billiard_v} {driver_v}
|
||||
platform -> system:{system} arch:{arch}
|
||||
kernel version:{kernel_version} imp:{py_i}
|
||||
loader -> {loader}
|
||||
settings -> transport:{transport} results:{results}
|
||||
|
||||
{human_settings}
|
||||
"""
|
||||
|
||||
HIDDEN_SETTINGS = re.compile(
|
||||
'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE',
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
E_MIX_OLD_INTO_NEW = """
|
||||
|
||||
Cannot mix new and old setting keys, please rename the
|
||||
following settings to the new format:
|
||||
|
||||
{renames}
|
||||
|
||||
"""
|
||||
|
||||
E_MIX_NEW_INTO_OLD = """
|
||||
|
||||
Cannot mix new setting names with old setting names, please
|
||||
rename the following settings to use the old format:
|
||||
|
||||
{renames}
|
||||
|
||||
Or change all of the settings to use the new format :)
|
||||
|
||||
"""
|
||||
|
||||
FMT_REPLACE_SETTING = '{replace:<36} -> {with_}'
|
||||
|
||||
|
||||
def appstr(app):
|
||||
"""String used in __repr__ etc, to id app instances."""
|
||||
return f'{app.main or "__main__"} at {id(app):#x}'
|
||||
|
||||
|
||||
class Settings(ConfigurationView):
|
||||
"""Celery settings object.
|
||||
|
||||
.. seealso:
|
||||
|
||||
:ref:`configuration` for a full list of configuration keys.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *args, deprecated_settings=None, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
self.deprecated_settings = deprecated_settings
|
||||
|
||||
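    # The broker / result-backend properties below prefer the corresponding
    # CELERY_* environment variables over configured values, then fall back
    # to the old-style keys (``broker_host``, ``CELERY_RESULT_BACKEND``).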
    @property
    def broker_read_url(self):
        return (
            os.environ.get('CELERY_BROKER_READ_URL') or
            self.get('broker_read_url') or
            self.broker_url
        )

    @property
    def broker_write_url(self):
        return (
            os.environ.get('CELERY_BROKER_WRITE_URL') or
            self.get('broker_write_url') or
            self.broker_url
        )

    @property
    def broker_url(self):
        return (
            os.environ.get('CELERY_BROKER_URL') or
            self.first('broker_url', 'broker_host')
        )

    @property
    def result_backend(self):
        return (
            os.environ.get('CELERY_RESULT_BACKEND') or
            self.first('result_backend', 'CELERY_RESULT_BACKEND')
        )

    @property
    def task_default_exchange(self):
        return self.first(
            'task_default_exchange',
            'task_default_queue',
        )

    @property
    def task_default_routing_key(self):
        return self.first(
            'task_default_routing_key',
            'task_default_queue',
        )

    @property
    def timezone(self):
        # this way we also support django's time zone.
        return self.first('timezone', 'TIME_ZONE')

    def without_defaults(self):
        """Return the current configuration, but without defaults."""
        # the last stash is the default settings, so just skip that
        return Settings({}, self.maps[:-1])

    def value_set_for(self, key):
        return key in self.without_defaults()

    def find_option(self, name, namespace=''):
        """Search for option by name.

        Example:
            >>> from proj.celery import app
            >>> app.conf.find_option('disable_rate_limits')
            ('worker', 'disable_rate_limits',
             <Option: type->bool default->False>)

        Arguments:
            name (str): Name of option, cannot be partial.
            namespace (str): Preferred name-space (``''`` by default).
        Returns:
            Tuple: of ``(namespace, key, type)``.
        """
        return find(name, namespace)

    def find_value_for_key(self, name, namespace='celery'):
        """Shortcut to ``get_by_parts(*find_option(name)[:-1])``."""
        return self.get_by_parts(*self.find_option(name, namespace)[:-1])

    def get_by_parts(self, *parts):
        """Return the current value for setting specified as a path.

        Example:
            >>> from proj.celery import app
            >>> app.conf.get_by_parts('worker', 'disable_rate_limits')
            False
        """
        return self['_'.join(part for part in parts if part)]

    def finalize(self):
        # See PendingConfiguration in celery/app/base.py
        # first access will read actual configuration.
        try:
            self['__bogus__']
        except KeyError:
            pass
        return self

    def table(self, with_defaults=False, censored=True):
        filt = filter_hidden_settings if censored else lambda v: v
        dict_members = dir(dict)
        self.finalize()
        settings = self if with_defaults else self.without_defaults()
        return filt({
            k: v for k, v in settings.items()
            if not k.startswith('_') and k not in dict_members
        })

    def humanize(self, with_defaults=False, censored=True):
        """Return a human readable text showing configuration changes."""
        return '\n'.join(
            f'{key}: {pretty(value, width=50)}'
            for key, value in self.table(with_defaults, censored).items())

    def maybe_warn_deprecated_settings(self):
        # TODO: Remove this method in Celery 6.0
        if self.deprecated_settings:
            from celery.app.defaults import _TO_NEW_KEY
            from celery.utils import deprecated
            for setting in self.deprecated_settings:
                deprecated.warn(description=f'The {setting!r} setting',
                                removal='6.0.0',
                                alternative=f'Use the {_TO_NEW_KEY[setting]} instead')

            return True

        return False


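# Helpers used by ``detect_settings()``: each ``_settings_info_t`` bundles
# the defaults, the rename table, the key-conversion function and the error
# template for one of the two setting-name formats (new lowercase keys vs.
# old ``CELERY_*``-style keys).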
def _new_key_to_old(key, convert=_TO_OLD_KEY.get):
    return convert(key, key)


def _old_key_to_new(key, convert=_TO_NEW_KEY.get):
    return convert(key, key)


_settings_info_t = namedtuple('settings_info_t', (
    'defaults', 'convert', 'key_t', 'mix_error',
))

_settings_info = _settings_info_t(
    DEFAULTS, _TO_NEW_KEY, _old_key_to_new, E_MIX_OLD_INTO_NEW,
)
_old_settings_info = _settings_info_t(
    _OLD_DEFAULTS, _TO_OLD_KEY, _new_key_to_old, E_MIX_NEW_INTO_OLD,
)


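# detect_settings() classifies the supplied keys as new-format or old-format,
# settles on whichever format the majority of keys use, and raises
# ImproperlyConfigured with concrete rename suggestions when the two formats
# are mixed (unless both spellings of the same key were provided).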
def detect_settings(conf, preconf=None, ignore_keys=None, prefix=None,
                    all_keys=None, old_keys=None):
    preconf = {} if not preconf else preconf
    ignore_keys = set() if not ignore_keys else ignore_keys
    all_keys = SETTING_KEYS if not all_keys else all_keys
    old_keys = _OLD_SETTING_KEYS if not old_keys else old_keys

    source = conf
    if conf is None:
        source, conf = preconf, {}
    have = set(source.keys()) - ignore_keys
    is_in_new = have.intersection(all_keys)
    is_in_old = have.intersection(old_keys)

    info = None
    if is_in_new:
        # have new setting names
        info, left = _settings_info, is_in_old
        if is_in_old and len(is_in_old) > len(is_in_new):
            # Majority of the settings are old.
            info, left = _old_settings_info, is_in_new
    if is_in_old:
        # have old setting names, or a majority of the names are old.
        if not info:
            info, left = _old_settings_info, is_in_new
        if is_in_new and len(is_in_new) > len(is_in_old):
            # Majority of the settings are new
            info, left = _settings_info, is_in_old
    else:
        # no settings, just use new format.
        info, left = _settings_info, is_in_old

    if prefix:
        # always use new format if prefix is used.
        info, left = _settings_info, set()

    # only raise error for keys that the user didn't provide two keys
    # for (e.g., both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``).
    really_left = {key for key in left if info.convert[key] not in have}
    if really_left:
        # user is mixing old/new, or new/old settings, give renaming
        # suggestions.
        raise ImproperlyConfigured(info.mix_error.format(renames='\n'.join(
            FMT_REPLACE_SETTING.format(replace=key, with_=info.convert[key])
            for key in sorted(really_left)
        )))

    preconf = {info.convert.get(k, k): v for k, v in preconf.items()}
    defaults = dict(deepcopy(info.defaults), **preconf)
    return Settings(
        preconf, [conf, defaults],
        (_old_key_to_new, _new_key_to_old),
        deprecated_settings=is_in_old,
        prefix=prefix,
    )


class AppPickler:
    """Old application pickler/unpickler (< 3.1)."""

    def __call__(self, cls, *args):
        kwargs = self.build_kwargs(*args)
        app = self.construct(cls, **kwargs)
        self.prepare(app, **kwargs)
        return app

    def prepare(self, app, **kwargs):
        app.conf.update(kwargs['changes'])

    def build_kwargs(self, *args):
        return self.build_standard_kwargs(*args)

    def build_standard_kwargs(self, main, changes, loader, backend, amqp,
                              events, log, control, accept_magic_kwargs,
                              config_source=None):
        return {'main': main, 'loader': loader, 'backend': backend,
                'amqp': amqp, 'changes': changes, 'events': events,
                'log': log, 'control': control, 'set_as_current': False,
                'config_source': config_source}

    def construct(self, cls, **kwargs):
        return cls(**kwargs)


def _unpickle_app(cls, pickler, *args):
    """Rebuild app for versions 2.5+."""
    return pickler()(cls, *args)


def _unpickle_app_v2(cls, kwargs):
    """Rebuild app for versions 3.1+."""
    kwargs['set_as_current'] = False
    return cls(**kwargs)


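# Keys matching HIDDEN_SETTINGS are replaced with a mask, broker URLs are
# re-rendered with the password masked, backend URLs go through kombu's
# maybe_sanitize_url(), and nested mappings are filtered recursively.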
def filter_hidden_settings(conf):
    """Filter sensitive settings."""
    def maybe_censor(key, value, mask='*' * 8):
        if isinstance(value, Mapping):
            return filter_hidden_settings(value)
        if isinstance(key, str):
            if HIDDEN_SETTINGS.search(key):
                return mask
            elif 'broker_url' in key.lower():
                from kombu import Connection
                return Connection(value).as_uri(mask=mask)
            elif 'backend' in key.lower():
                return maybe_sanitize_url(value, mask=mask)

        return value

    return {k: maybe_censor(k, v) for k, v in conf.items()}


def bugreport(app):
    """Return a string containing information useful in bug-reports."""
    import billiard
    import kombu

    import celery

    try:
        conn = app.connection()
        driver_v = '{}:{}'.format(conn.transport.driver_name,
                                  conn.transport.driver_version())
        transport = conn.transport_cls
    except Exception:  # pylint: disable=broad-except
        transport = driver_v = ''

    return BUGREPORT_INFO.format(
        system=_platform.system(),
        arch=', '.join(x for x in _platform.architecture() if x),
        kernel_version=_platform.release(),
        py_i=pyimplementation(),
        celery_v=celery.VERSION_BANNER,
        kombu_v=kombu.__version__,
        billiard_v=billiard.__version__,
        py_v=_platform.python_version(),
        driver_v=driver_v,
        transport=transport,
        results=maybe_sanitize_url(app.conf.result_backend or 'disabled'),
        human_settings=app.conf.humanize(),
        loader=qualname(app.loader.__class__),
    )


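# Resolution order: try ``symbol_by_name()`` first (this handles the
# ``pkg.module:attribute`` form), then fall back to the module's ``app``
# attribute, its ``celery`` attribute, a ``<name>.celery`` submodule, and
# finally any ``Celery`` instance found among the module's attributes.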
def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
    """Find app by name."""
    from .base import Celery

    try:
        sym = symbol_by_name(app, imp=imp)
    except AttributeError:
        # last part was not an attribute, but a module
        sym = imp(app)
    if isinstance(sym, ModuleType) and ':' not in app:
        try:
            found = sym.app
            if isinstance(found, ModuleType):
                raise AttributeError()
        except AttributeError:
            try:
                found = sym.celery
                if isinstance(found, ModuleType):
                    raise AttributeError(
                        "attribute 'celery' is the celery module not the instance of celery")
            except AttributeError:
                if getattr(sym, '__path__', None):
                    try:
                        return find_app(
                            f'{app}.celery',
                            symbol_by_name=symbol_by_name, imp=imp,
                        )
                    except ImportError:
                        pass
                for suspect in vars(sym).values():
                    if isinstance(suspect, Celery):
                        return suspect
                raise
            else:
                return found
        else:
            return found
    return sym