update
@@ -0,0 +1,15 @@
"""Monitoring Event Receiver+Dispatcher.

Events is a stream of messages sent for certain actions occurring
in the worker (and clients if :setting:`task_send_sent_event`
is enabled), used for monitoring purposes.
"""

from .dispatcher import EventDispatcher
from .event import Event, event_exchange, get_exchange, group_from
from .receiver import EventReceiver

__all__ = (
    'Event', 'EventDispatcher', 'EventReceiver',
    'event_exchange', 'get_exchange', 'group_from',
)
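# Illustrative note (not part of the original file): application code usually
# reaches these classes through the app rather than importing them from here,
# e.g. ``app.events.Receiver(connection, handlers={...})`` and
# ``app.events.State()`` as the other modules in this commit do, and the
# :setting:`task_send_sent_event` flag mentioned above is enabled with
# ``app.conf.task_send_sent_event = True`` so clients also emit events.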
@@ -0,0 +1,534 @@
|
||||
"""Graphical monitor of Celery events using curses."""
|
||||
|
||||
import curses
|
||||
import sys
|
||||
import threading
|
||||
from datetime import datetime
|
||||
from itertools import count
|
||||
from math import ceil
|
||||
from textwrap import wrap
|
||||
from time import time
|
||||
|
||||
from celery import VERSION_BANNER, states
|
||||
from celery.app import app_or_default
|
||||
from celery.utils.text import abbr, abbrtask
|
||||
|
||||
__all__ = ('CursesMonitor', 'evtop')
|
||||
|
||||
BORDER_SPACING = 4
|
||||
LEFT_BORDER_OFFSET = 3
|
||||
UUID_WIDTH = 36
|
||||
STATE_WIDTH = 8
|
||||
TIMESTAMP_WIDTH = 8
|
||||
MIN_WORKER_WIDTH = 15
|
||||
MIN_TASK_WIDTH = 16
|
||||
|
||||
# this module is considered experimental
|
||||
# we don't care about coverage.
|
||||
|
||||
STATUS_SCREEN = """\
|
||||
events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
|
||||
"""
|
||||
|
||||
|
||||
class CursesMonitor: # pragma: no cover
|
||||
"""A curses based Celery task monitor."""
|
||||
|
||||
keymap = {}
|
||||
win = None
|
||||
screen_delay = 10
|
||||
selected_task = None
|
||||
selected_position = 0
|
||||
selected_str = 'Selected: '
|
||||
foreground = curses.COLOR_BLACK
|
||||
background = curses.COLOR_WHITE
|
||||
online_str = 'Workers online: '
|
||||
help_title = 'Keys: '
|
||||
help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit')
|
||||
greet = f'celery events {VERSION_BANNER}'
|
||||
info_str = 'Info: '
|
||||
|
||||
def __init__(self, state, app, keymap=None):
|
||||
self.app = app
|
||||
self.keymap = keymap or self.keymap
|
||||
self.state = state
|
||||
default_keymap = {
|
||||
'J': self.move_selection_down,
|
||||
'K': self.move_selection_up,
|
||||
'C': self.revoke_selection,
|
||||
'T': self.selection_traceback,
|
||||
'R': self.selection_result,
|
||||
'I': self.selection_info,
|
||||
'L': self.selection_rate_limit,
|
||||
}
|
||||
self.keymap = dict(default_keymap, **self.keymap)
|
||||
self.lock = threading.RLock()
|
||||
|
||||
def format_row(self, uuid, task, worker, timestamp, state):
|
||||
mx = self.display_width
|
||||
|
||||
# include spacing
|
||||
detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
|
||||
uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH
|
||||
|
||||
if uuid_space < UUID_WIDTH:
|
||||
uuid_width = uuid_space
|
||||
else:
|
||||
uuid_width = UUID_WIDTH
|
||||
|
||||
detail_width = detail_width - uuid_width - 1
|
||||
task_width = int(ceil(detail_width / 2.0))
|
||||
worker_width = detail_width - task_width - 1
|
||||
|
||||
uuid = abbr(uuid, uuid_width).ljust(uuid_width)
|
||||
worker = abbr(worker, worker_width).ljust(worker_width)
|
||||
task = abbrtask(task, task_width).ljust(task_width)
|
||||
state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
|
||||
timestamp = timestamp.ljust(TIMESTAMP_WIDTH)
|
||||
|
||||
row = f'{uuid} {worker} {task} {timestamp} {state} '
|
||||
if self.screen_width is None:
|
||||
self.screen_width = len(row[:mx])
|
||||
return row[:mx]
|
||||
|
||||
@property
|
||||
def screen_width(self):
|
||||
_, mx = self.win.getmaxyx()
|
||||
return mx
|
||||
|
||||
@property
|
||||
def screen_height(self):
|
||||
my, _ = self.win.getmaxyx()
|
||||
return my
|
||||
|
||||
@property
|
||||
def display_width(self):
|
||||
_, mx = self.win.getmaxyx()
|
||||
return mx - BORDER_SPACING
|
||||
|
||||
@property
|
||||
def display_height(self):
|
||||
my, _ = self.win.getmaxyx()
|
||||
return my - 10
|
||||
|
||||
@property
|
||||
def limit(self):
|
||||
return self.display_height
|
||||
|
||||
def find_position(self):
|
||||
if not self.tasks:
|
||||
return 0
|
||||
for i, e in enumerate(self.tasks):
|
||||
if self.selected_task == e[0]:
|
||||
return i
|
||||
return 0
|
||||
|
||||
def move_selection_up(self):
|
||||
self.move_selection(-1)
|
||||
|
||||
def move_selection_down(self):
|
||||
self.move_selection(1)
|
||||
|
||||
def move_selection(self, direction=1):
|
||||
if not self.tasks:
|
||||
return
|
||||
pos = self.find_position()
|
||||
try:
|
||||
self.selected_task = self.tasks[pos + direction][0]
|
||||
except IndexError:
|
||||
self.selected_task = self.tasks[0][0]
|
||||
|
||||
keyalias = {curses.KEY_DOWN: 'J',
|
||||
curses.KEY_UP: 'K',
|
||||
curses.KEY_ENTER: 'I'}
|
||||
|
||||
def handle_keypress(self):
|
||||
try:
|
||||
key = self.win.getkey().upper()
|
||||
except Exception: # pylint: disable=broad-except
|
||||
return
|
||||
key = self.keyalias.get(key) or key
|
||||
handler = self.keymap.get(key)
|
||||
if handler is not None:
|
||||
handler()
|
||||
|
||||
def alert(self, callback, title=None):
|
||||
self.win.erase()
|
||||
my, mx = self.win.getmaxyx()
|
||||
y = blank_line = count(2)
|
||||
if title:
|
||||
self.win.addstr(next(y), 3, title,
|
||||
curses.A_BOLD | curses.A_UNDERLINE)
|
||||
next(blank_line)
|
||||
callback(my, mx, next(y))
|
||||
self.win.addstr(my - 1, 0, 'Press any key to continue...',
|
||||
curses.A_BOLD)
|
||||
self.win.refresh()
|
||||
while 1:
|
||||
try:
|
||||
return self.win.getkey().upper()
|
||||
except Exception: # pylint: disable=broad-except
|
||||
pass
|
||||
|
||||
def selection_rate_limit(self):
|
||||
if not self.selected_task:
|
||||
return curses.beep()
|
||||
task = self.state.tasks[self.selected_task]
|
||||
if not task.name:
|
||||
return curses.beep()
|
||||
|
||||
my, mx = self.win.getmaxyx()
|
||||
r = 'New rate limit: '
|
||||
self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
|
||||
self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r)))
|
||||
rlimit = self.readline(my - 2, 3 + len(r))
|
||||
|
||||
if rlimit:
|
||||
reply = self.app.control.rate_limit(task.name,
|
||||
rlimit.strip(), reply=True)
|
||||
self.alert_remote_control_reply(reply)
|
||||
|
||||
def alert_remote_control_reply(self, reply):
|
||||
|
||||
def callback(my, mx, xs):
|
||||
y = count(xs)
|
||||
if not reply:
|
||||
self.win.addstr(
|
||||
next(y), 3, 'No replies received in 1s deadline.',
|
||||
curses.A_BOLD + curses.color_pair(2),
|
||||
)
|
||||
return
|
||||
|
||||
for subreply in reply:
|
||||
curline = next(y)
|
||||
|
||||
host, response = next(subreply.items())
|
||||
host = f'{host}: '
|
||||
self.win.addstr(curline, 3, host, curses.A_BOLD)
|
||||
attr = curses.A_NORMAL
|
||||
text = ''
|
||||
if 'error' in response:
|
||||
text = response['error']
|
||||
attr |= curses.color_pair(2)
|
||||
elif 'ok' in response:
|
||||
text = response['ok']
|
||||
attr |= curses.color_pair(3)
|
||||
self.win.addstr(curline, 3 + len(host), text, attr)
|
||||
|
||||
return self.alert(callback, 'Remote Control Command Replies')
|
||||
|
||||
def readline(self, x, y):
|
||||
buffer = ''
|
||||
curses.echo()
|
||||
try:
|
||||
i = 0
|
||||
while 1:
|
||||
ch = self.win.getch(x, y + i)
|
||||
if ch != -1:
|
||||
if ch in (10, curses.KEY_ENTER): # enter
|
||||
break
|
||||
if ch in (27,):
|
||||
buffer = ''
|
||||
break
|
||||
buffer += chr(ch)
|
||||
i += 1
|
||||
finally:
|
||||
curses.noecho()
|
||||
return buffer
|
||||
|
||||
def revoke_selection(self):
|
||||
if not self.selected_task:
|
||||
return curses.beep()
|
||||
reply = self.app.control.revoke(self.selected_task, reply=True)
|
||||
self.alert_remote_control_reply(reply)
|
||||
|
||||
def selection_info(self):
|
||||
if not self.selected_task:
|
||||
return
|
||||
|
||||
def alert_callback(mx, my, xs):
|
||||
my, mx = self.win.getmaxyx()
|
||||
y = count(xs)
|
||||
task = self.state.tasks[self.selected_task]
|
||||
info = task.info(extra=['state'])
|
||||
infoitems = [
|
||||
('args', info.pop('args', None)),
|
||||
('kwargs', info.pop('kwargs', None))
|
||||
] + list(info.items())
|
||||
for key, value in infoitems:
|
||||
if key is None:
|
||||
continue
|
||||
value = str(value)
|
||||
curline = next(y)
|
||||
keys = key + ': '
|
||||
self.win.addstr(curline, 3, keys, curses.A_BOLD)
|
||||
wrapped = wrap(value, mx - 2)
|
||||
if len(wrapped) == 1:
|
||||
self.win.addstr(
|
||||
curline, len(keys) + 3,
|
||||
abbr(wrapped[0],
|
||||
self.screen_width - (len(keys) + 3)))
|
||||
else:
|
||||
for subline in wrapped:
|
||||
nexty = next(y)
|
||||
if nexty >= my - 1:
|
||||
subline = ' ' * 4 + '[...]'
|
||||
self.win.addstr(
|
||||
nexty, 3,
|
||||
abbr(' ' * 4 + subline, self.screen_width - 4),
|
||||
curses.A_NORMAL,
|
||||
)
|
||||
|
||||
return self.alert(
|
||||
alert_callback, f'Task details for {self.selected_task}',
|
||||
)
|
||||
|
||||
def selection_traceback(self):
|
||||
if not self.selected_task:
|
||||
return curses.beep()
|
||||
task = self.state.tasks[self.selected_task]
|
||||
if task.state not in states.EXCEPTION_STATES:
|
||||
return curses.beep()
|
||||
|
||||
def alert_callback(my, mx, xs):
|
||||
y = count(xs)
|
||||
for line in task.traceback.split('\n'):
|
||||
self.win.addstr(next(y), 3, line)
|
||||
|
||||
return self.alert(
|
||||
alert_callback,
|
||||
f'Task Exception Traceback for {self.selected_task}',
|
||||
)
|
||||
|
||||
def selection_result(self):
|
||||
if not self.selected_task:
|
||||
return
|
||||
|
||||
def alert_callback(my, mx, xs):
|
||||
y = count(xs)
|
||||
task = self.state.tasks[self.selected_task]
|
||||
result = (getattr(task, 'result', None) or
|
||||
getattr(task, 'exception', None))
|
||||
for line in wrap(result or '', mx - 2):
|
||||
self.win.addstr(next(y), 3, line)
|
||||
|
||||
return self.alert(
|
||||
alert_callback,
|
||||
f'Task Result for {self.selected_task}',
|
||||
)
|
||||
|
||||
def display_task_row(self, lineno, task):
|
||||
state_color = self.state_colors.get(task.state)
|
||||
attr = curses.A_NORMAL
|
||||
if task.uuid == self.selected_task:
|
||||
attr = curses.A_STANDOUT
|
||||
timestamp = datetime.utcfromtimestamp(
|
||||
task.timestamp or time(),
|
||||
)
|
||||
timef = timestamp.strftime('%H:%M:%S')
|
||||
hostname = task.worker.hostname if task.worker else '*NONE*'
|
||||
line = self.format_row(task.uuid, task.name,
|
||||
hostname,
|
||||
timef, task.state)
|
||||
self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)
|
||||
|
||||
if state_color:
|
||||
self.win.addstr(lineno,
|
||||
len(line) - STATE_WIDTH + BORDER_SPACING - 1,
|
||||
task.state, state_color | attr)
|
||||
|
||||
def draw(self):
|
||||
with self.lock:
|
||||
win = self.win
|
||||
self.handle_keypress()
|
||||
x = LEFT_BORDER_OFFSET
|
||||
y = blank_line = count(2)
|
||||
my, _ = win.getmaxyx()
|
||||
win.erase()
|
||||
win.bkgd(' ', curses.color_pair(1))
|
||||
win.border()
|
||||
win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
|
||||
next(blank_line)
|
||||
win.addstr(next(y), x, self.format_row('UUID', 'TASK',
|
||||
'WORKER', 'TIME', 'STATE'),
|
||||
curses.A_BOLD | curses.A_UNDERLINE)
|
||||
tasks = self.tasks
|
||||
if tasks:
|
||||
for row, (_, task) in enumerate(tasks):
|
||||
if row > self.display_height:
|
||||
break
|
||||
|
||||
if task.uuid:
|
||||
lineno = next(y)
|
||||
self.display_task_row(lineno, task)
|
||||
|
||||
# -- Footer
|
||||
next(blank_line)
|
||||
win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)
|
||||
|
||||
# Selected Task Info
|
||||
if self.selected_task:
|
||||
win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
|
||||
info = 'Missing extended info'
|
||||
detail = ''
|
||||
try:
|
||||
selection = self.state.tasks[self.selected_task]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
info = selection.info()
|
||||
if 'runtime' in info:
|
||||
info['runtime'] = '{:.2f}'.format(info['runtime'])
|
||||
if 'result' in info:
|
||||
info['result'] = abbr(info['result'], 16)
|
||||
info = ' '.join(
|
||||
f'{key}={value}'
|
||||
for key, value in info.items()
|
||||
)
|
||||
detail = '... -> key i'
|
||||
infowin = abbr(info,
|
||||
self.screen_width - len(self.selected_str) - 2,
|
||||
detail)
|
||||
win.addstr(my - 5, x + len(self.selected_str), infowin)
|
||||
# Make ellipsis bold
|
||||
if detail in infowin:
|
||||
detailpos = len(infowin) - len(detail)
|
||||
win.addstr(my - 5, x + len(self.selected_str) + detailpos,
|
||||
detail, curses.A_BOLD)
|
||||
else:
|
||||
win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)
|
||||
|
||||
# Workers
|
||||
if self.workers:
|
||||
win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
|
||||
win.addstr(my - 4, x + len(self.online_str),
|
||||
', '.join(sorted(self.workers)), curses.A_NORMAL)
|
||||
else:
|
||||
win.addstr(my - 4, x, 'No workers discovered.')
|
||||
|
||||
# Info
|
||||
win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
|
||||
win.addstr(
|
||||
my - 3, x + len(self.info_str),
|
||||
STATUS_SCREEN.format(
|
||||
s=self.state,
|
||||
w_alive=len([w for w in self.state.workers.values()
|
||||
if w.alive]),
|
||||
w_all=len(self.state.workers),
|
||||
),
|
||||
curses.A_DIM,
|
||||
)
|
||||
|
||||
# Help
|
||||
self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
|
||||
self.safe_add_str(my - 2, x + len(self.help_title), self.help,
|
||||
curses.A_DIM)
|
||||
win.refresh()
|
||||
|
||||
def safe_add_str(self, y, x, string, *args, **kwargs):
|
||||
if x + len(string) > self.screen_width:
|
||||
string = string[:self.screen_width - x]
|
||||
self.win.addstr(y, x, string, *args, **kwargs)
|
||||
|
||||
def init_screen(self):
|
||||
with self.lock:
|
||||
self.win = curses.initscr()
|
||||
self.win.nodelay(True)
|
||||
self.win.keypad(True)
|
||||
curses.start_color()
|
||||
curses.init_pair(1, self.foreground, self.background)
|
||||
# exception states
|
||||
curses.init_pair(2, curses.COLOR_RED, self.background)
|
||||
# successful state
|
||||
curses.init_pair(3, curses.COLOR_GREEN, self.background)
|
||||
# revoked state
|
||||
curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
|
||||
# greeting
|
||||
curses.init_pair(5, curses.COLOR_BLUE, self.background)
|
||||
# started state
|
||||
curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)
|
||||
|
||||
self.state_colors = {states.SUCCESS: curses.color_pair(3),
|
||||
states.REVOKED: curses.color_pair(4),
|
||||
states.STARTED: curses.color_pair(6)}
|
||||
for state in states.EXCEPTION_STATES:
|
||||
self.state_colors[state] = curses.color_pair(2)
|
||||
|
||||
curses.cbreak()
|
||||
|
||||
def resetscreen(self):
|
||||
with self.lock:
|
||||
curses.nocbreak()
|
||||
self.win.keypad(False)
|
||||
curses.echo()
|
||||
curses.endwin()
|
||||
|
||||
def nap(self):
|
||||
curses.napms(self.screen_delay)
|
||||
|
||||
@property
|
||||
def tasks(self):
|
||||
return list(self.state.tasks_by_time(limit=self.limit))
|
||||
|
||||
@property
|
||||
def workers(self):
|
||||
return [hostname for hostname, w in self.state.workers.items()
|
||||
if w.alive]
|
||||
|
||||
|
||||
class DisplayThread(threading.Thread): # pragma: no cover
|
||||
|
||||
def __init__(self, display):
|
||||
self.display = display
|
||||
self.shutdown = False
|
||||
super().__init__()
|
||||
|
||||
def run(self):
|
||||
while not self.shutdown:
|
||||
self.display.draw()
|
||||
self.display.nap()
|
||||
|
||||
|
||||
def capture_events(app, state, display): # pragma: no cover
|
||||
|
||||
def on_connection_error(exc, interval):
|
||||
print('Connection Error: {!r}. Retry in {}s.'.format(
|
||||
exc, interval), file=sys.stderr)
|
||||
|
||||
while 1:
|
||||
print('-> evtop: starting capture...', file=sys.stderr)
|
||||
with app.connection_for_read() as conn:
|
||||
try:
|
||||
conn.ensure_connection(on_connection_error,
|
||||
app.conf.broker_connection_max_retries)
|
||||
recv = app.events.Receiver(conn, handlers={'*': state.event})
|
||||
display.resetscreen()
|
||||
display.init_screen()
|
||||
recv.capture()
|
||||
except conn.connection_errors + conn.channel_errors as exc:
|
||||
print(f'Connection lost: {exc!r}', file=sys.stderr)
|
||||
|
||||
|
||||
def evtop(app=None): # pragma: no cover
|
||||
"""Start curses monitor."""
|
||||
app = app_or_default(app)
|
||||
state = app.events.State()
|
||||
display = CursesMonitor(state, app)
|
||||
display.init_screen()
|
||||
refresher = DisplayThread(display)
|
||||
refresher.start()
|
||||
try:
|
||||
capture_events(app, state, display)
|
||||
except Exception:
|
||||
refresher.shutdown = True
|
||||
refresher.join()
|
||||
display.resetscreen()
|
||||
raise
|
||||
except (KeyboardInterrupt, SystemExit):
|
||||
refresher.shutdown = True
|
||||
refresher.join()
|
||||
display.resetscreen()
|
||||
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
evtop()
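# Usage sketch (illustrative; the project name and module path are
# assumptions): the monitor can also be started programmatically, which is
# equivalent to running ``celery -A proj events`` from the command line:
#
#     from proj.celery import app                  # hypothetical project app
#     from celery.events.cursesmon import evtop    # assumed path for this file
#
#     evtop(app=app)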
|
||||
@@ -0,0 +1,229 @@
|
||||
"""Event dispatcher sends events."""
|
||||
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
from collections import defaultdict, deque
|
||||
|
||||
from kombu import Producer
|
||||
|
||||
from celery.app import app_or_default
|
||||
from celery.utils.nodenames import anon_nodename
|
||||
from celery.utils.time import utcoffset
|
||||
|
||||
from .event import Event, get_exchange, group_from
|
||||
|
||||
__all__ = ('EventDispatcher',)
|
||||
|
||||
|
||||
class EventDispatcher:
|
||||
"""Dispatches event messages.
|
||||
|
||||
Arguments:
|
||||
connection (kombu.Connection): Connection to the broker.
|
||||
|
||||
hostname (str): Hostname to identify ourselves as,
|
||||
by default uses the hostname returned by
|
||||
:func:`~celery.utils.anon_nodename`.
|
||||
|
||||
groups (Sequence[str]): List of groups to send events for.
|
||||
:meth:`send` will ignore send requests to groups not in this list.
|
||||
If this is :const:`None`, all events will be sent.
|
||||
Example groups include ``"task"`` and ``"worker"``.
|
||||
|
||||
enabled (bool): Set to :const:`False` to not actually publish any
|
||||
events, making :meth:`send` a no-op.
|
||||
|
||||
channel (kombu.Channel): Can be used instead of `connection` to specify
|
||||
an exact channel to use when sending events.
|
||||
|
||||
buffer_while_offline (bool): If enabled events will be buffered
|
||||
while the connection is down. :meth:`flush` must be called
|
||||
as soon as the connection is re-established.
|
||||
|
||||
Note:
|
||||
You need to :meth:`close` this after use.
|
||||
"""
|
||||
|
||||
DISABLED_TRANSPORTS = {'sql'}
|
||||
|
||||
app = None
|
||||
|
||||
# set of callbacks to be called when :meth:`enabled`.
|
||||
on_enabled = None
|
||||
|
||||
# set of callbacks to be called when :meth:`disabled`.
|
||||
on_disabled = None
|
||||
|
||||
def __init__(self, connection=None, hostname=None, enabled=True,
|
||||
channel=None, buffer_while_offline=True, app=None,
|
||||
serializer=None, groups=None, delivery_mode=1,
|
||||
buffer_group=None, buffer_limit=24, on_send_buffered=None):
|
||||
self.app = app_or_default(app or self.app)
|
||||
self.connection = connection
|
||||
self.channel = channel
|
||||
self.hostname = hostname or anon_nodename()
|
||||
self.buffer_while_offline = buffer_while_offline
|
||||
self.buffer_group = buffer_group or frozenset()
|
||||
self.buffer_limit = buffer_limit
|
||||
self.on_send_buffered = on_send_buffered
|
||||
self._group_buffer = defaultdict(list)
|
||||
self.mutex = threading.Lock()
|
||||
self.producer = None
|
||||
self._outbound_buffer = deque()
|
||||
self.serializer = serializer or self.app.conf.event_serializer
|
||||
self.on_enabled = set()
|
||||
self.on_disabled = set()
|
||||
self.groups = set(groups or [])
|
||||
self.tzoffset = [-time.timezone, -time.altzone]
|
||||
self.clock = self.app.clock
|
||||
self.delivery_mode = delivery_mode
|
||||
if not connection and channel:
|
||||
self.connection = channel.connection.client
|
||||
self.enabled = enabled
|
||||
conninfo = self.connection or self.app.connection_for_write()
|
||||
self.exchange = get_exchange(conninfo,
|
||||
name=self.app.conf.event_exchange)
|
||||
if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
|
||||
self.enabled = False
|
||||
if self.enabled:
|
||||
self.enable()
|
||||
self.headers = {'hostname': self.hostname}
|
||||
self.pid = os.getpid()
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
self.close()
|
||||
|
||||
def enable(self):
|
||||
self.producer = Producer(self.channel or self.connection,
|
||||
exchange=self.exchange,
|
||||
serializer=self.serializer,
|
||||
auto_declare=False)
|
||||
self.enabled = True
|
||||
for callback in self.on_enabled:
|
||||
callback()
|
||||
|
||||
def disable(self):
|
||||
if self.enabled:
|
||||
self.enabled = False
|
||||
self.close()
|
||||
for callback in self.on_disabled:
|
||||
callback()
|
||||
|
||||
def publish(self, type, fields, producer,
|
||||
blind=False, Event=Event, **kwargs):
|
||||
"""Publish event using custom :class:`~kombu.Producer`.
|
||||
|
||||
Arguments:
|
||||
type (str): Event type name, with group separated by dash (`-`).
|
||||
fields: Dictionary of event fields, must be json serializable.
|
||||
producer (kombu.Producer): Producer instance to use:
|
||||
only the ``publish`` method will be called.
|
||||
retry (bool): Retry in the event of connection failure.
|
||||
retry_policy (Mapping): Map of custom retry policy options.
|
||||
See :meth:`~kombu.Connection.ensure`.
|
||||
blind (bool): Don't set logical clock value (also don't forward
|
||||
the internal logical clock).
|
||||
Event (Callable): Event type used to create event.
|
||||
Defaults to :func:`Event`.
|
||||
utcoffset (Callable): Function returning the current
|
||||
utc offset in hours.
|
||||
"""
|
||||
clock = None if blind else self.clock.forward()
|
||||
event = Event(type, hostname=self.hostname, utcoffset=utcoffset(),
|
||||
pid=self.pid, clock=clock, **fields)
|
||||
with self.mutex:
|
||||
return self._publish(event, producer,
|
||||
routing_key=type.replace('-', '.'), **kwargs)
|
||||
|
||||
def _publish(self, event, producer, routing_key, retry=False,
|
||||
retry_policy=None, utcoffset=utcoffset):
|
||||
exchange = self.exchange
|
||||
try:
|
||||
producer.publish(
|
||||
event,
|
||||
routing_key=routing_key,
|
||||
exchange=exchange.name,
|
||||
retry=retry,
|
||||
retry_policy=retry_policy,
|
||||
declare=[exchange],
|
||||
serializer=self.serializer,
|
||||
headers=self.headers,
|
||||
delivery_mode=self.delivery_mode,
|
||||
)
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
if not self.buffer_while_offline:
|
||||
raise
|
||||
self._outbound_buffer.append((event, routing_key, exc))
|
||||
|
||||
def send(self, type, blind=False, utcoffset=utcoffset, retry=False,
|
||||
retry_policy=None, Event=Event, **fields):
|
||||
"""Send event.
|
||||
|
||||
Arguments:
|
||||
type (str): Event type name, with group separated by dash (`-`).
|
||||
retry (bool): Retry in the event of connection failure.
|
||||
retry_policy (Mapping): Map of custom retry policy options.
|
||||
See :meth:`~kombu.Connection.ensure`.
|
||||
blind (bool): Don't set logical clock value (also don't forward
|
||||
the internal logical clock).
|
||||
Event (Callable): Event type used to create event,
|
||||
defaults to :func:`Event`.
|
||||
utcoffset (Callable): Function returning the current utc offset
|
||||
in hours.
|
||||
**fields (Any): Event fields -- must be json serializable.
|
||||
"""
|
||||
if self.enabled:
|
||||
groups, group = self.groups, group_from(type)
|
||||
if groups and group not in groups:
|
||||
return
|
||||
if group in self.buffer_group:
|
||||
clock = self.clock.forward()
|
||||
event = Event(type, hostname=self.hostname,
|
||||
utcoffset=utcoffset(),
|
||||
pid=self.pid, clock=clock, **fields)
|
||||
buf = self._group_buffer[group]
|
||||
buf.append(event)
|
||||
if len(buf) >= self.buffer_limit:
|
||||
self.flush()
|
||||
elif self.on_send_buffered:
|
||||
self.on_send_buffered()
|
||||
else:
|
||||
return self.publish(type, fields, self.producer, blind=blind,
|
||||
Event=Event, retry=retry,
|
||||
retry_policy=retry_policy)
|
||||
|
||||
def flush(self, errors=True, groups=True):
|
||||
"""Flush the outbound buffer."""
|
||||
if errors:
|
||||
buf = list(self._outbound_buffer)
|
||||
try:
|
||||
with self.mutex:
|
||||
for event, routing_key, _ in buf:
|
||||
self._publish(event, self.producer, routing_key)
|
||||
finally:
|
||||
self._outbound_buffer.clear()
|
||||
if groups:
|
||||
with self.mutex:
|
||||
for group, events in self._group_buffer.items():
|
||||
self._publish(events, self.producer, '%s.multi' % group)
|
||||
events[:] = [] # list.clear
|
||||
|
||||
def extend_buffer(self, other):
|
||||
"""Copy the outbound buffer of another instance."""
|
||||
self._outbound_buffer.extend(other._outbound_buffer)
|
||||
|
||||
def close(self):
|
||||
"""Close the event dispatcher."""
|
||||
self.mutex.locked() and self.mutex.release()
|
||||
self.producer = None
|
||||
|
||||
def _get_publisher(self):
|
||||
return self.producer
|
||||
|
||||
def _set_publisher(self, producer):
|
||||
self.producer = producer
|
||||
publisher = property(_get_publisher, _set_publisher) # XXX compat
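# --- Illustrative usage sketch: not part of this module.  The broker URL and
# --- event fields below are made up; ``default_dispatcher()`` is the app-level
# --- helper that constructs an EventDispatcher like the class above.
from celery import Celery

app = Celery('proj', broker='pyamqp://')  # hypothetical broker URL

with app.events.default_dispatcher() as dispatcher:
    # The type follows the "<group>-<name>" convention, so this event belongs
    # to the "task" group and is published with routing key "task.custom".
    # It is dropped if the dispatcher was created with a ``groups`` list that
    # does not include "task".
    dispatcher.send('task-custom', info='something happened')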
|
||||
@@ -0,0 +1,103 @@
|
||||
"""Utility to dump events to screen.
|
||||
|
||||
This is a simple program that dumps events to the console
|
||||
as they happen. Think of it like a `tcpdump` for Celery events.
|
||||
"""
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
from celery.app import app_or_default
|
||||
from celery.utils.functional import LRUCache
|
||||
from celery.utils.time import humanize_seconds
|
||||
|
||||
__all__ = ('Dumper', 'evdump')
|
||||
|
||||
TASK_NAMES = LRUCache(limit=0xFFF)
|
||||
|
||||
HUMAN_TYPES = {
|
||||
'worker-offline': 'shutdown',
|
||||
'worker-online': 'started',
|
||||
'worker-heartbeat': 'heartbeat',
|
||||
}
|
||||
|
||||
CONNECTION_ERROR = """\
|
||||
-> Cannot connect to %s: %s.
|
||||
Trying again %s
|
||||
"""
|
||||
|
||||
|
||||
def humanize_type(type):
|
||||
try:
|
||||
return HUMAN_TYPES[type.lower()]
|
||||
except KeyError:
|
||||
return type.lower().replace('-', ' ')
|
||||
|
||||
|
||||
class Dumper:
|
||||
"""Monitor events."""
|
||||
|
||||
def __init__(self, out=sys.stdout):
|
||||
self.out = out
|
||||
|
||||
def say(self, msg):
|
||||
print(msg, file=self.out)
|
||||
# need to flush so that output can be piped.
|
||||
try:
|
||||
self.out.flush()
|
||||
except AttributeError: # pragma: no cover
|
||||
pass
|
||||
|
||||
def on_event(self, ev):
|
||||
timestamp = datetime.utcfromtimestamp(ev.pop('timestamp'))
|
||||
type = ev.pop('type').lower()
|
||||
hostname = ev.pop('hostname')
|
||||
if type.startswith('task-'):
|
||||
uuid = ev.pop('uuid')
|
||||
if type in ('task-received', 'task-sent'):
|
||||
task = TASK_NAMES[uuid] = '{}({}) args={} kwargs={}' \
|
||||
.format(ev.pop('name'), uuid,
|
||||
ev.pop('args'),
|
||||
ev.pop('kwargs'))
|
||||
else:
|
||||
task = TASK_NAMES.get(uuid, '')
|
||||
return self.format_task_event(hostname, timestamp,
|
||||
type, task, ev)
|
||||
fields = ', '.join(
|
||||
f'{key}={ev[key]}' for key in sorted(ev)
|
||||
)
|
||||
sep = fields and ':' or ''
|
||||
self.say(f'{hostname} [{timestamp}] {humanize_type(type)}{sep} {fields}')
|
||||
|
||||
def format_task_event(self, hostname, timestamp, type, task, event):
|
||||
fields = ', '.join(
|
||||
f'{key}={event[key]}' for key in sorted(event)
|
||||
)
|
||||
sep = fields and ':' or ''
|
||||
self.say(f'{hostname} [{timestamp}] {humanize_type(type)}{sep} {task} {fields}')
|
||||
|
||||
|
||||
def evdump(app=None, out=sys.stdout):
|
||||
"""Start event dump."""
|
||||
app = app_or_default(app)
|
||||
dumper = Dumper(out=out)
|
||||
dumper.say('-> evdump: starting capture...')
|
||||
conn = app.connection_for_read().clone()
|
||||
|
||||
def _error_handler(exc, interval):
|
||||
dumper.say(CONNECTION_ERROR % (
|
||||
conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ')
|
||||
))
|
||||
|
||||
while 1:
|
||||
try:
|
||||
conn.ensure_connection(_error_handler)
|
||||
recv = app.events.Receiver(conn, handlers={'*': dumper.on_event})
|
||||
recv.capture()
|
||||
except (KeyboardInterrupt, SystemExit):
|
||||
return conn and conn.close()
|
||||
except conn.connection_errors + conn.channel_errors:
|
||||
dumper.say('-> Connection lost, attempting reconnect')
|
||||
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
evdump()
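# --- Illustrative sketch: not part of this module.  The project module name
# --- is an assumption; the CLI equivalent is ``celery -A proj events --dump``.
from proj.celery import app  # hypothetical project application

from celery.events.dumper import evdump  # assumed module path for this file

evdump(app=app)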
|
||||
@@ -0,0 +1,63 @@
|
||||
"""Creating events, and event exchange definition."""
|
||||
import time
|
||||
from copy import copy
|
||||
|
||||
from kombu import Exchange
|
||||
|
||||
__all__ = (
|
||||
'Event', 'event_exchange', 'get_exchange', 'group_from',
|
||||
)
|
||||
|
||||
EVENT_EXCHANGE_NAME = 'celeryev'
|
||||
#: Exchange used to send events on.
|
||||
#: Note: Use :func:`get_exchange` instead, as the type of
|
||||
#: exchange will vary depending on the broker connection.
|
||||
event_exchange = Exchange(EVENT_EXCHANGE_NAME, type='topic')
|
||||
|
||||
|
||||
def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
|
||||
"""Create an event.
|
||||
|
||||
Notes:
|
||||
An event is simply a dictionary: the only required field is ``type``.
|
||||
A ``timestamp`` field will be set to the current time if not provided.
|
||||
"""
|
||||
event = __dict__(_fields, **fields) if _fields else fields
|
||||
if 'timestamp' not in event:
|
||||
event.update(timestamp=__now__(), type=type)
|
||||
else:
|
||||
event['type'] = type
|
||||
return event
|
||||
|
||||
|
||||
def group_from(type):
|
||||
"""Get the group part of an event type name.
|
||||
|
||||
Example:
|
||||
>>> group_from('task-sent')
|
||||
'task'
|
||||
|
||||
>>> group_from('custom-my-event')
|
||||
'custom'
|
||||
"""
|
||||
return type.split('-', 1)[0]
|
||||
|
||||
|
||||
def get_exchange(conn, name=EVENT_EXCHANGE_NAME):
|
||||
"""Get exchange used for sending events.
|
||||
|
||||
Arguments:
|
||||
conn (kombu.Connection): Connection used for sending/receiving events.
|
||||
name (str): Name of the exchange. Default is ``celeryev``.
|
||||
|
||||
Note:
|
||||
The exchange type changes if Redis is used as the transport
|
||||
(from topic -> fanout).
|
||||
"""
|
||||
ex = copy(event_exchange)
|
||||
if conn.transport.driver_type == 'redis':
|
||||
# quick hack for Issue #436
|
||||
ex.type = 'fanout'
|
||||
if name != ex.name:
|
||||
ex.name = name
|
||||
return ex
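# --- Illustrative sketch: not part of this module; the field values are made
# --- up.  Event() only requires a type: a timestamp is added automatically,
# --- and group_from() recovers the group prefix used for routing.
ev = Event('task-succeeded', uuid='d9e6b8f1', runtime=0.23)
assert ev['type'] == 'task-succeeded'
assert 'timestamp' in ev                    # filled in automatically
assert group_from(ev['type']) == 'task'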
|
||||
@@ -0,0 +1,135 @@
|
||||
"""Event receiver implementation."""
|
||||
import time
|
||||
from operator import itemgetter
|
||||
|
||||
from kombu import Queue
|
||||
from kombu.connection import maybe_channel
|
||||
from kombu.mixins import ConsumerMixin
|
||||
|
||||
from celery import uuid
|
||||
from celery.app import app_or_default
|
||||
from celery.utils.time import adjust_timestamp
|
||||
|
||||
from .event import get_exchange
|
||||
|
||||
__all__ = ('EventReceiver',)
|
||||
|
||||
CLIENT_CLOCK_SKEW = -1
|
||||
|
||||
_TZGETTER = itemgetter('utcoffset', 'timestamp')
|
||||
|
||||
|
||||
class EventReceiver(ConsumerMixin):
|
||||
"""Capture events.
|
||||
|
||||
Arguments:
|
||||
connection (kombu.Connection): Connection to the broker.
|
||||
handlers (Mapping[Callable]): Event handlers.
|
||||
This is a map of event type names and their handlers.
|
||||
The special handler `"*"` captures all events that don't have a
|
||||
handler.
|
||||
"""
|
||||
|
||||
app = None
|
||||
|
||||
def __init__(self, channel, handlers=None, routing_key='#',
|
||||
node_id=None, app=None, queue_prefix=None,
|
||||
accept=None, queue_ttl=None, queue_expires=None):
|
||||
self.app = app_or_default(app or self.app)
|
||||
self.channel = maybe_channel(channel)
|
||||
self.handlers = {} if handlers is None else handlers
|
||||
self.routing_key = routing_key
|
||||
self.node_id = node_id or uuid()
|
||||
self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix
|
||||
self.exchange = get_exchange(
|
||||
self.connection or self.app.connection_for_write(),
|
||||
name=self.app.conf.event_exchange)
|
||||
if queue_ttl is None:
|
||||
queue_ttl = self.app.conf.event_queue_ttl
|
||||
if queue_expires is None:
|
||||
queue_expires = self.app.conf.event_queue_expires
|
||||
self.queue = Queue(
|
||||
'.'.join([self.queue_prefix, self.node_id]),
|
||||
exchange=self.exchange,
|
||||
routing_key=self.routing_key,
|
||||
auto_delete=True, durable=False,
|
||||
message_ttl=queue_ttl,
|
||||
expires=queue_expires,
|
||||
)
|
||||
self.clock = self.app.clock
|
||||
self.adjust_clock = self.clock.adjust
|
||||
self.forward_clock = self.clock.forward
|
||||
if accept is None:
|
||||
accept = {self.app.conf.event_serializer, 'json'}
|
||||
self.accept = accept
|
||||
|
||||
def process(self, type, event):
|
||||
"""Process event by dispatching to configured handler."""
|
||||
handler = self.handlers.get(type) or self.handlers.get('*')
|
||||
handler and handler(event)
|
||||
|
||||
def get_consumers(self, Consumer, channel):
|
||||
return [Consumer(queues=[self.queue],
|
||||
callbacks=[self._receive], no_ack=True,
|
||||
accept=self.accept)]
|
||||
|
||||
def on_consume_ready(self, connection, channel, consumers,
|
||||
wakeup=True, **kwargs):
|
||||
if wakeup:
|
||||
self.wakeup_workers(channel=channel)
|
||||
|
||||
def itercapture(self, limit=None, timeout=None, wakeup=True):
|
||||
return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)
|
||||
|
||||
def capture(self, limit=None, timeout=None, wakeup=True):
|
||||
"""Open up a consumer capturing events.
|
||||
|
||||
This has to run in the main process, and it will never stop
|
||||
unless :attr:`EventDispatcher.should_stop` is set to True, or
|
||||
forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
|
||||
"""
|
||||
for _ in self.consume(limit=limit, timeout=timeout, wakeup=wakeup):
|
||||
pass
|
||||
|
||||
def wakeup_workers(self, channel=None):
|
||||
self.app.control.broadcast('heartbeat',
|
||||
connection=self.connection,
|
||||
channel=channel)
|
||||
|
||||
def event_from_message(self, body, localize=True,
|
||||
now=time.time, tzfields=_TZGETTER,
|
||||
adjust_timestamp=adjust_timestamp,
|
||||
CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
|
||||
type = body['type']
|
||||
if type == 'task-sent':
|
||||
# clients never sync so cannot use their clock value
|
||||
_c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
|
||||
self.adjust_clock(_c)
|
||||
else:
|
||||
try:
|
||||
clock = body['clock']
|
||||
except KeyError:
|
||||
body['clock'] = self.forward_clock()
|
||||
else:
|
||||
self.adjust_clock(clock)
|
||||
|
||||
if localize:
|
||||
try:
|
||||
offset, timestamp = tzfields(body)
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
body['timestamp'] = adjust_timestamp(timestamp, offset)
|
||||
body['local_received'] = now()
|
||||
return type, body
|
||||
|
||||
def _receive(self, body, message, list=list, isinstance=isinstance):
|
||||
if isinstance(body, list): # celery 4.0+: List of events
|
||||
process, from_message = self.process, self.event_from_message
|
||||
[process(*from_message(event)) for event in body]
|
||||
else:
|
||||
self.process(*self.event_from_message(body))
|
||||
|
||||
@property
|
||||
def connection(self):
|
||||
return self.channel.connection.client if self.channel else None
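# --- Illustrative sketch: not part of this module.  Mirrors how evdump() and
# --- evtop() elsewhere in this commit consume events; the broker URL and the
# --- handler are assumptions.
from celery import Celery

app = Celery('proj', broker='pyamqp://')  # hypothetical broker URL


def on_task_succeeded(event):
    print('task finished: {}'.format(event['uuid']))


with app.connection_for_read() as conn:
    recv = app.events.Receiver(conn, handlers={
        'task-succeeded': on_task_succeeded,
        '*': lambda event: None,  # fallback: ignore everything else
    })
    recv.capture(limit=None, timeout=None, wakeup=True)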
|
||||
@@ -0,0 +1,111 @@
|
||||
"""Periodically store events in a database.
|
||||
|
||||
Consuming the events as a stream isn't always suitable,
|
||||
so this module implements a system to take snapshots of the
|
||||
state of a cluster at regular intervals. There's a full
|
||||
implementation of this writing the snapshots to a database
|
||||
in :mod:`djcelery.snapshots` in the `django-celery` distribution.
|
||||
"""
|
||||
from kombu.utils.limits import TokenBucket
|
||||
|
||||
from celery import platforms
|
||||
from celery.app import app_or_default
|
||||
from celery.utils.dispatch import Signal
|
||||
from celery.utils.imports import instantiate
|
||||
from celery.utils.log import get_logger
|
||||
from celery.utils.time import rate
|
||||
from celery.utils.timer2 import Timer
|
||||
|
||||
__all__ = ('Polaroid', 'evcam')
|
||||
|
||||
logger = get_logger('celery.evcam')
|
||||
|
||||
|
||||
class Polaroid:
|
||||
"""Record event snapshots."""
|
||||
|
||||
timer = None
|
||||
shutter_signal = Signal(name='shutter_signal', providing_args={'state'})
|
||||
cleanup_signal = Signal(name='cleanup_signal')
|
||||
clear_after = False
|
||||
|
||||
_tref = None
|
||||
_ctref = None
|
||||
|
||||
def __init__(self, state, freq=1.0, maxrate=None,
|
||||
cleanup_freq=3600.0, timer=None, app=None):
|
||||
self.app = app_or_default(app)
|
||||
self.state = state
|
||||
self.freq = freq
|
||||
self.cleanup_freq = cleanup_freq
|
||||
self.timer = timer or self.timer or Timer()
|
||||
self.logger = logger
|
||||
self.maxrate = maxrate and TokenBucket(rate(maxrate))
|
||||
|
||||
def install(self):
|
||||
self._tref = self.timer.call_repeatedly(self.freq, self.capture)
|
||||
self._ctref = self.timer.call_repeatedly(
|
||||
self.cleanup_freq, self.cleanup,
|
||||
)
|
||||
|
||||
def on_shutter(self, state):
|
||||
pass
|
||||
|
||||
def on_cleanup(self):
|
||||
pass
|
||||
|
||||
def cleanup(self):
|
||||
logger.debug('Cleanup: Running...')
|
||||
self.cleanup_signal.send(sender=self.state)
|
||||
self.on_cleanup()
|
||||
|
||||
def shutter(self):
|
||||
if self.maxrate is None or self.maxrate.can_consume():
|
||||
logger.debug('Shutter: %s', self.state)
|
||||
self.shutter_signal.send(sender=self.state)
|
||||
self.on_shutter(self.state)
|
||||
|
||||
def capture(self):
|
||||
self.state.freeze_while(self.shutter, clear_after=self.clear_after)
|
||||
|
||||
def cancel(self):
|
||||
if self._tref:
|
||||
self._tref() # flush all received events.
|
||||
self._tref.cancel()
|
||||
if self._ctref:
|
||||
self._ctref.cancel()
|
||||
|
||||
def __enter__(self):
|
||||
self.install()
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
self.cancel()
|
||||
|
||||
|
||||
def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
|
||||
logfile=None, pidfile=None, timer=None, app=None,
|
||||
**kwargs):
|
||||
"""Start snapshot recorder."""
|
||||
app = app_or_default(app)
|
||||
|
||||
if pidfile:
|
||||
platforms.create_pidlock(pidfile)
|
||||
|
||||
app.log.setup_logging_subsystem(loglevel, logfile)
|
||||
|
||||
print(f'-> evcam: Taking snapshots with {camera} (every {freq} secs.)')
|
||||
state = app.events.State()
|
||||
cam = instantiate(camera, state, app=app, freq=freq,
|
||||
maxrate=maxrate, timer=timer)
|
||||
cam.install()
|
||||
conn = app.connection_for_read()
|
||||
recv = app.events.Receiver(conn, handlers={'*': state.event})
|
||||
try:
|
||||
try:
|
||||
recv.capture(limit=None)
|
||||
except KeyboardInterrupt:
|
||||
raise SystemExit
|
||||
finally:
|
||||
cam.cancel()
|
||||
conn.close()
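# --- Illustrative sketch: not part of this module.  A custom camera only needs
# --- to override on_shutter(); the class name and import path are assumptions.
from pprint import pformat

from celery.events.snapshot import Polaroid  # assumed module path for this file


class DumpCam(Polaroid):
    clear_after = True  # clear the captured state after each shutter

    def on_shutter(self, state):
        if not state.event_count:
            return  # no new events since the last snapshot
        print('Workers: {}'.format(pformat(state.workers, indent=4)))
        print('Tasks: {}'.format(pformat(state.tasks, indent=4)))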
|
||||
@@ -0,0 +1,730 @@
|
||||
"""In-memory representation of cluster state.
|
||||
|
||||
This module implements a data-structure used to keep
|
||||
track of the state of a cluster of workers and the tasks
|
||||
it is working on (by consuming events).
|
||||
|
||||
For every event consumed the state is updated,
|
||||
so the state represents the state of the cluster
|
||||
at the time of the last event.
|
||||
|
||||
Snapshots (:mod:`celery.events.snapshot`) can be used to
|
||||
take "pictures" of this state at regular intervals
|
||||
to, for example, store that in a database.
|
||||
"""
|
||||
import bisect
|
||||
import sys
|
||||
import threading
|
||||
from collections import defaultdict
|
||||
from collections.abc import Callable
|
||||
from datetime import datetime
|
||||
from decimal import Decimal
|
||||
from itertools import islice
|
||||
from operator import itemgetter
|
||||
from time import time
|
||||
from typing import Mapping, Optional # noqa
|
||||
from weakref import WeakSet, ref
|
||||
|
||||
from kombu.clocks import timetuple
|
||||
from kombu.utils.objects import cached_property
|
||||
|
||||
from celery import states
|
||||
from celery.utils.functional import LRUCache, memoize, pass1
|
||||
from celery.utils.log import get_logger
|
||||
|
||||
__all__ = ('Worker', 'Task', 'State', 'heartbeat_expires')
|
||||
|
||||
# pylint: disable=redefined-outer-name
|
||||
# We cache globals and attribute lookups, so disable this warning.
|
||||
# pylint: disable=too-many-function-args
|
||||
# For some reason pylint thinks ._event is a method, when it's a property.
|
||||
|
||||
#: Set if running PyPy
|
||||
PYPY = hasattr(sys, 'pypy_version_info')
|
||||
|
||||
#: The window (as a percentage) is added to the worker's heartbeat
|
||||
#: frequency. If the time between updates exceeds this window,
|
||||
#: then the worker is considered to be offline.
|
||||
HEARTBEAT_EXPIRE_WINDOW = 200
|
||||
|
||||
#: Max drift between event timestamp and time of event received
|
||||
#: before we alert that clocks may be unsynchronized.
|
||||
HEARTBEAT_DRIFT_MAX = 16
|
||||
|
||||
DRIFT_WARNING = (
|
||||
"Substantial drift from %s may mean clocks are out of sync. Current drift is "
|
||||
"%s seconds. [orig: %s recv: %s]"
|
||||
)
|
||||
|
||||
logger = get_logger(__name__)
|
||||
warn = logger.warning
|
||||
|
||||
R_STATE = '<State: events={0.event_count} tasks={0.task_count}>'
|
||||
R_WORKER = '<Worker: {0.hostname} ({0.status_string} clock:{0.clock})'
|
||||
R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'
|
||||
|
||||
#: Mapping of task event names to task state.
|
||||
TASK_EVENT_TO_STATE = {
|
||||
'sent': states.PENDING,
|
||||
'received': states.RECEIVED,
|
||||
'started': states.STARTED,
|
||||
'failed': states.FAILURE,
|
||||
'retried': states.RETRY,
|
||||
'succeeded': states.SUCCESS,
|
||||
'revoked': states.REVOKED,
|
||||
'rejected': states.REJECTED,
|
||||
}
|
||||
|
||||
|
||||
class CallableDefaultdict(defaultdict):
|
||||
""":class:`~collections.defaultdict` with configurable __call__.
|
||||
|
||||
We use this for backwards compatibility in State.tasks_by_type
|
||||
etc, which used to be a method but is now an index instead.
|
||||
|
||||
So you can do::
|
||||
|
||||
>>> add_tasks = state.tasks_by_type['proj.tasks.add']
|
||||
|
||||
while still supporting the method call::
|
||||
|
||||
>>> add_tasks = list(state.tasks_by_type(
|
||||
... 'proj.tasks.add', reverse=True))
|
||||
"""
|
||||
|
||||
def __init__(self, fun, *args, **kwargs):
|
||||
self.fun = fun
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
return self.fun(*args, **kwargs)
|
||||
|
||||
|
||||
Callable.register(CallableDefaultdict)
|
||||
|
||||
|
||||
@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
|
||||
def _warn_drift(hostname, drift, local_received, timestamp):
|
||||
# we use memoize here so the warning is only logged once per hostname
|
||||
warn(DRIFT_WARNING, hostname, drift,
|
||||
datetime.fromtimestamp(local_received),
|
||||
datetime.fromtimestamp(timestamp))
|
||||
|
||||
|
||||
def heartbeat_expires(timestamp, freq=60,
|
||||
expire_window=HEARTBEAT_EXPIRE_WINDOW,
|
||||
Decimal=Decimal, float=float, isinstance=isinstance):
|
||||
"""Return time when heartbeat expires."""
|
||||
# some json implementations returns decimal.Decimal objects,
|
||||
# which aren't compatible with float.
|
||||
freq = float(freq) if isinstance(freq, Decimal) else freq
|
||||
if isinstance(timestamp, Decimal):
|
||||
timestamp = float(timestamp)
|
||||
return timestamp + (freq * (expire_window / 1e2))
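    # e.g. with the defaults (freq=60, expire_window=200) a worker is treated
    # as offline once 60 * (200 / 100) = 120 seconds pass with no heartbeat:
    #   heartbeat_expires(1000.0, freq=60) -> 1120.0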
|
||||
|
||||
|
||||
def _depickle_task(cls, fields):
|
||||
return cls(**fields)
|
||||
|
||||
|
||||
def with_unique_field(attr):
|
||||
|
||||
def _decorate_cls(cls):
|
||||
|
||||
def __eq__(this, other):
|
||||
if isinstance(other, this.__class__):
|
||||
return getattr(this, attr) == getattr(other, attr)
|
||||
return NotImplemented
|
||||
cls.__eq__ = __eq__
|
||||
|
||||
def __hash__(this):
|
||||
return hash(getattr(this, attr))
|
||||
cls.__hash__ = __hash__
|
||||
|
||||
return cls
|
||||
return _decorate_cls
|
||||
|
||||
|
||||
@with_unique_field('hostname')
|
||||
class Worker:
|
||||
"""Worker State."""
|
||||
|
||||
heartbeat_max = 4
|
||||
expire_window = HEARTBEAT_EXPIRE_WINDOW
|
||||
|
||||
_fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock',
|
||||
'active', 'processed', 'loadavg', 'sw_ident',
|
||||
'sw_ver', 'sw_sys')
|
||||
if not PYPY: # pragma: no cover
|
||||
__slots__ = _fields + ('event', '__dict__', '__weakref__')
|
||||
|
||||
def __init__(self, hostname=None, pid=None, freq=60,
|
||||
heartbeats=None, clock=0, active=None, processed=None,
|
||||
loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None):
|
||||
self.hostname = hostname
|
||||
self.pid = pid
|
||||
self.freq = freq
|
||||
self.heartbeats = [] if heartbeats is None else heartbeats
|
||||
self.clock = clock or 0
|
||||
self.active = active
|
||||
self.processed = processed
|
||||
self.loadavg = loadavg
|
||||
self.sw_ident = sw_ident
|
||||
self.sw_ver = sw_ver
|
||||
self.sw_sys = sw_sys
|
||||
self.event = self._create_event_handler()
|
||||
|
||||
def __reduce__(self):
|
||||
return self.__class__, (self.hostname, self.pid, self.freq,
|
||||
self.heartbeats, self.clock, self.active,
|
||||
self.processed, self.loadavg, self.sw_ident,
|
||||
self.sw_ver, self.sw_sys)
|
||||
|
||||
def _create_event_handler(self):
|
||||
_set = object.__setattr__
|
||||
hbmax = self.heartbeat_max
|
||||
heartbeats = self.heartbeats
|
||||
hb_pop = self.heartbeats.pop
|
||||
hb_append = self.heartbeats.append
|
||||
|
||||
def event(type_, timestamp=None,
|
||||
local_received=None, fields=None,
|
||||
max_drift=HEARTBEAT_DRIFT_MAX, abs=abs, int=int,
|
||||
insort=bisect.insort, len=len):
|
||||
fields = fields or {}
|
||||
for k, v in fields.items():
|
||||
_set(self, k, v)
|
||||
if type_ == 'offline':
|
||||
heartbeats[:] = []
|
||||
else:
|
||||
if not local_received or not timestamp:
|
||||
return
|
||||
drift = abs(int(local_received) - int(timestamp))
|
||||
if drift > max_drift:
|
||||
_warn_drift(self.hostname, drift,
|
||||
local_received, timestamp)
|
||||
if local_received: # pragma: no cover
|
||||
hearts = len(heartbeats)
|
||||
if hearts > hbmax - 1:
|
||||
hb_pop(0)
|
||||
if hearts and local_received > heartbeats[-1]:
|
||||
hb_append(local_received)
|
||||
else:
|
||||
insort(heartbeats, local_received)
|
||||
return event
|
||||
|
||||
def update(self, f, **kw):
|
||||
d = dict(f, **kw) if kw else f
|
||||
for k, v in d.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
def __repr__(self):
|
||||
return R_WORKER.format(self)
|
||||
|
||||
@property
|
||||
def status_string(self):
|
||||
return 'ONLINE' if self.alive else 'OFFLINE'
|
||||
|
||||
@property
|
||||
def heartbeat_expires(self):
|
||||
return heartbeat_expires(self.heartbeats[-1],
|
||||
self.freq, self.expire_window)
|
||||
|
||||
@property
|
||||
def alive(self, nowfun=time):
|
||||
return bool(self.heartbeats and nowfun() < self.heartbeat_expires)
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return '{0.hostname}.{0.pid}'.format(self)
|
||||
|
||||
|
||||
@with_unique_field('uuid')
|
||||
class Task:
|
||||
"""Task State."""
|
||||
|
||||
name = received = sent = started = succeeded = failed = retried = \
|
||||
revoked = rejected = args = kwargs = eta = expires = retries = \
|
||||
worker = result = exception = timestamp = runtime = traceback = \
|
||||
exchange = routing_key = root_id = parent_id = client = None
|
||||
state = states.PENDING
|
||||
clock = 0
|
||||
|
||||
_fields = (
|
||||
'uuid', 'name', 'state', 'received', 'sent', 'started', 'rejected',
|
||||
'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs',
|
||||
'eta', 'expires', 'retries', 'worker', 'result', 'exception',
|
||||
'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key',
|
||||
'clock', 'client', 'root', 'root_id', 'parent', 'parent_id',
|
||||
'children',
|
||||
)
|
||||
if not PYPY: # pragma: no cover
|
||||
__slots__ = ('__dict__', '__weakref__')
|
||||
|
||||
#: How to merge out of order events.
|
||||
#: Disorder is detected by logical ordering (e.g., :event:`task-received`
|
||||
#: must've happened before a :event:`task-failed` event).
|
||||
#:
|
||||
#: A merge rule consists of a state and a list of fields to keep from
|
||||
#: that state. ``(RECEIVED, ('name', 'args'))`` means the name and args
|
||||
#: fields are always taken from the RECEIVED state, and any values for
|
||||
#: these fields received before or after are simply ignored.
|
||||
merge_rules = {
|
||||
states.RECEIVED: (
|
||||
'name', 'args', 'kwargs', 'parent_id',
|
||||
'root_id', 'retries', 'eta', 'expires',
|
||||
),
|
||||
}
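    # For example: if a task-failed event arrives before the logically earlier
    # task-received event, the rule above still takes 'name', 'args', etc.
    # from the late RECEIVED event, while the state remains FAILURE.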
|
||||
|
||||
#: :meth:`info` displays these fields by default.
|
||||
_info_fields = (
|
||||
'args', 'kwargs', 'retries', 'result', 'eta', 'runtime',
|
||||
'expires', 'exception', 'exchange', 'routing_key',
|
||||
'root_id', 'parent_id',
|
||||
)
|
||||
|
||||
def __init__(self, uuid=None, cluster_state=None, children=None, **kwargs):
|
||||
self.uuid = uuid
|
||||
self.cluster_state = cluster_state
|
||||
if self.cluster_state is not None:
|
||||
self.children = WeakSet(
|
||||
self.cluster_state.tasks.get(task_id)
|
||||
for task_id in children or ()
|
||||
if task_id in self.cluster_state.tasks
|
||||
)
|
||||
else:
|
||||
self.children = WeakSet()
|
||||
self._serializer_handlers = {
|
||||
'children': self._serializable_children,
|
||||
'root': self._serializable_root,
|
||||
'parent': self._serializable_parent,
|
||||
}
|
||||
if kwargs:
|
||||
self.__dict__.update(kwargs)
|
||||
|
||||
def event(self, type_, timestamp=None, local_received=None, fields=None,
|
||||
precedence=states.precedence, setattr=setattr,
|
||||
task_event_to_state=TASK_EVENT_TO_STATE.get, RETRY=states.RETRY):
|
||||
fields = fields or {}
|
||||
|
||||
# using .get is faster than catching KeyError in this case.
|
||||
state = task_event_to_state(type_)
|
||||
if state is not None:
|
||||
# sets, for example, self.succeeded to the timestamp.
|
||||
setattr(self, type_, timestamp)
|
||||
else:
|
||||
state = type_.upper() # custom state
|
||||
|
||||
# note that precedence here is reversed
|
||||
# see implementation in celery.states.state.__lt__
|
||||
if state != RETRY and self.state != RETRY and \
|
||||
precedence(state) > precedence(self.state):
|
||||
# this state logically happens-before the current state, so merge.
|
||||
keep = self.merge_rules.get(state)
|
||||
if keep is not None:
|
||||
fields = {
|
||||
k: v for k, v in fields.items() if k in keep
|
||||
}
|
||||
else:
|
||||
fields.update(state=state, timestamp=timestamp)
|
||||
|
||||
# update current state with info from this event.
|
||||
self.__dict__.update(fields)
|
||||
|
||||
def info(self, fields=None, extra=None):
|
||||
"""Information about this task suitable for on-screen display."""
|
||||
extra = [] if not extra else extra
|
||||
fields = self._info_fields if fields is None else fields
|
||||
|
||||
def _keys():
|
||||
for key in list(fields) + list(extra):
|
||||
value = getattr(self, key, None)
|
||||
if value is not None:
|
||||
yield key, value
|
||||
|
||||
return dict(_keys())
|
||||
|
||||
def __repr__(self):
|
||||
return R_TASK.format(self)
|
||||
|
||||
def as_dict(self):
|
||||
get = object.__getattribute__
|
||||
handler = self._serializer_handlers.get
|
||||
return {
|
||||
k: handler(k, pass1)(get(self, k)) for k in self._fields
|
||||
}
|
||||
|
||||
def _serializable_children(self, value):
|
||||
return [task.id for task in self.children]
|
||||
|
||||
def _serializable_root(self, value):
|
||||
return self.root_id
|
||||
|
||||
def _serializable_parent(self, value):
|
||||
return self.parent_id
|
||||
|
||||
def __reduce__(self):
|
||||
return _depickle_task, (self.__class__, self.as_dict())
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.uuid
|
||||
|
||||
@property
|
||||
def origin(self):
|
||||
return self.client if self.worker is None else self.worker.id
|
||||
|
||||
@property
|
||||
def ready(self):
|
||||
return self.state in states.READY_STATES
|
||||
|
||||
@cached_property
|
||||
def parent(self):
|
||||
# issue github.com/mher/flower/issues/648
|
||||
try:
|
||||
return self.parent_id and self.cluster_state.tasks.data[self.parent_id]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
@cached_property
|
||||
def root(self):
|
||||
# issue github.com/mher/flower/issues/648
|
||||
try:
|
||||
return self.root_id and self.cluster_state.tasks.data[self.root_id]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
|
||||
class State:
|
||||
"""Records clusters state."""
|
||||
|
||||
Worker = Worker
|
||||
Task = Task
|
||||
event_count = 0
|
||||
task_count = 0
|
||||
heap_multiplier = 4
|
||||
|
||||
def __init__(self, callback=None,
|
||||
workers=None, tasks=None, taskheap=None,
|
||||
max_workers_in_memory=5000, max_tasks_in_memory=10000,
|
||||
on_node_join=None, on_node_leave=None,
|
||||
tasks_by_type=None, tasks_by_worker=None):
|
||||
self.event_callback = callback
|
||||
self.workers = (LRUCache(max_workers_in_memory)
|
||||
if workers is None else workers)
|
||||
self.tasks = (LRUCache(max_tasks_in_memory)
|
||||
if tasks is None else tasks)
|
||||
self._taskheap = [] if taskheap is None else taskheap
|
||||
self.max_workers_in_memory = max_workers_in_memory
|
||||
self.max_tasks_in_memory = max_tasks_in_memory
|
||||
self.on_node_join = on_node_join
|
||||
self.on_node_leave = on_node_leave
|
||||
self._mutex = threading.Lock()
|
||||
self.handlers = {}
|
||||
self._seen_types = set()
|
||||
self._tasks_to_resolve = {}
|
||||
self.rebuild_taskheap()
|
||||
|
||||
self.tasks_by_type = CallableDefaultdict(
|
||||
self._tasks_by_type, WeakSet) # type: Mapping[str, WeakSet[Task]]
|
||||
self.tasks_by_type.update(
|
||||
_deserialize_Task_WeakSet_Mapping(tasks_by_type, self.tasks))
|
||||
|
||||
self.tasks_by_worker = CallableDefaultdict(
|
||||
self._tasks_by_worker, WeakSet) # type: Mapping[str, WeakSet[Task]]
|
||||
self.tasks_by_worker.update(
|
||||
_deserialize_Task_WeakSet_Mapping(tasks_by_worker, self.tasks))
|
||||
|
||||
@cached_property
|
||||
def _event(self):
|
||||
return self._create_dispatcher()
|
||||
|
||||
def freeze_while(self, fun, *args, **kwargs):
|
||||
clear_after = kwargs.pop('clear_after', False)
|
||||
with self._mutex:
|
||||
try:
|
||||
return fun(*args, **kwargs)
|
||||
finally:
|
||||
if clear_after:
|
||||
self._clear()
|
||||
|
||||
def clear_tasks(self, ready=True):
|
||||
with self._mutex:
|
||||
return self._clear_tasks(ready)
|
||||
|
||||
def _clear_tasks(self, ready: bool = True):
|
||||
if ready:
|
||||
in_progress = {
|
||||
uuid: task for uuid, task in self.itertasks()
|
||||
if task.state not in states.READY_STATES
|
||||
}
|
||||
self.tasks.clear()
|
||||
self.tasks.update(in_progress)
|
||||
else:
|
||||
self.tasks.clear()
|
||||
self._taskheap[:] = []
|
||||
|
||||
def _clear(self, ready=True):
|
||||
self.workers.clear()
|
||||
self._clear_tasks(ready)
|
||||
self.event_count = 0
|
||||
self.task_count = 0
|
||||
|
||||
def clear(self, ready: bool = True):
|
||||
with self._mutex:
|
||||
return self._clear(ready)
|
||||
|
||||
def get_or_create_worker(self, hostname, **kwargs):
|
||||
"""Get or create worker by hostname.
|
||||
|
||||
Returns:
|
||||
Tuple: of ``(worker, was_created)`` pairs.
|
||||
"""
|
||||
try:
|
||||
worker = self.workers[hostname]
|
||||
if kwargs:
|
||||
worker.update(kwargs)
|
||||
return worker, False
|
||||
except KeyError:
|
||||
worker = self.workers[hostname] = self.Worker(
|
||||
hostname, **kwargs)
|
||||
return worker, True
|
||||
|
||||
def get_or_create_task(self, uuid):
|
||||
"""Get or create task by uuid."""
|
||||
try:
|
||||
return self.tasks[uuid], False
|
||||
except KeyError:
|
||||
task = self.tasks[uuid] = self.Task(uuid, cluster_state=self)
|
||||
return task, True
|
||||
|
||||
def event(self, event):
|
||||
with self._mutex:
|
||||
return self._event(event)
|
||||
|
||||
def task_event(self, type_, fields):
|
||||
"""Deprecated, use :meth:`event`."""
|
||||
return self._event(dict(fields, type='-'.join(['task', type_])))[0]
|
||||
|
||||
def worker_event(self, type_, fields):
|
||||
"""Deprecated, use :meth:`event`."""
|
||||
return self._event(dict(fields, type='-'.join(['worker', type_])))[0]
|
||||
|
||||
    def _create_dispatcher(self):

        # pylint: disable=too-many-statements
        # This code is highly optimized, but not for reusability.
        get_handler = self.handlers.__getitem__
        event_callback = self.event_callback
        wfields = itemgetter('hostname', 'timestamp', 'local_received')
        tfields = itemgetter('uuid', 'hostname', 'timestamp',
                             'local_received', 'clock')
        taskheap = self._taskheap
        th_append = taskheap.append
        th_pop = taskheap.pop
        # Removing events from task heap is an O(n) operation,
        # so easier to just account for the common number of events
        # for each task (PENDING->RECEIVED->STARTED->final)
        #: an O(n) operation
        max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier
        add_type = self._seen_types.add
        on_node_join, on_node_leave = self.on_node_join, self.on_node_leave
        tasks, Task = self.tasks, self.Task
        workers, Worker = self.workers, self.Worker
        # avoid updating LRU entry at getitem
        get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__

        get_task_by_type_set = self.tasks_by_type.__getitem__
        get_task_by_worker_set = self.tasks_by_worker.__getitem__

        def _event(event,
                   timetuple=timetuple, KeyError=KeyError,
                   insort=bisect.insort, created=True):
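            # The default arguments above bind frequently used names as
            # locals (and give ``created`` a default of ``True``) to avoid
            # repeated global/attribute lookups in this hot path.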
            self.event_count += 1
            if event_callback:
                event_callback(self, event)
            group, _, subject = event['type'].partition('-')
            try:
                handler = get_handler(group)
            except KeyError:
                pass
            else:
                return handler(subject, event), subject

            if group == 'worker':
                try:
                    hostname, timestamp, local_received = wfields(event)
                except KeyError:
                    pass
                else:
                    is_offline = subject == 'offline'
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        if is_offline:
                            worker, created = Worker(hostname), False
                        else:
                            worker = workers[hostname] = Worker(hostname)
                    worker.event(subject, timestamp, local_received, event)
                    if on_node_join and (created or subject == 'online'):
                        on_node_join(worker)
                    if on_node_leave and is_offline:
                        on_node_leave(worker)
                        workers.pop(hostname, None)
                    return (worker, created), subject
            elif group == 'task':
                (uuid, hostname, timestamp,
                 local_received, clock) = tfields(event)
                # task-sent event is sent by client, not worker
                is_client_event = subject == 'sent'
                try:
                    task, task_created = get_task(uuid), False
                except KeyError:
                    task = tasks[uuid] = Task(uuid, cluster_state=self)
                    task_created = True
                if is_client_event:
                    task.client = hostname
                else:
                    try:
                        worker = get_worker(hostname)
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)
                    task.worker = worker
                    if worker is not None and local_received:
                        worker.event(None, local_received, timestamp)

                origin = hostname if is_client_event else worker.id

                # remove oldest event if exceeding the limit.
                heaps = len(taskheap)
                if heaps + 1 > max_events_in_heap:
                    th_pop(0)

                # most events will be dated later than the previous.
                timetup = timetuple(clock, timestamp, origin, ref(task))
                if heaps and timetup > taskheap[-1]:
                    th_append(timetup)
                else:
                    insort(taskheap, timetup)

                if subject == 'received':
                    self.task_count += 1
                task.event(subject, timestamp, local_received, event)
                task_name = task.name
                if task_name is not None:
                    add_type(task_name)
                    if task_created:  # add to tasks_by_type index
                        get_task_by_type_set(task_name).add(task)
                get_task_by_worker_set(hostname).add(task)
                if task.parent_id:
                    try:
                        parent_task = self.tasks[task.parent_id]
                    except KeyError:
                        self._add_pending_task_child(task)
                    else:
                        parent_task.children.add(task)
                try:
                    _children = self._tasks_to_resolve.pop(uuid)
                except KeyError:
                    pass
                else:
                    task.children.update(_children)

                return (task, task_created), subject
        return _event

    def _add_pending_task_child(self, task):
        try:
            ch = self._tasks_to_resolve[task.parent_id]
        except KeyError:
            ch = self._tasks_to_resolve[task.parent_id] = WeakSet()
        ch.add(task)

    def rebuild_taskheap(self, timetuple=timetuple):
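        # Recreate the time-ordered event heap from the Task objects
        # currently held in memory (e.g. after unpickling, where the
        # heap itself is not serialized).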
        heap = self._taskheap[:] = [
            timetuple(t.clock, t.timestamp, t.origin, ref(t))
            for t in self.tasks.values()
        ]
        heap.sort()

    def itertasks(self, limit: Optional[int] = None):
        for index, row in enumerate(self.tasks.items()):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None, reverse: bool = True):
        """Generator yielding tasks ordered by time.

        Yields:
            Tuples of ``(uuid, Task)``.
        """
        _heap = self._taskheap
        if reverse:
            _heap = reversed(_heap)

        seen = set()
        for evtup in islice(_heap, 0, limit):
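            # each heap entry is ``(clock, timestamp, origin, weakref)``;
            # dereference the weakref, which may return None if the Task
            # has already been garbage-collected.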
            task = evtup[3]()
            if task is not None:
                uuid = task.uuid
                if uuid not in seen:
                    yield uuid, task
                    seen.add(uuid)
    tasks_by_timestamp = tasks_by_time

    def _tasks_by_type(self, name, limit=None, reverse=True):
        """Get all tasks by type.

        This is slower than accessing :attr:`tasks_by_type`,
        but will be ordered by time.

        Returns:
            Generator: giving ``(uuid, Task)`` pairs.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse)
             if task.name == name),
            0, limit,
        )

    def _tasks_by_worker(self, hostname, limit=None, reverse=True):
        """Get all tasks by worker.

        Slower than accessing :attr:`tasks_by_worker`, but ordered by time.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse)
             if task.worker.hostname == hostname),
            0, limit,
        )

    def task_types(self):
        """Return a list of all seen task types."""
        return sorted(self._seen_types)

    def alive_workers(self):
        """Return a generator of (seemingly) alive workers."""
        return (w for w in self.workers.values() if w.alive)

    def __repr__(self):
        return R_STATE.format(self)

    def __reduce__(self):
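        # The task heap is not pickled (``None`` is passed for ``taskheap``);
        # ``__init__`` rebuilds it via ``rebuild_taskheap()``.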
        return self.__class__, (
            self.event_callback, self.workers, self.tasks, None,
            self.max_workers_in_memory, self.max_tasks_in_memory,
            self.on_node_join, self.on_node_leave,
            _serialize_Task_WeakSet_Mapping(self.tasks_by_type),
            _serialize_Task_WeakSet_Mapping(self.tasks_by_worker),
        )


def _serialize_Task_WeakSet_Mapping(mapping):
    return {name: [t.id for t in tasks] for name, tasks in mapping.items()}


def _deserialize_Task_WeakSet_Mapping(mapping, tasks):
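    # Rebuild WeakSets from stored task ids, skipping ids whose Task
    # objects are no longer present in the bounded ``tasks`` mapping.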
    mapping = mapping or {}
    return {name: WeakSet(tasks[i] for i in ids if i in tasks)
            for name, ids in mapping.items()}