GNXSOFT.COM

This commit is contained in:
Iliyan Angelov
2025-09-26 00:15:37 +03:00
commit fe26b7cca4
16323 changed files with 2011881 additions and 0 deletions

View File

@@ -0,0 +1,160 @@
"""Beat command-line program.
This module is the 'program-version' of :mod:`celery.beat`.
It does everything necessary to run that module
as an actual application, like installing signal handlers
and so on.
"""
from __future__ import annotations
import numbers
import socket
import sys
from datetime import datetime
from signal import Signals
from types import FrameType
from typing import Any
from celery import VERSION_BANNER, Celery, beat, platforms
from celery.utils.imports import qualname
from celery.utils.log import LOG_LEVELS, get_logger
from celery.utils.time import humanize_seconds
__all__ = ('Beat',)

# Template for the informational block printed when beat starts up;
# the placeholders are filled in by ``Beat.startup_info``.
STARTUP_INFO_FMT = """
LocalTime -> {timestamp}
Configuration ->
. broker -> {conninfo}
. loader -> {loader}
. scheduler -> {scheduler}
{scheduler_info}
. logfile -> {logfile}@%{loglevel}
. maxinterval -> {hmax_interval} ({max_interval}s)
""".strip()

# Module-level logger shared with :mod:`celery.beat`.
logger = get_logger('celery.beat')
class Beat:
    """Beat as a service.

    Wraps :class:`celery.beat.Service` with everything needed to run it
    as a stand-alone program: configuration fallbacks, banner printing,
    logging setup, process title and signal handlers.
    """

    # Service class used to run the scheduler; overridable (e.g. in tests).
    Service = beat.Service
    # Bound app instance; the class-level default is replaced in __init__.
    app: Celery | None = None

    def __init__(self, max_interval: int | None = None, app: Celery | None = None,
                 socket_timeout: int = 30, pidfile: str | None = None, no_color: bool | None = None,
                 loglevel: str = 'WARN', logfile: str | None = None, schedule: str | None = None,
                 scheduler: str | None = None,
                 scheduler_cls: str | None = None,  # XXX use scheduler
                 redirect_stdouts: bool | None = None,
                 redirect_stdouts_level: str | None = None,
                 quiet: bool = False, **kwargs: Any) -> None:
        """Store options, falling back to app configuration when not given.

        Options resolved through ``app.either`` fall back to the
        corresponding setting (e.g. ``beat_schedule_filename``) when the
        argument is :const:`None`.
        """
        self.app = app = app or self.app
        either = self.app.either
        self.loglevel = loglevel
        self.logfile = logfile
        self.schedule = either('beat_schedule_filename', schedule)
        # ``scheduler`` takes precedence over the deprecated ``scheduler_cls``.
        self.scheduler_cls = either(
            'beat_scheduler', scheduler, scheduler_cls)
        self.redirect_stdouts = either(
            'worker_redirect_stdouts', redirect_stdouts)
        self.redirect_stdouts_level = either(
            'worker_redirect_stdouts_level', redirect_stdouts_level)
        self.quiet = quiet
        self.max_interval = max_interval
        self.socket_timeout = socket_timeout
        self.no_color = no_color
        self.colored = app.log.colored(
            self.logfile,
            # Explicit --no-color disables colors; when the flag wasn't
            # given (None) the falsy value is passed through unchanged.
            enabled=not no_color if no_color is not None else no_color,
        )
        self.pidfile = pidfile
        # Accept both symbolic level names ('WARN') and numeric levels.
        if not isinstance(self.loglevel, numbers.Integral):
            self.loglevel = LOG_LEVELS[self.loglevel.upper()]

    def run(self) -> None:
        """Announce startup, initialize the loader and start the scheduler."""
        if not self.quiet:
            print(str(self.colored.cyan(
                f'celery beat v{VERSION_BANNER} is starting.')))
        self.init_loader()
        self.set_process_title()
        self.start_scheduler()

    def setup_logging(self, colorize: bool | None = None) -> None:
        """Configure logging via the app, honoring the --no-color option."""
        if colorize is None and self.no_color is not None:
            colorize = not self.no_color
        self.app.log.setup(self.loglevel, self.logfile,
                           self.redirect_stdouts, self.redirect_stdouts_level,
                           colorize=colorize)

    def start_scheduler(self) -> None:
        """Create and start the beat service (blocks until shutdown).

        Order matters here: pidlock first, banner before logging is
        redirected, sync signal handlers installed just before start.
        """
        if self.pidfile:
            platforms.create_pidlock(self.pidfile)
        service = self.Service(
            app=self.app,
            max_interval=self.max_interval,
            scheduler_cls=self.scheduler_cls,
            schedule_filename=self.schedule,
        )
        if not self.quiet:
            print(self.banner(service))
        self.setup_logging()
        if self.socket_timeout:
            logger.debug('Setting default socket timeout to %r',
                         self.socket_timeout)
            socket.setdefaulttimeout(self.socket_timeout)
        try:
            self.install_sync_handler(service)
            service.start()
        except Exception as exc:
            # Log with traceback before re-raising so the failure is
            # recorded even if stderr is redirected.
            logger.critical('beat raised exception %s: %r',
                            exc.__class__, exc,
                            exc_info=True)
            raise

    def banner(self, service: beat.Service) -> str:
        """Return the colored startup banner including startup info."""
        c = self.colored
        return str(
            c.blue('__ ', c.magenta('-'),
                   c.blue(' ... __ '), c.magenta('-'),
                   c.blue(' _\n'),
                   c.reset(self.startup_info(service))),
        )

    def init_loader(self) -> None:
        # Run the worker init handler.
        # (Usually imports task modules and such.)
        self.app.loader.init_worker()
        self.app.finalize()

    def startup_info(self, service: beat.Service) -> str:
        """Render :data:`STARTUP_INFO_FMT` for the given service."""
        scheduler = service.get_scheduler(lazy=True)
        return STARTUP_INFO_FMT.format(
            conninfo=self.app.connection().as_uri(),
            timestamp=datetime.now().replace(microsecond=0),
            logfile=self.logfile or '[stderr]',
            loglevel=LOG_LEVELS[self.loglevel],
            loader=qualname(self.app.loader),
            scheduler=qualname(scheduler),
            scheduler_info=scheduler.info,
            hmax_interval=humanize_seconds(scheduler.max_interval),
            max_interval=scheduler.max_interval,
        )

    def set_process_title(self) -> None:
        """Set the OS-visible process title for ps/top output."""
        # Skip the 'manage' script name when invoked as e.g. manage.py.
        arg_start = 'manage' in sys.argv[0] and 2 or 1
        platforms.set_process_title(
            'celery beat', info=' '.join(sys.argv[arg_start:]),
        )

    def install_sync_handler(self, service: beat.Service) -> None:
        """Install a `SIGTERM` + `SIGINT` handler saving the schedule."""
        def _sync(signum: Signals, frame: FrameType) -> None:
            # Persist the schedule to disk before exiting.
            service.sync()
            raise SystemExit()
        platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)

View File

@@ -0,0 +1,506 @@
"""Start/stop/manage workers."""
import errno
import os
import shlex
import signal
import sys
from collections import OrderedDict, UserList, defaultdict
from functools import partial
from subprocess import Popen
from time import sleep
from kombu.utils.encoding import from_utf8
from kombu.utils.objects import cached_property
from celery.platforms import IS_WINDOWS, Pidfile, signal_name
from celery.utils.nodenames import gethostname, host_format, node_format, nodesplit
from celery.utils.saferepr import saferepr
__all__ = ('Cluster', 'Node')
# Name of the celery executable used when building command lines.
CELERY_EXE = 'celery'


def celery_exe(*args):
    """Return a ``celery`` command line with *args* appended, as one string."""
    return ' '.join((CELERY_EXE, *args))
def build_nodename(name, prefix, suffix):
    """Split *name* into ``(name, nodename, hostname)`` parts.

    A name containing ``@`` is treated as a full nodename and split;
    otherwise *prefix* and *suffix* are used to build one.
    """
    if '@' in name:
        nodename = host_format(name)
        name, hostname = nodesplit(nodename)
        return name, nodename, hostname
    shortname = f'{prefix}{name}'
    nodename = host_format(f'{shortname}@{suffix}')
    return name, nodename, suffix
def build_expander(nodename, shortname, hostname):
    """Return a ``node_format`` callable bound to this node's identifiers.

    ``%i``/``%I`` are passed through verbatim so they can be expanded
    later by the worker process itself.
    """
    bound = {
        'name': nodename,
        'N': shortname,
        'd': hostname,
        'h': nodename,
        'i': '%i',
        'I': '%I',
    }
    return partial(node_format, **bound)
def format_opt(opt, value):
    """Render a command-line option with its value.

    Long options use ``--opt=value``, short options ``-o value``;
    a falsy value yields the bare option.
    """
    if not value:
        return opt
    separator = '=' if opt.startswith('--') else ' '
    return f'{opt}{separator}{value}'
def _kwargs_to_command_line(kwargs):
return {
('--{}'.format(k.replace('_', '-'))
if len(k) > 1 else f'-{k}'): f'{v}'
for k, v in kwargs.items()
}
class NamespacedOptionParser:
    """Parse a ``celery multi`` style argument list.

    Separates plain options, positional values, per-node namespaced
    options (``-c:1``, ``--loglevel:node``) and a verbatim passthrough
    tail following ``--``.
    """

    def __init__(self, args):
        self.args = args
        self.options = OrderedDict()
        self.values = []
        self.passthrough = ''
        self.namespaces = defaultdict(OrderedDict)

    def parse(self):
        """Scan ``self.args`` and populate the result attributes."""
        tokens = [token for token in self.args if token]
        index, total = 0, len(tokens)
        while index < total:
            token = tokens[index]
            if token == '--':
                # Everything from here on is passed through untouched.
                self.passthrough = ' '.join(tokens[index:])
                return
            if token[0] != '-':
                self.values.append(token)
            elif token[1] == '-':
                self.process_long_opt(token[2:])
            else:
                # A short option may consume the following token as its
                # value unless that token is itself an option.
                value = None
                if index + 1 < total and tokens[index + 1][0] != '-':
                    value = tokens[index + 1]
                    index += 1
                self.process_short_opt(token[1:], value)
            index += 1

    def process_long_opt(self, arg, value=None):
        """Record a long option, splitting an inline ``=value`` if present."""
        if '=' in arg:
            arg, value = arg.split('=', 1)
        self.add_option(arg, value, short=False)

    def process_short_opt(self, arg, value=None):
        """Record a short option with its (possibly absent) value."""
        self.add_option(arg, value, short=True)

    def optmerge(self, ns, defaults=None):
        """Return *defaults* overlaid with the options of namespace *ns*."""
        base = self.options if defaults is None else defaults
        return OrderedDict(base, **self.namespaces[ns])

    def add_option(self, name, value, short=False, ns=None):
        """Store an option, routing ``name:ns`` forms to their namespace."""
        prefix = '-' if short else '--'
        dest = self.options
        if ':' in name:
            name, ns = name.split(':')
            dest = self.namespaces[ns]
        dest[prefix + name] = value
class Node:
    """Represents a node in a cluster.

    Holds the node's name, command-line options and derived argv, and
    knows how to start the node and signal its process via its pidfile.
    """

    def __init__(self, name,
                 cmd=None, append=None, options=None, extra_args=None):
        self.name = name
        # Default command runs a detached worker through `-m celery`.
        self.cmd = cmd or f"-m {celery_exe('worker', '--detach')}"
        self.append = append
        self.extra_args = extra_args or ''
        self.options = self._annotate_with_default_opts(
            options or OrderedDict())
        self.expander = self._prepare_expander()
        self.argv = self._prepare_argv()
        self._pid = None

    def _annotate_with_default_opts(self, options):
        """Fill in node name, pidfile, logfile and executable defaults."""
        options['-n'] = self.name
        self._setdefaultopt(options, ['--pidfile', '-p'], '/var/run/celery/%n.pid')
        self._setdefaultopt(options, ['--logfile', '-f'], '/var/log/celery/%n%I.log')
        self._setdefaultopt(options, ['--executable'], sys.executable)
        return options

    def _setdefaultopt(self, d, alt, value):
        """Return the value for any alias in *alt*, defaulting to *value*.

        Side effect: creates the parent directory of the resulting path
        if it does not exist yet.
        """
        # Aliases after the first take precedence if already present.
        for opt in alt[1:]:
            try:
                return d[opt]
            except KeyError:
                pass
        value = d.setdefault(alt[0], os.path.normpath(value))
        dir_path = os.path.dirname(value)
        if dir_path and not os.path.exists(dir_path):
            os.makedirs(dir_path)
        return value

    def _prepare_expander(self):
        """Build the %-format expander from this node's name parts."""
        shortname, hostname = self.name.split('@', 1)
        return build_expander(
            self.name, shortname, hostname)

    def _prepare_argv(self):
        """Expand the command and options into the final argv tuple.

        App-level options (``-A``, ``-b``, ...) are inserted right after
        the ``celery`` token; the remaining options are appended.
        """
        cmd = self.expander(self.cmd).split(' ')
        i = cmd.index('celery') + 1

        options = self.options.copy()
        for opt, value in self.options.items():
            if opt in (
                '-A', '--app',
                '-b', '--broker',
                '--result-backend',
                '--loader',
                '--config',
                '--workdir',
                '-C', '--no-color',
                '-q', '--quiet',
            ):
                cmd.insert(i, format_opt(opt, self.expander(value)))

                options.pop(opt)

        cmd = [' '.join(cmd)]
        argv = tuple(
            cmd +
            [format_opt(opt, self.expander(value))
             for opt, value in options.items()] +
            [self.extra_args]
        )
        if self.append:
            argv += (self.expander(self.append),)
        return argv

    def alive(self):
        """Return True if the process registered in the pidfile responds."""
        # Signal 0 performs permission/existence checks without delivery.
        return self.send(0)

    def send(self, sig, on_error=None):
        """Send *sig* to the node's process.

        Returns True on success, False (after calling *on_error*) when
        the process is gone; returns None when no pid is known.
        """
        pid = self.pid
        if pid:
            try:
                os.kill(pid, sig)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise
                # ESRCH: no such process -- treat as dead.
                maybe_call(on_error, self)
                return False
            return True
        maybe_call(on_error, self)

    def start(self, env=None, **kwargs):
        """Spawn the node's process and wait for it to exit."""
        return self._waitexec(
            self.argv, path=self.executable, env=env, **kwargs)

    def _waitexec(self, argv, path=sys.executable, env=None,
                  on_spawn=None, on_signalled=None, on_failure=None):
        """Run *argv* under *path*, blocking until the child exits."""
        argstr = self.prepare_argv(argv, path)
        maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env)
        pipe = Popen(argstr, env=env)
        return self.handle_process_exit(
            pipe.wait(),
            on_signalled=on_signalled,
            on_failure=on_failure,
        )

    def handle_process_exit(self, retcode, on_signalled=None, on_failure=None):
        """Dispatch callbacks on abnormal exit; return a positive code.

        Returns None for a clean (zero) exit.
        """
        if retcode < 0:
            # Negative return code means the child died from a signal.
            maybe_call(on_signalled, self, -retcode)
            return -retcode
        elif retcode > 0:
            maybe_call(on_failure, self, retcode)
        return retcode

    def prepare_argv(self, argv, path):
        """Join and re-split argv so quoting is handled like a shell would."""
        args = ' '.join([path] + list(argv))
        return shlex.split(from_utf8(args), posix=not IS_WINDOWS)

    def getopt(self, *alt):
        """Return the value of the first present alias in *alt*.

        Raises KeyError (for the primary alias) if none is set.
        """
        for opt in alt:
            try:
                return self.options[opt]
            except KeyError:
                pass
        raise KeyError(alt[0])

    def __repr__(self):
        return f'<{type(self).__name__}: {self.name}>'

    @cached_property
    def pidfile(self):
        # Expanded pidfile path (%n etc. substituted).
        return self.expander(self.getopt('--pidfile', '-p'))

    @cached_property
    def logfile(self):
        # Expanded logfile path (%n, %I etc. substituted).
        return self.expander(self.getopt('--logfile', '-f'))

    @property
    def pid(self):
        """Pid set explicitly, or read from the pidfile (None if unreadable)."""
        if self._pid is not None:
            return self._pid
        try:
            return Pidfile(self.pidfile).read_pid()
        except ValueError:
            pass

    @pid.setter
    def pid(self, value):
        self._pid = value

    @cached_property
    def executable(self):
        # Python executable used to launch this node.
        return self.options['--executable']

    @cached_property
    def argv_with_executable(self):
        return (self.executable,) + self.argv

    @classmethod
    def from_kwargs(cls, name, **kwargs):
        """Alternate constructor building options from keyword arguments."""
        return cls(name, options=_kwargs_to_command_line(kwargs))
def maybe_call(fun, *args, **kwargs):
    """Call *fun* with the given arguments, doing nothing when it is None."""
    if fun is None:
        return
    fun(*args, **kwargs)
class MultiParser:
    """Turn a parsed ``celery multi`` option set into Node instances."""

    # Node factory; overridable (e.g. in tests).
    Node = Node

    def __init__(self, cmd='celery worker',
                 append='', prefix='', suffix='',
                 range_prefix='celery'):
        self.cmd = cmd
        self.append = append
        self.prefix = prefix
        self.suffix = suffix
        self.range_prefix = range_prefix

    def parse(self, p):
        """Yield Node objects for the names/options parsed into *p*.

        Returns a generator; supports the ``multi start 3`` numeric
        range shorthand when a single name is given.
        """
        names = p.values
        options = dict(p.options)
        # A single value may be a count (e.g. 'start 3' -> nodes 1..3).
        ranges = len(names) == 1
        prefix = self.prefix
        cmd = options.pop('--cmd', self.cmd)
        append = options.pop('--append', self.append)
        hostname = options.pop('--hostname', options.pop('-n', gethostname()))
        prefix = options.pop('--prefix', prefix) or ''
        suffix = options.pop('--suffix', self.suffix) or hostname
        # Quoted empty suffix means "no suffix at all".
        suffix = '' if suffix in ('""', "''") else suffix
        range_prefix = options.pop('--range-prefix', '') or self.range_prefix
        if ranges:
            try:
                names, prefix = self._get_ranges(names), range_prefix
            except ValueError:
                # Not a number: treat the single value as a plain name.
                pass
        self._update_ns_opts(p, names)
        self._update_ns_ranges(p, ranges)
        return (
            self._node_from_options(
                p, name, prefix, suffix, cmd, append, options)
            for name in names
        )

    def _node_from_options(self, p, name, prefix,
                           suffix, cmd, append, options):
        """Build one Node, merging its namespace options over the defaults."""
        namespace, nodename, _ = build_nodename(name, prefix, suffix)
        namespace = nodename if nodename in p.namespaces else namespace
        return Node(nodename, cmd, append,
                    p.optmerge(namespace, options), p.passthrough)

    def _get_ranges(self, names):
        """Expand a single numeric name into the list ['1', ..., N]."""
        noderange = int(names[0])
        return [str(n) for n in range(1, noderange + 1)]

    def _update_ns_opts(self, p, names):
        # Numbers in args always refers to the index in the list of names.
        # (e.g., `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on).
        for ns_name, ns_opts in list(p.namespaces.items()):
            if ns_name.isdigit():
                ns_index = int(ns_name) - 1
                if ns_index < 0:
                    raise KeyError(f'Indexes start at 1 got: {ns_name!r}')
                try:
                    p.namespaces[names[ns_index]].update(ns_opts)
                except IndexError:
                    raise KeyError(f'No node at index {ns_name!r}')

    def _update_ns_ranges(self, p, ranges):
        """Expand comma/range namespace keys (e.g. '1-3') into single keys."""
        for ns_name, ns_opts in list(p.namespaces.items()):
            if ',' in ns_name or (ranges and '-' in ns_name):
                for subns in self._parse_ns_range(ns_name, ranges):
                    p.namespaces[subns].update(ns_opts)
                p.namespaces.pop(ns_name)

    def _parse_ns_range(self, ns, ranges=False):
        """Split 'a,b' and (optionally) expand numeric 'x-y' spans."""
        ret = []
        for space in ',' in ns and ns.split(',') or [ns]:
            if ranges and '-' in space:
                start, stop = space.split('-')
                ret.extend(
                    str(n) for n in range(int(start), int(stop) + 1)
                )
            else:
                ret.append(space)
        return ret
class Cluster(UserList):
    """Represent a cluster of workers.

    A list-like collection of Node objects with operations to start,
    stop, restart and signal them; progress is reported through the
    ``on_*`` callbacks, all of which are optional.
    """

    def __init__(self, nodes, cmd=None, env=None,
                 on_stopping_preamble=None,
                 on_send_signal=None,
                 on_still_waiting_for=None,
                 on_still_waiting_progress=None,
                 on_still_waiting_end=None,
                 on_node_start=None,
                 on_node_restart=None,
                 on_node_shutdown_ok=None,
                 on_node_status=None,
                 on_node_signal=None,
                 on_node_signal_dead=None,
                 on_node_down=None,
                 on_child_spawn=None,
                 on_child_signalled=None,
                 on_child_failure=None):
        self.nodes = nodes
        self.cmd = cmd or celery_exe('worker')
        self.env = env

        self.on_stopping_preamble = on_stopping_preamble
        self.on_send_signal = on_send_signal
        self.on_still_waiting_for = on_still_waiting_for
        self.on_still_waiting_progress = on_still_waiting_progress
        self.on_still_waiting_end = on_still_waiting_end
        self.on_node_start = on_node_start
        self.on_node_restart = on_node_restart
        self.on_node_shutdown_ok = on_node_shutdown_ok
        self.on_node_status = on_node_status
        self.on_node_signal = on_node_signal
        self.on_node_signal_dead = on_node_signal_dead
        self.on_node_down = on_node_down
        self.on_child_spawn = on_child_spawn
        self.on_child_signalled = on_child_signalled
        self.on_child_failure = on_child_failure

    def start(self):
        """Start every node; return the list of exit codes."""
        return [self.start_node(node) for node in self]

    def start_node(self, node):
        """Start a single node, reporting status via callbacks."""
        maybe_call(self.on_node_start, node)
        retcode = self._start_node(node)
        maybe_call(self.on_node_status, node, retcode)
        return retcode

    def _start_node(self, node):
        return node.start(
            self.env,
            on_spawn=self.on_child_spawn,
            on_signalled=self.on_child_signalled,
            on_failure=self.on_child_failure,
        )

    def send_all(self, sig):
        """Send signal *sig* to every node with a known pid."""
        for node in self.getpids(on_down=self.on_node_down):
            maybe_call(self.on_node_signal, node, signal_name(sig))
            node.send(sig, self.on_node_signal_dead)

    def kill(self):
        """Forcefully terminate all nodes with SIGKILL."""
        return self.send_all(signal.SIGKILL)

    def restart(self, sig=signal.SIGTERM):
        """Stop all nodes, restarting each one as it goes down."""
        retvals = []

        def restart_on_down(node):
            maybe_call(self.on_node_restart, node)
            retval = self._start_node(node)
            maybe_call(self.on_node_status, node, retval)
            retvals.append(retval)

        self._stop_nodes(retry=2, on_down=restart_on_down, sig=sig)
        return retvals

    def stop(self, retry=None, callback=None, sig=signal.SIGTERM):
        """Stop all nodes (no waiting unless *retry* is given)."""
        return self._stop_nodes(retry=retry, on_down=callback, sig=sig)

    def stopwait(self, retry=2, callback=None, sig=signal.SIGTERM):
        """Stop all nodes and wait for them to terminate."""
        return self._stop_nodes(retry=retry, on_down=callback, sig=sig)

    def _stop_nodes(self, retry=None, on_down=None, sig=signal.SIGTERM):
        on_down = on_down if on_down is not None else self.on_node_down
        nodes = list(self.getpids(on_down=on_down))
        if nodes:
            # shutdown_nodes is a generator; drive it to completion,
            # invoking on_down for every node as it goes down.
            for node in self.shutdown_nodes(nodes, sig=sig, retry=retry):
                maybe_call(on_down, node)

    def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None):
        """Signal *nodes* and yield each one as it terminates.

        With *retry*, keeps polling the remaining set, sleeping between
        full passes, until every node is gone.
        """
        P = set(nodes)
        maybe_call(self.on_stopping_preamble, nodes)
        to_remove = set()
        for node in P:
            maybe_call(self.on_send_signal, node, signal_name(sig))
            if not node.send(sig, self.on_node_signal_dead):
                # Process already gone -- report it immediately.
                to_remove.add(node)
                yield node
        P -= to_remove
        if retry:
            maybe_call(self.on_still_waiting_for, P)
            its = 0
            while P:
                to_remove = set()
                for node in P:
                    its += 1
                    maybe_call(self.on_still_waiting_progress, P)
                    if not node.alive():
                        maybe_call(self.on_node_shutdown_ok, node)
                        to_remove.add(node)
                        yield node
                        maybe_call(self.on_still_waiting_for, P)
                        # Restart the scan: P is about to shrink.
                        break
                P -= to_remove
                if P and not its % len(P):
                    sleep(float(retry))
            maybe_call(self.on_still_waiting_end)

    def find(self, name):
        """Return the node with the given name, raising KeyError if absent."""
        for node in self:
            if node.name == name:
                return node
        raise KeyError(name)

    def getpids(self, on_down=None):
        """Yield nodes with a readable pid; report the rest via *on_down*."""
        for node in self:
            if node.pid:
                yield node
            else:
                maybe_call(on_down, node)

    def __repr__(self):
        return '<{name}({0}): {1}>'.format(
            len(self), saferepr([n.name for n in self]),
            name=type(self).__name__,
        )

    @property
    def data(self):
        # UserList backing storage.
        return self.nodes

View File

@@ -0,0 +1,387 @@
"""Worker command-line program.
This module is the 'program-version' of :mod:`celery.worker`.
It does everything necessary to run that module
as an actual application, like installing signal handlers,
platform tweaks, and so on.
"""
import logging
import os
import platform as _platform
import sys
from datetime import datetime
from functools import partial
from billiard.common import REMAP_SIGTERM
from billiard.process import current_process
from kombu.utils.encoding import safe_str
from celery import VERSION_BANNER, platforms, signals
from celery.app import trace
from celery.loaders.app import AppLoader
from celery.platforms import EX_FAILURE, EX_OK, check_privileges
from celery.utils import static, term
from celery.utils.debug import cry
from celery.utils.imports import qualname
from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
from celery.utils.text import pluralize
from celery.worker import WorkController
__all__ = ('Worker',)

logger = get_logger(__name__)
# Platform detection flags used below to select signal-handler variants.
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')

# ASCII-art logo; joined line-by-line with the startup banner
# (see Worker.startup_info).
ARTLINES = [
    ' --------------',
    '--- ***** -----',
    '-- ******* ----',
    '- *** --- * ---',
    '- ** ----------',
    '- ** ----------',
    '- ** ----------',
    '- ** ----------',
    '- *** --- * ---',
    '-- ******* ----',
    '--- ***** -----',
    ' --------------',
]

# Startup banner template filled in by Worker.startup_info.
BANNER = """\
{hostname} v{version}
{platform} {timestamp}
[config]
.> app: {app}
.> transport: {conninfo}
.> results: {results}
.> concurrency: {concurrency}
.> task events: {events}
[queues]
{queues}
"""

# Template for the optional registered-task listing (Worker.extra_info).
EXTRA_INFO_FMT = """
[tasks]
{tasks}
"""
def active_thread_count():
    """Return the number of live threads, ignoring ``Dummy-*`` threads.

    Dummy threads are the ones Python creates for externally started
    threads it did not spawn itself.
    """
    from threading import enumerate
    live = [t for t in enumerate() if not t.name.startswith('Dummy-')]
    return len(live)
def safe_say(msg):
    """Write *msg*, preceded by a newline, to the original stderr stream.

    Uses ``sys.__stderr__`` so the message is visible even when stderr
    has been redirected, and flushes immediately.
    """
    stream = sys.__stderr__
    print(f'\n{msg}', file=stream, flush=True)
class Worker(WorkController):
    """Worker as a program.

    Extends :class:`~celery.worker.WorkController` with the program-level
    concerns: CLI option handling, banner printing, logging setup,
    platform tweaks and process-title management.
    """

    def on_before_init(self, quiet=False, **kwargs):
        self.quiet = quiet
        trace.setup_worker_optimizations(self.app, self.hostname)

        # this signal can be used to set up configuration for
        # workers by name.
        signals.celeryd_init.send(
            sender=self.hostname, instance=self,
            conf=self.app.conf, options=kwargs,
        )
        check_privileges(self.app.conf.accept_content)

    def on_after_init(self, purge=False, no_color=None,
                      redirect_stdouts=None, redirect_stdouts_level=None,
                      **kwargs):
        # CLI options fall back to app configuration when not given.
        self.redirect_stdouts = self.app.either(
            'worker_redirect_stdouts', redirect_stdouts)
        self.redirect_stdouts_level = self.app.either(
            'worker_redirect_stdouts_level', redirect_stdouts_level)
        super().setup_defaults(**kwargs)
        self.purge = purge
        self.no_color = no_color
        self._isatty = sys.stdout.isatty()
        self.colored = self.app.log.colored(
            self.logfile,
            # Explicit --no-color disables colors; None passes through.
            enabled=not no_color if no_color is not None else no_color
        )

    def on_init_blueprint(self):
        self._custom_logging = self.setup_logging()
        # apply task execution optimizations
        # -- This will finalize the app!
        trace.setup_worker_optimizations(self.app, self.hostname)

    def on_start(self):
        app = self.app
        super().on_start()

        # this signal can be used to, for example, change queues after
        # the -Q option has been applied.
        signals.celeryd_after_setup.send(
            sender=self.hostname, instance=self, conf=app.conf,
        )

        if self.purge:
            self.purge_messages()

        if not self.quiet:
            self.emit_banner()

        self.set_process_status('-active-')
        self.install_platform_tweaks(self)
        if not self._custom_logging and self.redirect_stdouts:
            app.log.redirect_stdouts(self.redirect_stdouts_level)

        # TODO: Remove the following code in Celery 6.0
        # This qualifies as a hack for issue #6366.
        warn_deprecated = True
        config_source = app._config_source
        if isinstance(config_source, str):
            # Don't raise the warning when the settings originate from
            # django.conf:settings
            warn_deprecated = config_source.lower() not in [
                'django.conf:settings',
            ]

        if warn_deprecated:
            if app.conf.maybe_warn_deprecated_settings():
                logger.warning(
                    "Please run `celery upgrade settings path/to/settings.py` "
                    "to avoid these warnings and to allow a smoother upgrade "
                    "to Celery 6.0."
                )

    def emit_banner(self):
        # Dump configuration to screen so we have some basic information
        # for when users sends bug reports.
        use_image = term.supports_images()
        if use_image:
            print(term.imgcat(static.logo()))
        print(safe_str(''.join([
            str(self.colored.cyan(
                ' \n', self.startup_info(artlines=not use_image))),
            str(self.colored.reset(self.extra_info() or '')),
        ])), file=sys.__stdout__, flush=True)

    def on_consumer_ready(self, consumer):
        signals.worker_ready.send(sender=consumer)
        logger.info('%s ready.', safe_str(self.hostname))

    def setup_logging(self, colorize=None):
        """Configure logging; returns whatever ``app.log.setup`` returns."""
        if colorize is None and self.no_color is not None:
            colorize = not self.no_color
        return self.app.log.setup(
            self.loglevel, self.logfile,
            redirect_stdouts=False, colorize=colorize, hostname=self.hostname,
        )

    def purge_messages(self):
        """Erase all pending messages from the queues (the --purge option)."""
        with self.app.connection_for_write() as connection:
            count = self.app.control.purge(connection=connection)
            if count:  # pragma: no cover
                print(f"purge: Erased {count} {pluralize(count, 'message')} from the queue.\n", flush=True)

    def tasklist(self, include_builtins=True, sep='\n', int_='celery.'):
        """Return the sorted task names as an indented, newline-joined list."""
        return sep.join(
            f' . {task}' for task in sorted(self.app.tasks)
            if (not task.startswith(int_) if not include_builtins else task)
        )

    def extra_info(self):
        """Return the [tasks] section for the banner (INFO level and below)."""
        if self.loglevel is None:
            return
        if self.loglevel <= logging.INFO:
            # Builtin celery.* tasks are only listed at DEBUG level.
            include_builtins = self.loglevel <= logging.DEBUG
            tasklist = self.tasklist(include_builtins=include_builtins)
            return EXTRA_INFO_FMT.format(tasks=tasklist)

    def startup_info(self, artlines=True):
        """Render the startup banner, optionally merged with the ASCII art."""
        app = self.app
        concurrency = str(self.concurrency)
        appr = '{}:{:#x}'.format(app.main or '__main__', id(app))
        if not isinstance(app.loader, AppLoader):
            loader = qualname(app.loader)
            if loader.startswith('celery.loaders'):  # pragma: no cover
                # strip the 'celery.loaders.' prefix for brevity.
                loader = loader[14:]
            appr += f' ({loader})'
        if self.autoscale:
            max, min = self.autoscale
            concurrency = f'{{min={min}, max={max}}}'
        pool = self.pool_cls
        if not isinstance(pool, str):
            pool = pool.__module__
        concurrency += f" ({pool.split('.')[-1]})"
        events = 'ON'
        if not self.task_events:
            events = 'OFF (enable -E to monitor tasks in this worker)'

        banner = BANNER.format(
            app=appr,
            hostname=safe_str(self.hostname),
            timestamp=datetime.now().replace(microsecond=0),
            version=VERSION_BANNER,
            conninfo=self.app.connection().as_uri(),
            results=self.app.backend.as_uri(),
            concurrency=concurrency,
            platform=safe_str(_platform.platform()),
            events=events,
            queues=app.amqp.queues.format(indent=0, indent_first=False),
        ).splitlines()

        # integrate the ASCII art.
        if artlines:
            for i, _ in enumerate(banner):
                try:
                    banner[i] = ' '.join([ARTLINES[i], banner[i]])
                except IndexError:
                    # banner is longer than the art: pad to keep alignment.
                    banner[i] = ' ' * 16 + banner[i]
        return '\n'.join(banner) + '\n'

    def install_platform_tweaks(self, worker):
        """Install platform specific tweaks and workarounds."""
        if self.app.IS_macOS:
            self.macOS_proxy_detection_workaround()

        # Install signal handler so SIGHUP restarts the worker.
        if not self._isatty:
            # only install HUP handler if detached from terminal,
            # so closing the terminal window doesn't restart the worker
            # into the background.
            if self.app.IS_macOS:
                # macOS can't exec from a process using threads.
                # See https://github.com/celery/celery/issues#issue/152
                install_HUP_not_supported_handler(worker)
            else:
                install_worker_restart_handler(worker)
        install_worker_term_handler(worker)
        install_worker_term_hard_handler(worker)
        install_worker_int_handler(worker)
        install_cry_handler()
        install_rdb_handler()

    def macOS_proxy_detection_workaround(self):
        """See https://github.com/celery/celery/issues#issue/161."""
        os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd')

    def set_process_status(self, info):
        """Update the OS-visible process title with *info* and the argv."""
        return platforms.set_mp_process_title(
            'celeryd',
            info=f'{info} ({platforms.strargv(sys.argv)})',
            hostname=self.hostname,
        )
def _shutdown_handler(worker, sig='TERM', how='Warm',
                      callback=None, exitcode=EX_OK):
    """Install a signal handler that initiates a Warm or Cold shutdown.

    The handler sets ``state.should_stop`` (Warm) or
    ``state.should_terminate`` (Cold) to *exitcode*; announcement and
    the ``worker_shutting_down`` signal are only sent from the main
    process, not from pool children.
    """
    def _handle_request(*args):
        with in_sighandler():
            from celery.worker import state
            if current_process()._name == 'MainProcess':
                if callback:
                    callback(worker)
                safe_say(f'worker: {how} shutdown (MainProcess)')
                signals.worker_shutting_down.send(
                    sender=worker.hostname, sig=sig, how=how,
                    exitcode=exitcode,
                )
            # Flag the worker state machine; picked up by the main loop.
            setattr(state, {'Warm': 'should_stop',
                            'Cold': 'should_terminate'}[how], exitcode)
    _handle_request.__name__ = str(f'worker_{how}')
    platforms.signals[sig] = _handle_request
if REMAP_SIGTERM == "SIGQUIT":
    # billiard remaps SIGTERM to SIGQUIT: treat TERM as a cold shutdown.
    install_worker_term_handler = partial(
        _shutdown_handler, sig='SIGTERM', how='Cold', exitcode=EX_FAILURE,
    )
else:
    install_worker_term_handler = partial(
        _shutdown_handler, sig='SIGTERM', how='Warm',
    )

if not is_jython:  # pragma: no cover
    install_worker_term_hard_handler = partial(
        _shutdown_handler, sig='SIGQUIT', how='Cold',
        exitcode=EX_FAILURE,
    )
else:  # pragma: no cover
    # Jython has no POSIX signals: make both installers no-ops.
    install_worker_term_handler = \
        install_worker_term_hard_handler = lambda *a, **kw: None
def on_SIGINT(worker):
    """Warn on the first Ctrl+C and arm a cold shutdown for the next one."""
    warning = 'worker: Hitting Ctrl+C again will terminate all running tasks!'
    safe_say(warning)
    install_worker_term_hard_handler(worker, sig='SIGINT')
if not is_jython:  # pragma: no cover
    # First SIGINT triggers a warm shutdown via on_SIGINT; a second one
    # escalates to a cold shutdown.
    install_worker_int_handler = partial(
        _shutdown_handler, sig='SIGINT', callback=on_SIGINT,
        exitcode=EX_FAILURE,
    )
else:  # pragma: no cover
    def install_worker_int_handler(*args, **kwargs):
        # No-op on Jython (no POSIX signal support).
        pass
def _reload_current_worker():
    """Replace the current process with a fresh copy of itself.

    Closes inherited descriptors first so the new process does not
    keep stale files/sockets open; never returns on success.
    """
    platforms.close_open_fds([
        sys.__stdin__, sys.__stdout__, sys.__stderr__,
    ])
    os.execv(sys.executable, [sys.executable] + sys.argv)
def install_worker_restart_handler(worker, sig='SIGHUP'):
    """Install a SIGHUP handler restarting the worker in-place."""
    def restart_worker_sig_handler(*args):
        """Signal handler restarting the current python program."""
        set_in_sighandler(True)
        safe_say(f"Restarting celery worker ({' '.join(sys.argv)})")
        import atexit
        # Exec the replacement process only after a clean shutdown.
        atexit.register(_reload_current_worker)
        from celery.worker import state
        state.should_stop = EX_OK
    platforms.signals[sig] = restart_worker_sig_handler
def install_cry_handler(sig='SIGUSR1'):
    """Install a SIGUSR1 handler dumping all thread stack traces."""
    # PyPy does not have sys._current_frames
    if is_pypy:  # pragma: no cover
        return

    def cry_handler(*args):
        """Signal handler logging the stack-trace of all active threads."""
        with in_sighandler():
            safe_say(cry())
    platforms.signals[sig] = cry_handler
def install_rdb_handler(envvar='CELERY_RDBSIG',
                        sig='SIGUSR2'):  # pragma: no cover
    """Install a SIGUSR2 handler starting a remote debugger session.

    Only installed when the environment variable *envvar* is set.
    """
    def rdb_handler(*args):
        """Signal handler setting a rdb breakpoint at the current frame."""
        with in_sighandler():
            from celery.contrib.rdb import _frame, set_trace

            # gevent does not pass standard signal handler args
            frame = args[1] if args else _frame().f_back
            set_trace(frame)
    if os.environ.get(envvar):
        platforms.signals[sig] = rdb_handler
def install_HUP_not_supported_handler(worker, sig='SIGHUP'):
    """Install a SIGHUP handler that only warns (restart not supported here)."""
    def warn_on_HUP_handler(signum, frame):
        with in_sighandler():
            message = ('{sig} not supported: Restarting with {sig} is '
                       'unstable on this platform!'.format(sig=sig))
            safe_say(message)
    platforms.signals[sig] = warn_on_HUP_handler