Updates
This commit is contained in:
61
ETB-API/venv/lib/python3.12/site-packages/billiard/__init__.py
Normal file
@@ -0,0 +1,61 @@
"""Python multiprocessing fork with improvements and bugfixes"""
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads.  A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import sys

from . import context

VERSION = (4, 2, 1)
__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
__author__ = 'R Oudkerk / Python Software Foundation'
__author_email__ = 'python-dev@python.org'
__maintainer__ = 'Asif Saif Uddin'
__contact__ = "auvipy@gmail.com"
__homepage__ = "https://github.com/celery/billiard"
__docformat__ = "restructuredtext"

# -eof meta-

#
# Copy stuff from default context
#

globals().update((name, getattr(context._default_context, name))
                 for name in context._default_context.__all__)
__all__ = context._default_context.__all__

#
# XXX These should not really be documented or public.
#

SUBDEBUG = 5
SUBWARNING = 25

#
# Alias for main module -- will be reset by bootstrapping child processes
#

if '__main__' in sys.modules:
    sys.modules['__mp_main__'] = sys.modules['__main__']


def ensure_multiprocessing():
    from ._ext import ensure_multiprocessing
    return ensure_multiprocessing()
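A minimal usage sketch of the package this file exposes -- billiard is intended as a drop-in replacement for the stdlib multiprocessing package (the `square` helper here is hypothetical, not part of the commit):

    import billiard

    def square(x):
        # runs in a worker process
        return x * x

    if __name__ == '__main__':
        pool = billiard.Pool(processes=2)
        try:
            print(pool.map(square, range(5)))  # -> [0, 1, 4, 9, 16]
        finally:
            pool.close()
            pool.join()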
Binary file not shown.
32
ETB-API/venv/lib/python3.12/site-packages/billiard/_ext.py
Normal file
@@ -0,0 +1,32 @@
import sys

supports_exec = True

from .compat import _winapi as win32  # noqa

if sys.platform.startswith("java"):
    _billiard = None
else:
    try:
        import _billiard  # noqa
    except ImportError:
        import _multiprocessing as _billiard  # noqa
        supports_exec = False


def ensure_multiprocessing():
    if _billiard is None:
        raise NotImplementedError("multiprocessing not supported")


def ensure_SemLock():
    try:
        from _billiard import SemLock  # noqa
    except ImportError:
        try:
            from _multiprocessing import SemLock  # noqa
        except ImportError:
            raise ImportError("""\
This platform lacks a functioning sem_open implementation, therefore,
the required synchronization primitives needed will not function,
see issue 3770.""")
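A hedged sketch of how callers can use these guards to fail early rather than deep inside pool start-up:

    from billiard._ext import ensure_multiprocessing, ensure_SemLock

    ensure_multiprocessing()  # NotImplementedError where _billiard is None (e.g. Jython)
    ensure_SemLock()          # ImportError where sem_open is non-functional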
114
ETB-API/venv/lib/python3.12/site-packages/billiard/_win.py
Normal file
@@ -0,0 +1,114 @@
"""
billiard._win
~~~~~~~~~~~~~

Windows utilities to terminate process groups.

"""

import os

# psutil is painfully slow in win32. So to avoid adding big
# dependencies like pywin32 a ctypes based solution is preferred

# Code based on the winappdbg project http://winappdbg.sourceforge.net/
# (BSD License)
from ctypes import (
    byref, sizeof, windll,
    Structure, WinError, POINTER,
    c_size_t, c_char, c_void_p,
)
from ctypes.wintypes import DWORD, LONG

ERROR_NO_MORE_FILES = 18
INVALID_HANDLE_VALUE = c_void_p(-1).value


class PROCESSENTRY32(Structure):
    _fields_ = [
        ('dwSize', DWORD),
        ('cntUsage', DWORD),
        ('th32ProcessID', DWORD),
        ('th32DefaultHeapID', c_size_t),
        ('th32ModuleID', DWORD),
        ('cntThreads', DWORD),
        ('th32ParentProcessID', DWORD),
        ('pcPriClassBase', LONG),
        ('dwFlags', DWORD),
        ('szExeFile', c_char * 260),
    ]
LPPROCESSENTRY32 = POINTER(PROCESSENTRY32)


def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0):
    hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags,
                                                         th32ProcessID)
    if hSnapshot == INVALID_HANDLE_VALUE:
        raise WinError()
    return hSnapshot


def Process32First(hSnapshot, pe=None):
    return _Process32n(windll.kernel32.Process32First, hSnapshot, pe)


def Process32Next(hSnapshot, pe=None):
    return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe)


def _Process32n(fun, hSnapshot, pe=None):
    if pe is None:
        pe = PROCESSENTRY32()
    pe.dwSize = sizeof(PROCESSENTRY32)
    success = fun(hSnapshot, byref(pe))
    if not success:
        if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES:
            return
        raise WinError()
    return pe


def get_all_processes_pids():
    """Return a dictionary with all processes pids as keys and their
    parents as value. Ignore processes with no parents.
    """
    h = CreateToolhelp32Snapshot()
    parents = {}
    pe = Process32First(h)
    while pe:
        if pe.th32ParentProcessID:
            parents[pe.th32ProcessID] = pe.th32ParentProcessID
        pe = Process32Next(h, pe)

    return parents


def get_processtree_pids(pid, include_parent=True):
    """Return a list with all the pids of a process tree"""
    parents = get_all_processes_pids()
    all_pids = list(parents.keys())
    pids = {pid}
    while 1:
        pids_new = pids.copy()

        for _pid in all_pids:
            if parents[_pid] in pids:
                pids_new.add(_pid)

        if pids_new == pids:
            break

        pids = pids_new.copy()

    if not include_parent:
        pids.remove(pid)

    return list(pids)


def kill_processtree(pid, signum):
    """Kill a process and all its descendants"""
    family_pids = get_processtree_pids(pid)

    for _pid in family_pids:
        os.kill(_pid, signum)
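Illustrative use of the process-tree helpers above (Windows only, since they go through ctypes.windll; the kill call is shown commented out because it signals real processes, and `some_pid` is hypothetical):

    import os
    import signal
    from billiard._win import get_processtree_pids

    pids = get_processtree_pids(os.getpid())
    print(pids)  # this process plus all of its descendants
    # from billiard._win import kill_processtree
    # kill_processtree(some_pid, signal.SIGTERM)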
156
ETB-API/venv/lib/python3.12/site-packages/billiard/common.py
Normal file
@@ -0,0 +1,156 @@
"""
This module contains utilities added by billiard, to keep
"non-core" functionality out of ``.util``."""

import os
import signal
import sys

import pickle

from .exceptions import RestartFreqExceeded
from time import monotonic

pickle_load = pickle.load
pickle_loads = pickle.loads

# cPickle.loads does not support buffer() objects,
# but we can just create a StringIO and use load.
from io import BytesIO


SIGMAP = dict(
    (getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG')
)
for _alias_sig in ('SIGHUP', 'SIGABRT'):
    try:
        # Alias for deprecated signal overwrites the name we want
        SIGMAP[getattr(signal, _alias_sig)] = _alias_sig
    except AttributeError:
        pass


TERM_SIGNAL, TERM_SIGNAME = signal.SIGTERM, 'SIGTERM'
REMAP_SIGTERM = os.environ.get('REMAP_SIGTERM')
if REMAP_SIGTERM:
    TERM_SIGNAL, TERM_SIGNAME = (
        getattr(signal, REMAP_SIGTERM), REMAP_SIGTERM)


TERMSIGS_IGNORE = {'SIGTERM'} if REMAP_SIGTERM else set()
TERMSIGS_FORCE = {'SIGQUIT'} if REMAP_SIGTERM else set()

EX_SOFTWARE = 70

TERMSIGS_DEFAULT = {
    'SIGHUP',
    'SIGQUIT',
    TERM_SIGNAME,
    'SIGUSR1',
}

TERMSIGS_FULL = {
    'SIGHUP',
    'SIGQUIT',
    'SIGTRAP',
    'SIGABRT',
    'SIGEMT',
    'SIGSYS',
    'SIGPIPE',
    'SIGALRM',
    TERM_SIGNAME,
    'SIGXCPU',
    'SIGXFSZ',
    'SIGVTALRM',
    'SIGPROF',
    'SIGUSR1',
    'SIGUSR2',
}

#: set by signal handlers just before calling exit.
#: if this is true after the sighandler returns it means that something
#: went wrong while terminating the process, and :func:`os._exit`
#: must be called ASAP.
_should_have_exited = [False]


def human_status(status):
    if (status or 0) < 0:
        try:
            return 'signal {0} ({1})'.format(-status, SIGMAP[-status])
        except KeyError:
            return 'signal {0}'.format(-status)
    return 'exitcode {0}'.format(status)


def pickle_loads(s, load=pickle_load):
    # used to support buffer objects
    return load(BytesIO(s))


def maybe_setsignal(signum, handler):
    try:
        signal.signal(signum, handler)
    except (OSError, AttributeError, ValueError, RuntimeError):
        pass


def _shutdown_cleanup(signum, frame):
    # we will exit here so if the signal is received a second time
    # we can be sure that something is very wrong and we may be in
    # a crashing loop.
    if _should_have_exited[0]:
        os._exit(EX_SOFTWARE)
    maybe_setsignal(signum, signal.SIG_DFL)
    _should_have_exited[0] = True
    sys.exit(-(256 - signum))


def signum(sig):
    return getattr(signal, sig, None)


def _should_override_term_signal(sig, current):
    return (
        sig in TERMSIGS_FORCE or
        (current is not None and current != signal.SIG_IGN)
    )


def reset_signals(handler=_shutdown_cleanup, full=False):
    for sig in TERMSIGS_FULL if full else TERMSIGS_DEFAULT:
        num = signum(sig)
        if num:
            if _should_override_term_signal(sig, signal.getsignal(num)):
                maybe_setsignal(num, handler)
    for sig in TERMSIGS_IGNORE:
        num = signum(sig)
        if num:
            maybe_setsignal(num, signal.SIG_IGN)


class restart_state:
    RestartFreqExceeded = RestartFreqExceeded

    def __init__(self, maxR, maxT):
        self.maxR, self.maxT = maxR, maxT
        self.R, self.T = 0, None

    def step(self, now=None):
        now = monotonic() if now is None else now
        R = self.R
        if self.T and now - self.T >= self.maxT:
            # maxT passed, reset counter and time passed.
            self.T, self.R = now, 0
        elif self.maxR and self.R >= self.maxR:
            # verify that R has a value as the result handler
            # resets this when a job is accepted. If a job is accepted
            # the startup probably went fine (startup restart burst
            # protection)
            if self.R:  # pragma: no cover
                self.R = 0  # reset in case someone catches the error
                raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT))
        # first run sets T
        if self.T is None:
            self.T = now
        self.R += 1
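A small sketch of the restart_state throttle defined above: it permits at most maxR steps inside any maxT-second window and raises once the budget is exhausted.

    from billiard.common import restart_state
    from billiard.exceptions import RestartFreqExceeded

    state = restart_state(maxR=5, maxT=60)  # 5 restarts per 60s window
    try:
        for _ in range(6):
            state.step()
    except RestartFreqExceeded:
        print('restarting too fast; backing off')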
279
ETB-API/venv/lib/python3.12/site-packages/billiard/compat.py
Normal file
@@ -0,0 +1,279 @@
import errno
import numbers
import os
import subprocess
import sys

from itertools import zip_longest

if sys.platform == 'win32':
    try:
        import _winapi  # noqa
    except ImportError:  # pragma: no cover
        from _multiprocessing import win32 as _winapi  # noqa
else:
    _winapi = None  # noqa

try:
    import resource
except ImportError:  # pragma: no cover
    resource = None

from io import UnsupportedOperation
FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation)


if hasattr(os, 'write'):
    __write__ = os.write

    def send_offset(fd, buf, offset):
        return __write__(fd, buf[offset:])

else:  # non-posix platform

    def send_offset(fd, buf, offset):  # noqa
        raise NotImplementedError('send_offset')


try:
    fsencode = os.fsencode
    fsdecode = os.fsdecode
except AttributeError:
    def _fscodec():
        encoding = sys.getfilesystemencoding()
        if encoding == 'mbcs':
            errors = 'strict'
        else:
            errors = 'surrogateescape'

        def fsencode(filename):
            """
            Encode filename to the filesystem encoding with 'surrogateescape'
            error handler, return bytes unchanged. On Windows, use 'strict'
            error handler if the file system encoding is 'mbcs' (which is the
            default encoding).
            """
            if isinstance(filename, bytes):
                return filename
            elif isinstance(filename, str):
                return filename.encode(encoding, errors)
            else:
                raise TypeError("expect bytes or str, not %s"
                                % type(filename).__name__)

        def fsdecode(filename):
            """
            Decode filename from the filesystem encoding with 'surrogateescape'
            error handler, return str unchanged. On Windows, use 'strict' error
            handler if the file system encoding is 'mbcs' (which is the default
            encoding).
            """
            if isinstance(filename, str):
                return filename
            elif isinstance(filename, bytes):
                return filename.decode(encoding, errors)
            else:
                raise TypeError("expect bytes or str, not %s"
                                % type(filename).__name__)

        return fsencode, fsdecode

    fsencode, fsdecode = _fscodec()
    del _fscodec


def maybe_fileno(f):
    """Get object fileno, or :const:`None` if not defined."""
    if isinstance(f, numbers.Integral):
        return f
    try:
        return f.fileno()
    except FILENO_ERRORS:
        pass


def get_fdmax(default=None):
    """Return the maximum number of open file descriptors
    on this system.

    :keyword default: Value returned if there's no file
                      descriptor limit.

    """
    try:
        return os.sysconf('SC_OPEN_MAX')
    except:
        pass
    if resource is None:  # Windows
        return default
    fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if fdmax == resource.RLIM_INFINITY:
        return default
    return fdmax


def uniq(it):
    """Return all unique elements in ``it``, preserving order."""
    seen = set()
    return (seen.add(obj) or obj for obj in it if obj not in seen)


try:
    closerange = os.closerange
except AttributeError:

    def closerange(fd_low, fd_high):  # noqa
        for fd in reversed(range(fd_low, fd_high)):
            try:
                os.close(fd)
            except OSError as exc:
                if exc.errno != errno.EBADF:
                    raise

    def close_open_fds(keep=None):
        # must make sure this is 0-inclusive (Issue #celery/1882)
        keep = list(uniq(sorted(
            f for f in map(maybe_fileno, keep or []) if f is not None
        )))
        maxfd = get_fdmax(default=2048)
        kL, kH = iter([-1] + keep), iter(keep + [maxfd])
        for low, high in zip_longest(kL, kH):
            if low + 1 != high:
                closerange(low + 1, high)
else:
    def close_open_fds(keep=None):  # noqa
        keep = [maybe_fileno(f)
                for f in (keep or []) if maybe_fileno(f) is not None]
        for fd in reversed(range(get_fdmax(default=2048))):
            if fd not in keep:
                try:
                    os.close(fd)
                except OSError as exc:
                    if exc.errno != errno.EBADF:
                        raise


def get_errno(exc):
    """:exc:`socket.error` and :exc:`IOError` first got
    the ``.errno`` attribute in Py2.7"""
    try:
        return exc.errno
    except AttributeError:
        return 0


try:
    import _posixsubprocess
except ImportError:
    def spawnv_passfds(path, args, passfds):
        if sys.platform != 'win32':
            # when not using _posixsubprocess (on earlier python) and not on
            # windows, we want to keep stdout/stderr open...
            passfds = passfds + [
                maybe_fileno(sys.stdout),
                maybe_fileno(sys.stderr),
            ]
        pid = os.fork()
        if not pid:
            close_open_fds(keep=sorted(f for f in passfds if f))
            os.execv(fsencode(path), args)
        return pid
else:
    def spawnv_passfds(path, args, passfds):
        passfds = sorted(passfds)
        errpipe_read, errpipe_write = os.pipe()
        try:
            args = [
                args, [fsencode(path)], True, tuple(passfds), None, None,
                -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
                False, False]
            if sys.version_info >= (3, 11):
                args.append(-1)  # process_group
            if sys.version_info >= (3, 9):
                args.extend((None, None, None, -1))  # group, extra_groups, user, umask
            args.append(None)  # preexec_fn
            if sys.version_info >= (3, 11):
                args.append(subprocess._USE_VFORK)
            return _posixsubprocess.fork_exec(*args)
        finally:
            os.close(errpipe_read)
            os.close(errpipe_write)


if sys.platform == 'win32':

    def setblocking(handle, blocking):
        raise NotImplementedError('setblocking not implemented on win32')

    def isblocking(handle):
        raise NotImplementedError('isblocking not implemented on win32')

else:
    from os import O_NONBLOCK
    from fcntl import fcntl, F_GETFL, F_SETFL

    def isblocking(handle):  # noqa
        return not (fcntl(handle, F_GETFL) & O_NONBLOCK)

    def setblocking(handle, blocking):  # noqa
        flags = fcntl(handle, F_GETFL, 0)
        fcntl(
            handle, F_SETFL,
            flags & (~O_NONBLOCK) if blocking else flags | O_NONBLOCK,
        )


E_PSUTIL_MISSING = """
On Windows, the ability to inspect memory usage requires the psutil library.

You can install it using pip:

    $ pip install psutil
"""


E_RESOURCE_MISSING = """
Your platform ({0}) does not seem to have the `resource.getrusage' function.

Please open an issue so that we can add support for this platform.
"""


if sys.platform == 'win32':

    try:
        import psutil
    except ImportError:  # pragma: no cover
        psutil = None  # noqa

    def mem_rss():
        # type () -> int
        if psutil is None:
            raise ImportError(E_PSUTIL_MISSING.strip())
        return int(psutil.Process(os.getpid()).memory_info()[0] / 1024.0)

else:
    try:
        from resource import getrusage, RUSAGE_SELF
    except ImportError:  # pragma: no cover
        getrusage = RUSAGE_SELF = None  # noqa

    if 'bsd' in sys.platform or sys.platform == 'darwin':
        # On BSD platforms :man:`getrusage(2)` ru_maxrss field is in bytes.

        def maxrss_to_kb(v):
            # type: (SupportsInt) -> int
            return int(v) / 1024.0

    else:
        # On Linux it's kilobytes.

        def maxrss_to_kb(v):
            # type: (SupportsInt) -> int
            return int(v)

    def mem_rss():
        # type () -> int
        if resource is None:
            raise ImportError(E_RESOURCE_MISSING.strip().format(sys.platform))
        return maxrss_to_kb(getrusage(RUSAGE_SELF).ru_maxrss)
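Illustrative calls into the portability helpers above (close_open_fds is left commented out because closing descriptors affects the running process):

    import sys
    from billiard.compat import get_fdmax, uniq

    print(get_fdmax(default=2048))      # upper bound on open file descriptors
    print(list(uniq([1, 2, 2, 3, 1])))  # -> [1, 2, 3]
    # from billiard.compat import close_open_fds
    # close_open_fds(keep=[sys.stdin, sys.stdout, sys.stderr])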
1034
ETB-API/venv/lib/python3.12/site-packages/billiard/connection.py
Normal file
File diff suppressed because it is too large
420
ETB-API/venv/lib/python3.12/site-packages/billiard/context.py
Normal file
@@ -0,0 +1,420 @@
import os
import sys
import threading
import warnings

from . import process

__all__ = []            # things are copied from here to __init__.py


W_NO_EXECV = """\
force_execv is not supported as the billiard C extension \
is not installed\
"""


#
# Exceptions
#

from .exceptions import (  # noqa
    ProcessError,
    BufferTooShort,
    TimeoutError,
    AuthenticationError,
    TimeLimitExceeded,
    SoftTimeLimitExceeded,
    WorkerLostError,
)


#
# Base type for contexts
#

class BaseContext:

    ProcessError = ProcessError
    BufferTooShort = BufferTooShort
    TimeoutError = TimeoutError
    AuthenticationError = AuthenticationError
    TimeLimitExceeded = TimeLimitExceeded
    SoftTimeLimitExceeded = SoftTimeLimitExceeded
    WorkerLostError = WorkerLostError

    current_process = staticmethod(process.current_process)
    active_children = staticmethod(process.active_children)

    if hasattr(os, 'cpu_count'):
        def cpu_count(self):
            '''Returns the number of CPUs in the system'''
            num = os.cpu_count()
            if num is None:
                raise NotImplementedError('cannot determine number of cpus')
            else:
                return num
    else:
        def cpu_count(self):  # noqa
            if sys.platform == 'win32':
                try:
                    num = int(os.environ['NUMBER_OF_PROCESSORS'])
                except (ValueError, KeyError):
                    num = 0
            elif 'bsd' in sys.platform or sys.platform == 'darwin':
                comm = '/sbin/sysctl -n hw.ncpu'
                if sys.platform == 'darwin':
                    comm = '/usr' + comm
                try:
                    with os.popen(comm) as p:
                        num = int(p.read())
                except ValueError:
                    num = 0
            else:
                try:
                    num = os.sysconf('SC_NPROCESSORS_ONLN')
                except (ValueError, OSError, AttributeError):
                    num = 0

            if num >= 1:
                return num
            else:
                raise NotImplementedError('cannot determine number of cpus')

    def Manager(self):
        '''Returns a manager associated with a running server process

        The managers methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        '''
        from .managers import SyncManager
        m = SyncManager(ctx=self.get_context())
        m.start()
        return m

    def Pipe(self, duplex=True, rnonblock=False, wnonblock=False):
        '''Returns two connection object connected by a pipe'''
        from .connection import Pipe
        return Pipe(duplex, rnonblock, wnonblock)

    def Lock(self):
        '''Returns a non-recursive lock object'''
        from .synchronize import Lock
        return Lock(ctx=self.get_context())

    def RLock(self):
        '''Returns a recursive lock object'''
        from .synchronize import RLock
        return RLock(ctx=self.get_context())

    def Condition(self, lock=None):
        '''Returns a condition object'''
        from .synchronize import Condition
        return Condition(lock, ctx=self.get_context())

    def Semaphore(self, value=1):
        '''Returns a semaphore object'''
        from .synchronize import Semaphore
        return Semaphore(value, ctx=self.get_context())

    def BoundedSemaphore(self, value=1):
        '''Returns a bounded semaphore object'''
        from .synchronize import BoundedSemaphore
        return BoundedSemaphore(value, ctx=self.get_context())

    def Event(self):
        '''Returns an event object'''
        from .synchronize import Event
        return Event(ctx=self.get_context())

    def Barrier(self, parties, action=None, timeout=None):
        '''Returns a barrier object'''
        from .synchronize import Barrier
        return Barrier(parties, action, timeout, ctx=self.get_context())

    def Queue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, ctx=self.get_context())

    def JoinableQueue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import JoinableQueue
        return JoinableQueue(maxsize, ctx=self.get_context())

    def SimpleQueue(self):
        '''Returns a queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(ctx=self.get_context())

    def Pool(self, processes=None, initializer=None, initargs=(),
             maxtasksperchild=None, timeout=None, soft_timeout=None,
             lost_worker_timeout=None, max_restarts=None,
             max_restart_freq=1, on_process_up=None, on_process_down=None,
             on_timeout_set=None, on_timeout_cancel=None, threads=True,
             semaphore=None, putlocks=False, allow_restart=False):
        '''Returns a process pool object'''
        from .pool import Pool
        return Pool(processes, initializer, initargs, maxtasksperchild,
                    timeout, soft_timeout, lost_worker_timeout,
                    max_restarts, max_restart_freq, on_process_up,
                    on_process_down, on_timeout_set, on_timeout_cancel,
                    threads, semaphore, putlocks, allow_restart,
                    context=self.get_context())

    def RawValue(self, typecode_or_type, *args):
        '''Returns a shared object'''
        from .sharedctypes import RawValue
        return RawValue(typecode_or_type, *args)

    def RawArray(self, typecode_or_type, size_or_initializer):
        '''Returns a shared array'''
        from .sharedctypes import RawArray
        return RawArray(typecode_or_type, size_or_initializer)

    def Value(self, typecode_or_type, *args, **kwargs):
        '''Returns a synchronized shared object'''
        from .sharedctypes import Value
        lock = kwargs.get('lock', True)
        return Value(typecode_or_type, *args, lock=lock,
                     ctx=self.get_context())

    def Array(self, typecode_or_type, size_or_initializer, *args, **kwargs):
        '''Returns a synchronized shared array'''
        from .sharedctypes import Array
        lock = kwargs.get('lock', True)
        return Array(typecode_or_type, size_or_initializer, lock=lock,
                     ctx=self.get_context())

    def freeze_support(self):
        '''Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        '''
        if sys.platform == 'win32' and getattr(sys, 'frozen', False):
            from .spawn import freeze_support
            freeze_support()

    def get_logger(self):
        '''Return package logger -- if it does not already exist then
        it is created.
        '''
        from .util import get_logger
        return get_logger()

    def log_to_stderr(self, level=None):
        '''Turn on logging and add a handler which prints to stderr'''
        from .util import log_to_stderr
        return log_to_stderr(level)

    def allow_connection_pickling(self):
        '''Install support for sending connections and sockets
        between processes
        '''
        # This is undocumented. In previous versions of multiprocessing
        # its only effect was to make socket objects inheritable on Windows.
        from . import connection  # noqa

    def set_executable(self, executable):
        '''Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method. Useful for people embedding Python.
        '''
        from .spawn import set_executable
        set_executable(executable)

    def set_forkserver_preload(self, module_names):
        '''Set list of module names to try to load in forkserver process.
        This is really just a hint.
        '''
        from .forkserver import set_forkserver_preload
        set_forkserver_preload(module_names)

    def get_context(self, method=None):
        if method is None:
            return self
        try:
            ctx = _concrete_contexts[method]
        except KeyError:
            raise ValueError('cannot find context for %r' % method)
        ctx._check_available()
        return ctx

    def get_start_method(self, allow_none=False):
        return self._name

    def set_start_method(self, method=None):
        raise ValueError('cannot set start method of concrete context')

    def forking_is_enabled(self):
        # XXX for compatibility with billiard <3.4
        return (self.get_start_method() or 'fork') == 'fork'

    def forking_enable(self, value):
        # XXX for compatibility with billiard <3.4
        if not value:
            from ._ext import supports_exec
            if supports_exec:
                self.set_start_method('spawn', force=True)
            else:
                warnings.warn(RuntimeWarning(W_NO_EXECV))

    def _check_available(self):
        pass

#
# Type of default context -- underlying context can be set at most once
#


class Process(process.BaseProcess):
    _start_method = None

    @staticmethod
    def _Popen(process_obj):
        return _default_context.get_context().Process._Popen(process_obj)


class DefaultContext(BaseContext):
    Process = Process

    def __init__(self, context):
        self._default_context = context
        self._actual_context = None

    def get_context(self, method=None):
        if method is None:
            if self._actual_context is None:
                self._actual_context = self._default_context
            return self._actual_context
        else:
            return super(DefaultContext, self).get_context(method)

    def set_start_method(self, method, force=False):
        if self._actual_context is not None and not force:
            raise RuntimeError('context has already been set')
        if method is None and force:
            self._actual_context = None
            return
        self._actual_context = self.get_context(method)

    def get_start_method(self, allow_none=False):
        if self._actual_context is None:
            if allow_none:
                return None
            self._actual_context = self._default_context
        return self._actual_context._name

    def get_all_start_methods(self):
        if sys.platform == 'win32':
            return ['spawn']
        else:
            from . import reduction
            if reduction.HAVE_SEND_HANDLE:
                return ['fork', 'spawn', 'forkserver']
            else:
                return ['fork', 'spawn']

DefaultContext.__all__ = list(x for x in dir(DefaultContext) if x[0] != '_')

#
# Context types for fixed start method
#

if sys.platform != 'win32':

    class ForkProcess(process.BaseProcess):
        _start_method = 'fork'

        @staticmethod
        def _Popen(process_obj):
            from .popen_fork import Popen
            return Popen(process_obj)

    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'

        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_posix import Popen
            return Popen(process_obj)

    class ForkServerProcess(process.BaseProcess):
        _start_method = 'forkserver'

        @staticmethod
        def _Popen(process_obj):
            from .popen_forkserver import Popen
            return Popen(process_obj)

    class ForkContext(BaseContext):
        _name = 'fork'
        Process = ForkProcess

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    class ForkServerContext(BaseContext):
        _name = 'forkserver'
        Process = ForkServerProcess

        def _check_available(self):
            from . import reduction
            if not reduction.HAVE_SEND_HANDLE:
                raise ValueError('forkserver start method not available')

    _concrete_contexts = {
        'fork': ForkContext(),
        'spawn': SpawnContext(),
        'forkserver': ForkServerContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['fork'])

else:

    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'

        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_win32 import Popen
            return Popen(process_obj)

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    _concrete_contexts = {
        'spawn': SpawnContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['spawn'])

#
# Force the start method
#


def _force_start_method(method):
    _default_context._actual_context = _concrete_contexts[method]

#
# Check that the current thread is spawning a child process
#

_tls = threading.local()


def get_spawning_popen():
    return getattr(_tls, 'spawning_popen', None)


def set_spawning_popen(popen):
    _tls.spawning_popen = popen


def assert_spawning(obj):
    if get_spawning_popen() is None:
        raise RuntimeError(
            '%s objects should only be shared between processes'
            ' through inheritance' % type(obj).__name__
        )
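A minimal sketch of the context API defined above, as re-exported from the billiard package (the put_hello helper is hypothetical):

    import billiard

    def put_hello(q):
        q.put('hello')  # runs in the child process

    if __name__ == '__main__':
        ctx = billiard.get_context('spawn')  # 'fork'/'forkserver' also exist on POSIX
        q = ctx.Queue()
        p = ctx.Process(target=put_hello, args=(q,))
        p.start()
        print(q.get())  # -> 'hello'
        p.join()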
166
ETB-API/venv/lib/python3.12/site-packages/billiard/dummy/__init__.py
Normal file
@@ -0,0 +1,166 @@
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

#
# Imports
#

import threading
import sys
import weakref
import array

from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event

from queue import Queue

from billiard.connection import Pipe

__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]


class DummyProcess(threading.Thread):

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False
        self._parent = current_process()

    def start(self):
        assert self._parent is current_process()
        self._start_called = True
        if hasattr(self._parent, '_children'):
            self._parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        if self._start_called and not self.is_alive():
            return 0
        else:
            return None


try:
    _Condition = threading._Condition
except AttributeError:  # Py3
    _Condition = threading.Condition  # noqa


class Condition(_Condition):
    if sys.version_info[0] == 3:
        notify_all = _Condition.notifyAll
    else:
        notify_all = _Condition.notifyAll.__func__


Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()


def active_children():
    children = current_process()._children
    for p in list(children):
        if not p.is_alive():
            children.pop(p, None)
    return list(children)


def freeze_support():
    pass


class Namespace(object):

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        items = list(self.__dict__.items())
        temp = []
        for name, value in items:
            if not name.startswith('_'):
                temp.append('%s=%r' % (name, value))
        temp.sort()
        return '%s(%s)' % (self.__class__.__name__, str.join(', ', temp))


dict = dict
list = list


def Array(typecode, sequence, lock=True):
    return array.array(typecode, sequence)


class Value(object):

    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    def _get(self):
        return self._value

    def _set(self, value):
        self._value = value
    value = property(_get, _set)

    def __repr__(self):
        return '<%r(%r, %r)>' % (type(self).__name__,
                                 self._typecode, self._value)


def Manager():
    return sys.modules[__name__]


def shutdown():
    pass


def Pool(processes=None, initializer=None, initargs=()):
    from billiard.pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)


JoinableQueue = Queue
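A short sketch of the thread-backed API above: billiard.dummy mirrors the process API, but DummyProcess and Pool run on threads (the `double` helper is hypothetical):

    from billiard import dummy

    def double(x):
        return x * 2  # runs in a thread, not a process

    pool = dummy.Pool(4)
    print(pool.map(double, [1, 2, 3]))  # -> [2, 4, 6]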
Binary file not shown.
92
ETB-API/venv/lib/python3.12/site-packages/billiard/dummy/connection.py
Normal file
@@ -0,0 +1,92 @@
#
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

from queue import Queue

__all__ = ['Client', 'Listener', 'Pipe']

families = [None]


class Listener(object):

    def __init__(self, address=None, family=None, backlog=1):
        self._backlog_queue = Queue(backlog)

    def accept(self):
        return Connection(*self._backlog_queue.get())

    def close(self):
        self._backlog_queue = None

    address = property(lambda self: self._backlog_queue)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()


def Client(address):
    _in, _out = Queue(), Queue()
    address.put((_out, _in))
    return Connection(_in, _out)


def Pipe(duplex=True):
    a, b = Queue(), Queue()
    return Connection(a, b), Connection(b, a)


class Connection(object):

    def __init__(self, _in, _out):
        self._out = _out
        self._in = _in
        self.send = self.send_bytes = _out.put
        self.recv = self.recv_bytes = _in.get

    def poll(self, timeout=0.0):
        if self._in.qsize() > 0:
            return True
        if timeout <= 0.0:
            return False
        self._in.not_empty.acquire()
        self._in.not_empty.wait(timeout)
        self._in.not_empty.release()
        return self._in.qsize() > 0

    def close(self):
        pass
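Illustrative use of the queue-backed Pipe above -- both ends live in the same process, so this is safe to run anywhere:

    from billiard.dummy.connection import Pipe

    left, right = Pipe()
    left.send('ping')
    print(right.recv())  # -> 'ping'
    print(right.poll())  # False: the underlying queue is drained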
192
ETB-API/venv/lib/python3.12/site-packages/billiard/einfo.py
Normal file
@@ -0,0 +1,192 @@
import sys
import traceback

__all__ = ['ExceptionInfo', 'Traceback']

DEFAULT_MAX_FRAMES = sys.getrecursionlimit() // 8


class _Code:

    def __init__(self, code):
        self.co_filename = code.co_filename
        self.co_name = code.co_name
        self.co_argcount = code.co_argcount
        self.co_cellvars = ()
        self.co_firstlineno = code.co_firstlineno
        self.co_flags = code.co_flags
        self.co_freevars = ()
        self.co_code = b''
        self.co_lnotab = b''
        self.co_names = code.co_names
        self.co_nlocals = code.co_nlocals
        self.co_stacksize = code.co_stacksize
        self.co_varnames = ()
        if sys.version_info >= (3, 11):
            self._co_positions = list(code.co_positions())

    if sys.version_info >= (3, 11):
        @property
        def co_positions(self):
            return self._co_positions.__iter__


class _Frame:
    Code = _Code

    def __init__(self, frame):
        self.f_builtins = {}
        self.f_globals = {
            "__file__": frame.f_globals.get("__file__", "__main__"),
            "__name__": frame.f_globals.get("__name__"),
            "__loader__": None,
        }
        self.f_locals = fl = {}
        try:
            fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"]
        except KeyError:
            pass
        self.f_back = None
        self.f_trace = None
        self.f_exc_traceback = None
        self.f_exc_type = None
        self.f_exc_value = None
        self.f_code = self.Code(frame.f_code)
        self.f_lineno = frame.f_lineno
        self.f_lasti = frame.f_lasti
        # don't want to hit https://bugs.python.org/issue21967
        self.f_restricted = False

    if sys.version_info >= (3, 11):
        @property
        def co_positions(self):
            return self.f_code.co_positions


class _Object:

    def __init__(self, **kw):
        [setattr(self, k, v) for k, v in kw.items()]

    if sys.version_info >= (3, 11):
        __default_co_positions__ = ((None, None, None, None),)

        @property
        def co_positions(self):
            return getattr(
                self,
                "_co_positions",
                self.__default_co_positions__
            ).__iter__

        @co_positions.setter
        def co_positions(self, value):
            self._co_positions = value  # noqa


class _Truncated:

    def __init__(self):
        self.tb_lineno = -1
        self.tb_frame = _Object(
            f_globals={"__file__": "",
                       "__name__": "",
                       "__loader__": None},
            f_fileno=None,
            f_code=_Object(co_filename="...",
                           co_name="[rest of traceback truncated]"),
        )
        self.tb_next = None
        self.tb_lasti = 0

    if sys.version_info >= (3, 11):
        @property
        def co_positions(self):
            return self.tb_frame.co_positions


class Traceback:
    Frame = _Frame

    def __init__(self, tb, max_frames=DEFAULT_MAX_FRAMES, depth=0):
        self.tb_frame = self.Frame(tb.tb_frame)
        self.tb_lineno = tb.tb_lineno
        self.tb_lasti = tb.tb_lasti
        self.tb_next = None
        if tb.tb_next is not None:
            if depth <= max_frames:
                self.tb_next = Traceback(tb.tb_next, max_frames, depth + 1)
            else:
                self.tb_next = _Truncated()


class RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = tb

    def __str__(self):
        return self.tb


class ExceptionWithTraceback(Exception):
    def __init__(self, exc, tb):
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb
        super().__init__()

    def __str__(self):
        return self.tb

    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)


def rebuild_exc(exc, tb):
    exc.__cause__ = RemoteTraceback(tb)
    return exc


class ExceptionInfo:
    """Exception wrapping an exception and its traceback.

    :param exc_info: The exception info tuple as returned by
        :func:`sys.exc_info`.

    """

    #: Exception type.
    type = None

    #: Exception instance.
    exception = None

    #: Pickleable traceback instance for use with :mod:`traceback`
    tb = None

    #: String representation of the traceback.
    traceback = None

    #: Set to true if this is an internal error.
    internal = False

    def __init__(self, exc_info=None, internal=False):
        self.type, exception, tb = exc_info or sys.exc_info()
        try:
            self.tb = Traceback(tb)
            self.traceback = ''.join(
                traceback.format_exception(self.type, exception, tb),
            )
            self.internal = internal
        finally:
            del tb
        self.exception = ExceptionWithTraceback(exception, self.traceback)

    def __str__(self):
        return self.traceback

    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.exception, )

    @property
    def exc_info(self):
        return self.type, self.exception, self.tb
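A minimal sketch of capturing a pickleable traceback with the ExceptionInfo class defined above:

    import pickle
    from billiard.einfo import ExceptionInfo

    try:
        1 / 0
    except ZeroDivisionError:
        einfo = ExceptionInfo()  # captures sys.exc_info()

    print(einfo.type)                  # <class 'ZeroDivisionError'>
    print(str(einfo).splitlines()[0])  # first line of the formatted traceback
    # the wrapped exception survives a round-trip between processes
    pickle.loads(pickle.dumps(einfo.exception))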
52
ETB-API/venv/lib/python3.12/site-packages/billiard/exceptions.py
Normal file
@@ -0,0 +1,52 @@
try:
    from multiprocessing import (
        ProcessError,
        BufferTooShort,
        TimeoutError,
        AuthenticationError,
    )
except ImportError:
    class ProcessError(Exception):  # noqa
        pass

    class BufferTooShort(ProcessError):  # noqa
        pass

    class TimeoutError(ProcessError):  # noqa
        pass

    class AuthenticationError(ProcessError):  # noqa
        pass


class TimeLimitExceeded(Exception):
    """The time limit has been exceeded and the job has been terminated."""

    def __str__(self):
        return "TimeLimitExceeded%s" % (self.args, )


class SoftTimeLimitExceeded(Exception):
    """The soft time limit has been exceeded. This exception is raised
    to give the task a chance to clean up."""

    def __str__(self):
        return "SoftTimeLimitExceeded%s" % (self.args, )


class WorkerLostError(Exception):
    """The worker processing a job has exited prematurely."""


class Terminated(Exception):
    """The worker processing a job has been terminated by user request."""


class RestartFreqExceeded(Exception):
    """Restarts too fast."""


class CoroStop(Exception):
    """Coroutine exit, as opposed to StopIteration which may
    mean it should be restarted."""
    pass
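A hedged sketch of how a long-running job might react to the soft limit (`run_job` is hypothetical; in billiard pools configured with a soft_timeout, SoftTimeLimitExceeded is raised inside the worker so the task can clean up before the hard limit):

    import time
    from billiard.exceptions import SoftTimeLimitExceeded

    def run_job():
        try:
            time.sleep(10)  # stand-in for real work
        except SoftTimeLimitExceeded:
            print('soft limit hit, cleaning up')  # last chance to clean up
            raise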
264
ETB-API/venv/lib/python3.12/site-packages/billiard/forkserver.py
Normal file
@@ -0,0 +1,264 @@
|
||||
import errno
|
||||
import os
|
||||
import selectors
|
||||
import signal
|
||||
import socket
|
||||
import struct
|
||||
import sys
|
||||
import threading
|
||||
|
||||
from . import connection
|
||||
from . import process
|
||||
from . import reduction
|
||||
from . import semaphore_tracker
|
||||
from . import spawn
|
||||
from . import util
|
||||
|
||||
from .compat import spawnv_passfds
|
||||
|
||||
__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
|
||||
'set_forkserver_preload']
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
MAXFDS_TO_SEND = 256
|
||||
UNSIGNED_STRUCT = struct.Struct('Q') # large enough for pid_t
|
||||
|
||||
#
|
||||
# Forkserver class
|
||||
#
|
||||
|
||||
|
||||
class ForkServer:
|
||||
|
||||
def __init__(self):
|
||||
self._forkserver_address = None
|
||||
self._forkserver_alive_fd = None
|
||||
self._inherited_fds = None
|
||||
self._lock = threading.Lock()
|
||||
self._preload_modules = ['__main__']
|
||||
|
||||
def set_forkserver_preload(self, modules_names):
|
||||
'''Set list of module names to try to load in forkserver process.'''
|
||||
if not all(type(mod) is str for mod in self._preload_modules):
|
||||
raise TypeError('module_names must be a list of strings')
|
||||
self._preload_modules = modules_names
|
||||
|
||||
def get_inherited_fds(self):
|
||||
'''Return list of fds inherited from parent process.
|
||||
|
||||
This returns None if the current process was not started by fork
|
||||
server.
|
||||
'''
|
||||
return self._inherited_fds
|
||||
|
||||
def connect_to_new_process(self, fds):
|
||||
'''Request forkserver to create a child process.
|
||||
|
||||
Returns a pair of fds (status_r, data_w). The calling process can read
|
||||
the child process's pid and (eventually) its returncode from status_r.
|
||||
The calling process should write to data_w the pickled preparation and
|
||||
process data.
|
||||
'''
|
||||
self.ensure_running()
|
||||
if len(fds) + 4 >= MAXFDS_TO_SEND:
|
||||
raise ValueError('too many fds')
|
||||
with socket.socket(socket.AF_UNIX) as client:
|
||||
client.connect(self._forkserver_address)
|
||||
parent_r, child_w = os.pipe()
|
||||
child_r, parent_w = os.pipe()
|
||||
allfds = [child_r, child_w, self._forkserver_alive_fd,
|
||||
semaphore_tracker.getfd()]
|
||||
allfds += fds
|
||||
try:
|
||||
reduction.sendfds(client, allfds)
|
||||
return parent_r, parent_w
|
||||
except:
|
||||
os.close(parent_r)
|
||||
os.close(parent_w)
|
||||
raise
|
||||
finally:
|
||||
os.close(child_r)
|
||||
os.close(child_w)
|
||||
|
||||
def ensure_running(self):
|
||||
'''Make sure that a fork server is running.
|
||||
|
||||
This can be called from any process. Note that usually a child
|
||||
process will just reuse the forkserver started by its parent, so
|
||||
ensure_running() will do nothing.
|
||||
'''
|
||||
with self._lock:
|
||||
semaphore_tracker.ensure_running()
|
||||
if self._forkserver_alive_fd is not None:
|
||||
return
|
||||
|
||||
cmd = ('from billiard.forkserver import main; ' +
|
||||
'main(%d, %d, %r, **%r)')
|
||||
|
||||
if self._preload_modules:
|
||||
desired_keys = {'main_path', 'sys_path'}
|
||||
data = spawn.get_preparation_data('ignore')
|
||||
data = {
|
||||
x: y for (x, y) in data.items() if x in desired_keys
|
||||
}
|
||||
else:
|
||||
data = {}
|
||||
|
||||
with socket.socket(socket.AF_UNIX) as listener:
|
||||
address = connection.arbitrary_address('AF_UNIX')
|
||||
listener.bind(address)
|
||||
os.chmod(address, 0o600)
|
||||
listener.listen()
|
||||
|
||||
# all client processes own the write end of the "alive" pipe;
|
||||
# when they all terminate the read end becomes ready.
|
||||
alive_r, alive_w = os.pipe()
|
||||
try:
|
||||
fds_to_pass = [listener.fileno(), alive_r]
|
||||
cmd %= (listener.fileno(), alive_r, self._preload_modules,
|
||||
data)
|
||||
exe = spawn.get_executable()
|
||||
args = [exe] + util._args_from_interpreter_flags()
|
||||
args += ['-c', cmd]
|
||||
spawnv_passfds(exe, args, fds_to_pass)
|
||||
except:
|
||||
os.close(alive_w)
|
||||
raise
|
||||
finally:
|
||||
os.close(alive_r)
|
||||
self._forkserver_address = address
|
||||
self._forkserver_alive_fd = alive_w
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
|
||||
'''Run forkserver.'''
|
||||
if preload:
|
||||
if '__main__' in preload and main_path is not None:
|
||||
process.current_process()._inheriting = True
|
||||
try:
|
||||
spawn.import_main_path(main_path)
|
||||
finally:
|
||||
del process.current_process()._inheriting
|
||||
for modname in preload:
|
||||
try:
|
||||
__import__(modname)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# close sys.stdin
|
||||
if sys.stdin is not None:
|
||||
try:
|
||||
sys.stdin.close()
|
||||
sys.stdin = open(os.devnull)
|
||||
except (OSError, ValueError):
|
||||
pass
|
||||
|
||||
# ignoring SIGCHLD means no need to reap zombie processes
|
||||
handler = signal.signal(signal.SIGCHLD, signal.SIG_IGN)
|
||||
with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
|
||||
selectors.DefaultSelector() as selector:
|
||||
_forkserver._forkserver_address = listener.getsockname()
|
||||
selector.register(listener, selectors.EVENT_READ)
|
||||
selector.register(alive_r, selectors.EVENT_READ)
|
||||
|
||||
while True:
|
||||
try:
|
||||
while True:
|
||||
rfds = [key.fileobj for (key, events) in selector.select()]
|
||||
if rfds:
|
||||
break
|
||||
|
||||
if alive_r in rfds:
|
||||
# EOF because no more client processes left
|
||||
assert os.read(alive_r, 1) == b''
|
||||
raise SystemExit
|
||||
|
||||
assert listener in rfds
|
||||
with listener.accept()[0] as s:
|
||||
code = 1
|
||||
if os.fork() == 0:
|
||||
try:
|
||||
_serve_one(s, listener, alive_r, handler)
|
||||
except Exception:
|
||||
sys.excepthook(*sys.exc_info())
|
||||
sys.stderr.flush()
|
||||
finally:
|
||||
os._exit(code)
|
||||
except OSError as e:
|
||||
if e.errno != errno.ECONNABORTED:
|
||||
raise
|
||||
|
||||
|
||||
def __unpack_fds(child_r, child_w, alive, stfd, *inherited):
    return child_r, child_w, alive, stfd, inherited


def _serve_one(s, listener, alive_r, handler):
    # close unnecessary stuff and reset SIGCHLD handler
    listener.close()
    os.close(alive_r)
    signal.signal(signal.SIGCHLD, handler)

    # receive fds from parent process
    fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
    s.close()
    assert len(fds) <= MAXFDS_TO_SEND

    (child_r, child_w, _forkserver._forkserver_alive_fd,
     stfd, _forkserver._inherited_fds) = __unpack_fds(*fds)
    semaphore_tracker._semaphore_tracker._fd = stfd

    # send pid to client processes
    write_unsigned(child_w, os.getpid())

    # reseed random number generator
    if 'random' in sys.modules:
        import random
        random.seed()

    # run process object received over pipe
    code = spawn._main(child_r)

    # write the exit code to the pipe
    write_unsigned(child_w, code)

#
# Read and write unsigned numbers
#


def read_unsigned(fd):
    data = b''
    length = UNSIGNED_STRUCT.size
    while len(data) < length:
        s = os.read(fd, length - len(data))
        if not s:
            raise EOFError('unexpected EOF')
        data += s
    return UNSIGNED_STRUCT.unpack(data)[0]


def write_unsigned(fd, n):
    msg = UNSIGNED_STRUCT.pack(n)
    while msg:
        nbytes = os.write(fd, msg)
        if nbytes == 0:
            raise RuntimeError('should not get here')
        msg = msg[nbytes:]

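# Hedged example (not part of the module): the two helpers above implement a
# fixed-width framing protocol over a raw fd, looping because os.read() and
# os.write() may transfer fewer bytes than requested. The 'Q' format is an
# assumption; the real UNSIGNED_STRUCT is defined near the top of this file
# (not shown in this diff).
#
#     import os, struct
#     UNSIGNED_STRUCT = struct.Struct('Q')
#     r, w = os.pipe()
#     write_unsigned(w, 12345)
#     assert read_unsigned(r) == 12345
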
#
#
#

_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
285
ETB-API/venv/lib/python3.12/site-packages/billiard/heap.py
Normal file
@@ -0,0 +1,285 @@
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import bisect
import errno
import io
import mmap
import os
import sys
import threading
import tempfile

from . import context
from . import reduction
from . import util

from ._ext import _billiard, win32

__all__ = ['BufferWrapper']

PY3 = sys.version_info[0] == 3

#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#

if sys.platform == 'win32':

    class Arena:

        _rand = tempfile._RandomNameSequence()

        def __init__(self, size):
            self.size = size
            for i in range(100):
                name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
                buf = mmap.mmap(-1, size, tagname=name)
                if win32.GetLastError() == 0:
                    break
                # we have reopened a preexisting map
                buf.close()
            else:
                exc = IOError('Cannot find name for new mmap')
                exc.errno = errno.EEXIST
                raise exc
            self.name = name
            self.buffer = buf
            self._state = (self.size, self.name)

        def __getstate__(self):
            context.assert_spawning(self)
            return self._state

        def __setstate__(self, state):
            self.size, self.name = self._state = state
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            # XXX Temporarily preventing buildbot failures while determining
            # XXX the correct long-term fix. See issue #23060
            # assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS

else:

    class Arena:

        def __init__(self, size, fd=-1):
            self.size = size
            self.fd = fd
            if fd == -1:
                if PY3:
                    self.fd, name = tempfile.mkstemp(
                        prefix='pym-%d-' % (os.getpid(),),
                        dir=util.get_temp_dir(),
                    )

                    os.unlink(name)
                    util.Finalize(self, os.close, (self.fd,))
                    with io.open(self.fd, 'wb', closefd=False) as f:
                        bs = 1024 * 1024
                        if size >= bs:
                            zeros = b'\0' * bs
                            for _ in range(size // bs):
                                f.write(zeros)
                            del zeros
                        f.write(b'\0' * (size % bs))
                        assert f.tell() == size
                else:
                    self.fd, name = tempfile.mkstemp(
                        prefix='pym-%d-' % (os.getpid(),),
                        dir=util.get_temp_dir(),
                    )
                    os.unlink(name)
                    util.Finalize(self, os.close, (self.fd,))
                    os.ftruncate(self.fd, size)
            self.buffer = mmap.mmap(self.fd, self.size)

def reduce_arena(a):
    if a.fd == -1:
        raise ValueError('Arena is unpicklable because '
                         'forking was enabled when it was created')
    return rebuild_arena, (a.size, reduction.DupFd(a.fd))


def rebuild_arena(size, dupfd):
    return Arena(size, dupfd.detach())

reduction.register(Arena, reduce_arena)

#
# Class allowing allocation of chunks of memory from arenas
#


class Heap:

    _alignment = 8

    def __init__(self, size=mmap.PAGESIZE):
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        self._size = size
        self._lengths = []
        self._len_to_seq = {}
        self._start_to_block = {}
        self._stop_to_block = {}
        self._allocated_blocks = set()
        self._arenas = []
        # list of pending blocks to free - see free() comment below
        self._pending_free_blocks = []

    @staticmethod
    def _roundup(n, alignment):
        # alignment must be a power of 2
        mask = alignment - 1
        return (n + mask) & ~mask

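    # Worked example for _roundup (power-of-two rounding via bit masking):
    # with alignment 8 the mask is 0b111, so _roundup(13, 8) computes
    # (13 + 7) & ~7 == 20 & ~7 == 16, while _roundup(16, 8) stays 16.
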
    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            util.info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block

    def _free(self, block):
        # free location and try to merge with neighbours
        (arena, start, stop) = block

        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            start, _ = self._absorb(prev_block)

        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            _, stop = self._absorb(next_block)

        block = (arena, start, stop)
        length = stop - start

        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)

        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block

    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]

        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

        return start, stop

    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held
        while 1:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._allocated_blocks.remove(block)
            self._free(block)

    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
        # trylock is used instead, and if the lock can't be acquired
        # immediately, the block is added to a list of blocks to be freed
        # synchronously some time later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under CPython it's atomic
        # thanks to the GIL).
        assert os.getpid() == self._lastpid
        if not self._lock.acquire(False):
            # can't acquire the lock right now, add the block to the list of
            # pending blocks to free
            self._pending_free_blocks.append(block)
        else:
            # we hold the lock
            try:
                self._free_pending_blocks()
                self._allocated_blocks.remove(block)
                self._free(block)
            finally:
                self._lock.release()

    def malloc(self, size):
        # return a block of right size (possibly rounded up)
        assert 0 <= size < sys.maxsize
        if os.getpid() != self._lastpid:
            self.__init__()  # reinitialize after fork
        with self._lock:
            self._free_pending_blocks()
            size = self._roundup(max(size, 1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            new_stop = start + size
            if new_stop < stop:
                self._free((arena, new_stop, stop))
            block = (arena, start, new_stop)
            self._allocated_blocks.add(block)
            return block

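# Illustrative sketch (not part of the module) of the free-list behaviour:
# malloc() hands out (arena, start, stop) triples, and freeing two adjacent
# blocks lets _free() merge them back into one larger block.
#
#     heap = Heap()
#     a = heap.malloc(100)    # rounded up to the 8-byte alignment
#     b = heap.malloc(100)
#     heap.free(a)
#     heap.free(b)            # merged with the block freed above
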
#
# Class representing a chunk of an mmap -- can be inherited
#


class BufferWrapper:

    _heap = Heap()

    def __init__(self, size):
        assert 0 <= size < sys.maxsize
        block = BufferWrapper._heap.malloc(size)
        self._state = (block, size)
        util.Finalize(self, BufferWrapper._heap.free, args=(block,))

    def get_address(self):
        (arena, start, stop), size = self._state
        address, length = _billiard.address_of_buffer(arena.buffer)
        assert size <= length
        return address + start

    def get_size(self):
        return self._state[1]

    def create_memoryview(self):
        (arena, start, stop), size = self._state
        return memoryview(arena.buffer)[start:start + size]
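
# A hedged usage sketch (illustrative only): BufferWrapper hands out a slice
# of mmap-backed memory that is reclaimed when the wrapper is garbage
# collected.
#
#     wrapper = BufferWrapper(64)
#     view = wrapper.create_memoryview()
#     view[:5] = b'hello'
#     assert wrapper.get_size() == 64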
1210
ETB-API/venv/lib/python3.12/site-packages/billiard/managers.py
Normal file
File diff suppressed because it is too large
Load Diff
2053
ETB-API/venv/lib/python3.12/site-packages/billiard/pool.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,89 @@
import os
import sys
import errno

from .common import TERM_SIGNAL

__all__ = ['Popen']

#
# Start child process using fork
#


class Popen:
    method = 'fork'
    sentinel = None

    def __init__(self, process_obj):
        sys.stdout.flush()
        sys.stderr.flush()
        self.returncode = None
        self._launch(process_obj)

    def duplicate_for_child(self, fd):
        return fd

    def poll(self, flag=os.WNOHANG):
        if self.returncode is None:
            while True:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except OSError as e:
                    if e.errno == errno.EINTR:
                        continue
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                else:
                    break
            if pid == self.pid:
                if os.WIFSIGNALED(sts):
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode

    def wait(self, timeout=None):
        if self.returncode is None:
            if timeout is not None:
                from .connection import wait
                if not wait([self.sentinel], timeout):
                    return None
            # This shouldn't block if wait() returned successfully.
            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
        return self.returncode

    def terminate(self):
        if self.returncode is None:
            try:
                os.kill(self.pid, TERM_SIGNAL)
            except OSError as exc:
                if getattr(exc, 'errno', None) != errno.ESRCH:
                    if self.wait(timeout=0.1) is None:
                        raise

    def _launch(self, process_obj):
        code = 1
        parent_r, child_w = os.pipe()
        self.pid = os.fork()
        if self.pid == 0:
            try:
                os.close(parent_r)
                if 'random' in sys.modules:
                    import random
                    random.seed()
                code = process_obj._bootstrap()
            finally:
                os._exit(code)
        else:
            os.close(child_w)
            self.sentinel = parent_r

    def close(self):
        if self.sentinel is not None:
            try:
                os.close(self.sentinel)
            finally:
                self.sentinel = None
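
# Sketch (illustrative, not billiard code) of the sentinel idiom used by
# _launch() above: the parent keeps the read end of a pipe whose write end
# only the child holds, so the fd becomes readable (EOF) exactly when the
# child exits, which makes it usable with select()/connection.wait().
#
#     import os, select
#     parent_r, child_w = os.pipe()
#     pid = os.fork()
#     if pid == 0:
#         os._exit(0)                    # child exits; its fds are closed
#     os.close(child_w)
#     select.select([parent_r], [], [])  # returns once the child is gone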
@@ -0,0 +1,68 @@
import io
import os

from . import reduction
from . import context
from . import forkserver
from . import popen_fork
from . import spawn

__all__ = ['Popen']

#
# Wrapper for an fd used while launching a process
#


class _DupFd:

    def __init__(self, ind):
        self.ind = ind

    def detach(self):
        return forkserver.get_inherited_fds()[self.ind]

#
# Start child process using a server process
#


class Popen(popen_fork.Popen):
    method = 'forkserver'
    DupFd = _DupFd

    def __init__(self, process_obj):
        self._fds = []
        super().__init__(process_obj)

    def duplicate_for_child(self, fd):
        self._fds.append(fd)
        return len(self._fds) - 1

    def _launch(self, process_obj):
        prep_data = spawn.get_preparation_data(process_obj._name)
        buf = io.BytesIO()
        context.set_spawning_popen(self)
        try:
            reduction.dump(prep_data, buf)
            reduction.dump(process_obj, buf)
        finally:
            context.set_spawning_popen(None)

        self.sentinel, w = forkserver.connect_to_new_process(self._fds)
        with io.open(w, 'wb', closefd=True) as f:
            f.write(buf.getbuffer())
        self.pid = forkserver.read_unsigned(self.sentinel)

    def poll(self, flag=os.WNOHANG):
        if self.returncode is None:
            from .connection import wait
            timeout = 0 if flag == os.WNOHANG else None
            if not wait([self.sentinel], timeout):
                return None
            try:
                self.returncode = forkserver.read_unsigned(self.sentinel)
            except (OSError, EOFError):
                # The process ended abnormally perhaps because of a signal
                self.returncode = 255
        return self.returncode
@@ -0,0 +1,74 @@
import io
import os

from . import context
from . import popen_fork
from . import reduction
from . import spawn

from .compat import spawnv_passfds

__all__ = ['Popen']


#
# Wrapper for an fd used while launching a process
#

class _DupFd:

    def __init__(self, fd):
        self.fd = fd

    def detach(self):
        return self.fd

#
# Start child process using a fresh interpreter
#


class Popen(popen_fork.Popen):
    method = 'spawn'
    DupFd = _DupFd

    def __init__(self, process_obj):
        self._fds = []
        super().__init__(process_obj)

    def duplicate_for_child(self, fd):
        self._fds.append(fd)
        return fd

    def _launch(self, process_obj):
        os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1"
        spawn._Django_old_layout_hack__save()
        from . import semaphore_tracker
        tracker_fd = semaphore_tracker.getfd()
        self._fds.append(tracker_fd)
        prep_data = spawn.get_preparation_data(process_obj._name)
        fp = io.BytesIO()
        context.set_spawning_popen(self)
        try:
            reduction.dump(prep_data, fp)
            reduction.dump(process_obj, fp)
        finally:
            context.set_spawning_popen(None)

        parent_r = child_w = child_r = parent_w = None
        try:
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            cmd = spawn.get_command_line(tracker_fd=tracker_fd,
                                         pipe_handle=child_r)
            self._fds.extend([child_r, child_w])
            self.pid = spawnv_passfds(
                spawn.get_executable(), cmd, self._fds,
            )
            self.sentinel = parent_r
            with io.open(parent_w, 'wb', closefd=False) as f:
                f.write(fp.getvalue())
        finally:
            for fd in (child_r, child_w, parent_w):
                if fd is not None:
                    os.close(fd)
@@ -0,0 +1,121 @@
import io
import os
import msvcrt
import signal
import sys

from . import context
from . import spawn
from . import reduction

from .compat import _winapi

__all__ = ['Popen']

#
#
#

TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#


if sys.platform == 'win32':
    try:
        from _winapi import CreateProcess, GetExitCodeProcess
        close_thread_handle = _winapi.CloseHandle
    except ImportError:  # Py2.7
        from _subprocess import CreateProcess, GetExitCodeProcess

        def close_thread_handle(handle):
            handle.Close()


class Popen:
    '''
    Start a subprocess to run the code of a process object
    '''
    method = 'spawn'
    sentinel = None

    def __init__(self, process_obj):
        os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1"
        spawn._Django_old_layout_hack__save()
        prep_data = spawn.get_preparation_data(process_obj._name)

        # read end of pipe will be "stolen" by the child process
        # -- see spawn_main() in spawn.py.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        wfd = msvcrt.open_osfhandle(whandle, 0)
        cmd = spawn.get_command_line(parent_pid=os.getpid(),
                                     pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)

        with io.open(wfd, 'wb', closefd=True) as to_child:
            # start process
            try:
                hp, ht, pid, tid = CreateProcess(
                    spawn.get_executable(), cmd,
                    None, None, False, 0, None, None, None)
                close_thread_handle(ht)
            except:
                _winapi.CloseHandle(rhandle)
                raise

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)

            # send information to child
            context.set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                context.set_spawning_popen(None)

    def close(self):
        if self.sentinel is not None:
            try:
                _winapi.CloseHandle(self.sentinel)
            finally:
                self.sentinel = None

    def duplicate_for_child(self, handle):
        assert self is context.get_spawning_popen()
        return reduction.duplicate(handle, self.sentinel)

    def wait(self, timeout=None):
        if self.returncode is None:
            if timeout is None:
                msecs = _winapi.INFINITE
            else:
                msecs = max(0, int(timeout * 1000 + 0.5))

            res = _winapi.WaitForSingleObject(int(self._handle), msecs)
            if res == _winapi.WAIT_OBJECT_0:
                code = GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    code = -signal.SIGTERM
                self.returncode = code

        return self.returncode

    def poll(self):
        return self.wait(timeout=0)

    def terminate(self):
        if self.returncode is None:
            try:
                _winapi.TerminateProcess(int(self._handle), TERMINATE)
            except OSError:
                if self.wait(timeout=1.0) is None:
                    raise
400
ETB-API/venv/lib/python3.12/site-packages/billiard/process.py
Normal file
@@ -0,0 +1,400 @@
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
#
# Imports
#

import os
import sys
import signal
import itertools
import logging
import threading
from _weakrefset import WeakSet

from multiprocessing import process as _mproc

try:
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    ORIGINAL_DIR = None

__all__ = ['BaseProcess', 'Process', 'current_process', 'active_children']

#
# Public functions
#


def current_process():
    '''
    Return process object representing the current process
    '''
    return _current_process


def _set_current_process(process):
    global _current_process
    _current_process = _mproc._current_process = process


def _cleanup():
    # check for processes which have finished
    for p in list(_children):
        if p._popen.poll() is not None:
            _children.discard(p)


def _maybe_flush(f):
    try:
        f.flush()
    except (AttributeError, EnvironmentError, NotImplementedError):
        pass


def active_children(_cleanup=_cleanup):
    '''
    Return list of process objects corresponding to live child processes
    '''
    try:
        _cleanup()
    except TypeError:
        # called after gc collect so _cleanup does not exist anymore
        return []
    return list(_children)

class BaseProcess:
    '''
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    '''

    def _Popen(self):
        raise NotImplementedError()

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, daemon=None, **_kw):
        assert group is None, 'group argument must be None for now'
        count = next(_process_counter)
        self._identity = _current_process._identity + (count, )
        self._config = _current_process._config.copy()
        self._parent_pid = os.getpid()
        self._popen = None
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = (
            name or type(self).__name__ + '-' +
            ':'.join(str(i) for i in self._identity)
        )
        if daemon is not None:
            self.daemon = daemon
        if _dangling is not None:
            _dangling.add(self)

        self._controlled_termination = False

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
            'can only start a process object created by current process'
        _cleanup()
        self._popen = self._Popen(self)
        self._sentinel = self._popen.sentinel
        _children.add(self)

    def close(self):
        if self._popen is not None:
            self._popen.close()

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._popen.terminate()

    def terminate_controlled(self):
        self._controlled_termination = True
        self.terminate()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        if res is not None:
            _children.discard(self)
            self.close()

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'
        if self._popen is None:
            return False
        self._popen.poll()
        return self._popen.returncode is None

    def _is_alive(self):
        if self._popen is None:
            return False
        return self._popen.poll() is None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):  # noqa
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._config.get('daemon', False)

    @daemon.setter  # noqa
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        assert self._popen is None, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter  # noqa
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        self._config['authkey'] = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        if self._popen is None:
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        if self is _current_process:
            return os.getpid()
        else:
            return self._popen and self._popen.pid

    pid = ident

    @property
    def sentinel(self):
        '''
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        '''
        try:
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started")

    @property
    def _counter(self):
        # compat for 2.7
        return _process_counter

    @property
    def _children(self):
        # compat for 2.7
        return _children

    @property
    def _authkey(self):
        # compat for 2.7
        return self.authkey

    @property
    def _daemonic(self):
        # compat for 2.7
        return self.daemon

    @property
    def _tempdir(self):
        # compat for 2.7
        return self._config.get('tempdir')

    def __repr__(self):
        if self is _current_process:
            status = 'started'
        elif self._parent_pid != os.getpid():
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            if self._popen.poll() is not None:
                status = self.exitcode
            else:
                status = 'started'

        if type(status) is int:
            if status == 0:
                status = 'stopped'
            else:
                status = 'stopped[%s]' % _exitcode_to_name.get(status, status)

        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
                                   status, self.daemon and ' daemon' or '')

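    # Hedged usage sketch (illustrative only; in practice you would use the
    # concrete Process class exported at the bottom of this module):
    #
    #     def work(x):
    #         print('child got', x)
    #
    #     p = Process(target=work, args=(42,))
    #     p.start()
    #     p.join()
    #     assert p.exitcode == 0
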
    ##

    def _bootstrap(self):
        from . import util, context
        global _current_process, _process_counter, _children

        try:
            if self._start_method is not None:
                context._force_start_method(self._start_method)
            _process_counter = itertools.count(1)
            _children = set()
            if sys.stdin is not None:
                try:
                    sys.stdin.close()
                    sys.stdin = open(os.devnull)
                except (EnvironmentError, OSError, ValueError):
                    pass
            old_process = _current_process
            _set_current_process(self)

            # Re-init logging system.
            # Workaround for https://bugs.python.org/issue6721/#msg140215
            # Python logging module uses RLock() objects which are broken
            # after fork. This can result in a deadlock (Celery Issue #496).
            loggerDict = logging.Logger.manager.loggerDict
            logger_names = list(loggerDict.keys())
            logger_names.append(None)  # for root logger
            for name in logger_names:
                if not name or not isinstance(loggerDict[name],
                                              logging.PlaceHolder):
                    for handler in logging.getLogger(name).handlers:
                        handler.createLock()
            logging._lock = threading.RLock()

            try:
                util._finalizer_registry.clear()
                util._run_after_forkers()
            finally:
                # delay finalization of the old process object until after
                # _run_after_forkers() is executed
                del old_process
            util.info('child process %s calling self.run()', self.pid)
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as exc:
            if not exc.args:
                exitcode = 1
            elif isinstance(exc.args[0], int):
                exitcode = exc.args[0]
            else:
                sys.stderr.write(str(exc.args[0]) + '\n')
                _maybe_flush(sys.stderr)
                exitcode = 0 if isinstance(exc.args[0], str) else 1
        except:
            exitcode = 1
            if not util.error('Process %s', self.name, exc_info=True):
                import traceback
                sys.stderr.write('Process %s:\n' % self.name)
                traceback.print_exc()
        finally:
            util.info('process %s exiting with exitcode %d',
                      self.pid, exitcode)
            _maybe_flush(sys.stdout)
            _maybe_flush(sys.stderr)

        return exitcode

#
# We subclass bytes to avoid accidental transmission of auth keys over network
#


class AuthenticationString(bytes):

    def __reduce__(self):
        from .context import get_spawning_popen

        if get_spawning_popen() is None:
            raise TypeError(
                'Pickling an AuthenticationString object is '
                'disallowed for security reasons')
        return AuthenticationString, (bytes(self),)

#
# Create object representing the main process
#


class _MainProcess(BaseProcess):

    def __init__(self):
        self._identity = ()
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._config = {'authkey': AuthenticationString(os.urandom(32)),
                        'semprefix': '/mp'}

_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
del _MainProcess


Process = BaseProcess

#
# Give names to some return codes
#

_exitcode_to_name = {}

for name, signum in signal.__dict__.items():
    if name[:3] == 'SIG' and '_' not in name:
        _exitcode_to_name[-signum] = name

# For debug and leak testing
_dangling = WeakSet()
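
# Sketch (illustrative only) of the pickling guard in AuthenticationString:
# outside of a spawn in progress, attempting to pickle the key raises rather
# than silently serialising it.
#
#     import pickle
#     key = AuthenticationString(b'secret')
#     try:
#         pickle.dumps(key)
#     except TypeError:
#         pass    # expected: only allowed while spawning a child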
403
ETB-API/venv/lib/python3.12/site-packages/billiard/queues.py
Normal file
@@ -0,0 +1,403 @@
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import sys
import os
import threading
import collections
import weakref
import errno

from . import connection
from . import context

from .compat import get_errno
from time import monotonic
from queue import Empty, Full
from .util import (
    debug, error, info, Finalize, register_after_fork, is_exiting,
)
from .reduction import ForkingPickler

__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']


class Queue:
    '''
    Queue type using a pipe, buffer and thread
    '''
    def __init__(self, maxsize=0, *args, **kwargs):
        try:
            ctx = kwargs['ctx']
        except KeyError:
            raise TypeError('missing 1 required keyword-only argument: ctx')
        if maxsize <= 0:
            # Can raise ImportError (see issues #3770 and #23400)
            from .synchronize import SEM_VALUE_MAX as maxsize  # noqa
        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = ctx.Lock()
        self._sem = ctx.BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        context.assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._send_bytes = self._writer.send_bytes
        self._recv_bytes = self._reader.recv_bytes
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()

    def get(self, block=True, timeout=None):
        if block and timeout is None:
            with self._rlock:
                res = self._recv_bytes()
            self._sem.release()

        else:
            if block:
                deadline = monotonic() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    timeout = deadline - monotonic()
                    if timeout < 0 or not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv_bytes()
                self._sem.release()
            finally:
                self._rlock.release()
        # unserialize the data after having released the lock
        return ForkingPickler.loads(res)

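    # Worked example of the deadline arithmetic in get(): the timeout budget
    # is shared between acquiring the read lock and polling the pipe. With
    # timeout=1.0, if the lock took 0.3s to acquire, the remaining poll gets
    # deadline - monotonic() ~= 0.7s; a negative remainder means Empty.
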
    def qsize(self):
        # Raises NotImplementedError on macOS because
        # of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        try:
            self._reader.close()
        finally:
            close = self._close
            if close:
                self._close = None
                close()

    def join_thread(self):
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes,
                  self._wlock, self._writer.close, self._ignore_epipe),
            name='QueueFeederThread'
        )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process. Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
        )

    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        with notempty:
            buffer.append(_sentinel)
            notempty.notify()

    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj = ForkingPickler.dumps(obj)
                        if wacquire is None:
                            send_bytes(obj)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as exc:
            if ignore_epipe and get_errno(exc) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %r', exc, exc_info=True)
                else:
                    if not error('error in queue thread: %r', exc,
                                 exc_info=True):
                        import traceback
                        traceback.print_exc()
            except Exception:
                pass

_sentinel = object()


class JoinableQueue(Queue):
    '''
    A queue type which also supports join() and task_done() methods

    Note that if you do not call task_done() for each finished task then
    eventually the counter's semaphore may overflow causing Bad Things
    to happen.
    '''

    def __init__(self, maxsize=0, *args, **kwargs):
        try:
            ctx = kwargs['ctx']
        except KeyError:
            raise TypeError('missing 1 required keyword argument: ctx')
        Queue.__init__(self, maxsize, ctx=ctx)
        self._unfinished_tasks = ctx.Semaphore(0)
        self._cond = ctx.Condition()

    def __getstate__(self):
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty:
            with self._cond:
                if self._thread is None:
                    self._start_thread()
                self._buffer.append(obj)
                self._unfinished_tasks.release()
                self._notempty.notify()

    def task_done(self):
        with self._cond:
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()

    def join(self):
        with self._cond:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()


class _SimpleQueue:
    '''
    Simplified Queue type -- really just a locked pipe
    '''

    def __init__(self, rnonblock=False, wnonblock=False, ctx=None):
        self._reader, self._writer = connection.Pipe(
            duplex=False, rnonblock=rnonblock, wnonblock=wnonblock,
        )
        self._poll = self._reader.poll
        self._rlock = self._wlock = None

    def empty(self):
        return not self._poll()

    def __getstate__(self):
        context.assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state

    def get_payload(self):
        return self._reader.recv_bytes()

    def send_payload(self, value):
        self._writer.send_bytes(value)

    def get(self):
        # unserialize the data after having released the lock
        return ForkingPickler.loads(self.get_payload())

    def put(self, obj):
        # serialize the data before acquiring the lock
        self.send_payload(ForkingPickler.dumps(obj))

    def close(self):
        if self._reader is not None:
            try:
                self._reader.close()
            finally:
                self._reader = None

        if self._writer is not None:
            try:
                self._writer.close()
            finally:
                self._writer = None


class SimpleQueue(_SimpleQueue):

    def __init__(self, *args, **kwargs):
        try:
            ctx = kwargs['ctx']
        except KeyError:
            raise TypeError('missing required keyword argument: ctx')
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._wlock = ctx.Lock() if sys.platform != 'win32' else None

    def get_payload(self):
        with self._rlock:
            return self._reader.recv_bytes()

    def send_payload(self, value):
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(value)
        else:
            with self._wlock:
                self._writer.send_bytes(value)
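
# Hedged usage sketch (illustrative only; a real ctx comes from a billiard
# context object) of the task-accounting protocol in JoinableQueue:
#
#     q = ctx.JoinableQueue()
#     q.put('job')
#     item = q.get()          # consumer takes the job ...
#     q.task_done()           # ... and must acknowledge it
#     q.join()                # returns once every put() was acknowledged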
293
ETB-API/venv/lib/python3.12/site-packages/billiard/reduction.py
Normal file
@@ -0,0 +1,293 @@
#
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import functools
import io
import os
import pickle
import socket
import sys

from . import context

__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']

PY3 = sys.version_info[0] == 3


HAVE_SEND_HANDLE = (sys.platform == 'win32' or
                    (hasattr(socket, 'CMSG_LEN') and
                     hasattr(socket, 'SCM_RIGHTS') and
                     hasattr(socket.socket, 'sendmsg')))

#
# Pickler subclass
#


if PY3:
    import copyreg

    class ForkingPickler(pickle.Pickler):
        '''Pickler subclass used by multiprocessing.'''
        _extra_reducers = {}
        _copyreg_dispatch_table = copyreg.dispatch_table

        def __init__(self, *args):
            super(ForkingPickler, self).__init__(*args)
            self.dispatch_table = self._copyreg_dispatch_table.copy()
            self.dispatch_table.update(self._extra_reducers)

        @classmethod
        def register(cls, type, reduce):
            '''Register a reduce function for a type.'''
            cls._extra_reducers[type] = reduce

        @classmethod
        def dumps(cls, obj, protocol=None):
            buf = io.BytesIO()
            cls(buf, protocol).dump(obj)
            return buf.getbuffer()

        @classmethod
        def loadbuf(cls, buf, protocol=None):
            return cls.loads(buf.getbuffer())

        loads = pickle.loads

else:

    class ForkingPickler(pickle.Pickler):  # noqa
        '''Pickler subclass used by multiprocessing.'''
        dispatch = pickle.Pickler.dispatch.copy()

        @classmethod
        def register(cls, type, reduce):
            '''Register a reduce function for a type.'''
            def dispatcher(self, obj):
                rv = reduce(obj)
                self.save_reduce(obj=obj, *rv)
            cls.dispatch[type] = dispatcher

        @classmethod
        def dumps(cls, obj, protocol=None):
            buf = io.BytesIO()
            cls(buf, protocol).dump(obj)
            return buf.getvalue()

        @classmethod
        def loadbuf(cls, buf, protocol=None):
            return cls.loads(buf.getvalue())

        @classmethod
        def loads(cls, buf, loads=pickle.loads):
            if isinstance(buf, io.BytesIO):
                buf = buf.getvalue()
            return loads(buf)
register = ForkingPickler.register


def dump(obj, file, protocol=None):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    ForkingPickler(file, protocol).dump(obj)

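# Sketch (illustrative only) of registering a custom reducer, mirroring how
# this module later registers reducers for sockets and bound methods. Token
# and _reduce_token are hypothetical names:
#
#     class Token:
#         def __init__(self, value):
#             self.value = value
#
#     def _reduce_token(t):
#         return Token, (t.value,)    # (callable, args) rebuild recipe
#
#     register(Token, _reduce_token)
#     data = ForkingPickler.dumps(Token(7))
#     assert ForkingPickler.loads(data).value == 7
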
#
# Platform specific definitions
#

if sys.platform == 'win32':
    # Windows
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    from .compat import _winapi

    def duplicate(handle, target_process=None, inheritable=False):
        '''Duplicate a handle. (target_process is a handle not a pid!)'''
        if target_process is None:
            target_process = _winapi.GetCurrentProcess()
        return _winapi.DuplicateHandle(
            _winapi.GetCurrentProcess(), handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()

    class DupHandle:
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            '''Get the handle. This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)

else:
    # Unix
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array

    # On macOS we should acknowledge receipt of fds -- see Issue14669
    ACKNOWLEDGE = sys.platform == 'darwin'

    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fds = array.array('i', fds)
        msg = bytes([len(fds) % 256])
        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')

    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        msg, ancdata, flags, addr = sock.recvmsg(
            1, socket.CMSG_LEN(bytes_size),
        )
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError(
                    'received %d items of ancdata' % len(ancdata),
                )
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                assert len(a) % 256 == msg[0]
                return list(a)
        except (ValueError, IndexError):
            pass
        raise RuntimeError('Invalid data received')

    def send_handle(conn, handle, destination_pid):  # noqa
        '''Send a handle over a local connection.'''
        fd = conn.fileno()
        with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])

    def recv_handle(conn):  # noqa
        '''Receive a handle over a local connection.'''
        fd = conn.fileno()
        with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]

    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        popen_obj = context.get_spawning_popen()
        if popen_obj is not None:
            return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
        elif HAVE_SEND_HANDLE:
            from . import resource_sharer
            return resource_sharer.DupFd(fd)
        else:
            raise ValueError('SCM_RIGHTS appears not to be available')

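    # Hedged round-trip sketch (illustrative only) of the SCM_RIGHTS fd
    # passing implemented by sendfds()/recvfds(). Note this single-process
    # demo assumes a non-darwin Unix, since the acknowledgement handshake
    # would otherwise block:
    #
    #     import os, socket
    #     left, right = socket.socketpair(socket.AF_UNIX)
    #     r, w = os.pipe()
    #     sendfds(left, [r, w])
    #     new_r, new_w = recvfds(right, 2)  # fresh descriptors, same pipe
    #     os.write(new_w, b'x')
    #     assert os.read(new_r, 1) == b'x'
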
#
# Try making some callable types picklable
#


def _reduce_method(m):
    if m.__self__ is None:
        return getattr, (m.__class__, m.__func__.__name__)
    else:
        return getattr, (m.__self__, m.__func__.__name__)


class _C:
    def f(self):
        pass
register(type(_C().f), _reduce_method)


def _reduce_method_descriptor(m):
    return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)


def _reduce_partial(p):
    return _rebuild_partial, (p.func, p.args, p.keywords or {})


def _rebuild_partial(func, args, keywords):
    return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)

#
# Make sockets picklable
#

if sys.platform == 'win32':

    def _reduce_socket(s):
        from .resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)

    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)

else:

    def _reduce_socket(s):  # noqa
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)

    def _rebuild_socket(df, family, type, proto):  # noqa
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)
@@ -0,0 +1,162 @@
#
# We use a background thread for sharing fds on Unix, and for sharing
# sockets on Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return.  The unpickling process connects
# to the resource sharer, sends the identifier and its pid, and then
# receives the resource.
#

import os
import signal
import socket
import sys
import threading

from . import process
from . import reduction
from . import util

__all__ = ['stop']


if sys.platform == 'win32':
    __all__ += ['DupSocket']

    class DupSocket:
        '''Picklable wrapper for a socket.'''

        def __init__(self, sock):
            new_sock = sock.dup()

            def send(conn, pid):
                share = new_sock.share(pid)
                conn.send_bytes(share)
            self._id = _resource_sharer.register(send, new_sock.close)

        def detach(self):
            '''Get the socket.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                share = conn.recv_bytes()
                return socket.fromshare(share)

else:
    __all__ += ['DupFd']

    class DupFd:
        '''Wrapper for fd which can be used at any time.'''
        def __init__(self, fd):
            new_fd = os.dup(fd)

            def send(conn, pid):
                reduction.send_handle(conn, new_fd, pid)

            def close():
                os.close(new_fd)
            self._id = _resource_sharer.register(send, close)

        def detach(self):
            '''Get the fd.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                return reduction.recv_handle(conn)


class _ResourceSharer:
    '''Manager for resources using background thread.'''
    def __init__(self):
        self._key = 0
        self._cache = {}
        self._old_locks = []
        self._lock = threading.Lock()
        self._listener = None
        self._address = None
        self._thread = None
        util.register_after_fork(self, _ResourceSharer._afterfork)

    def register(self, send, close):
        '''Register resource, returning an identifier.'''
        with self._lock:
            if self._address is None:
                self._start()
            self._key += 1
            self._cache[self._key] = (send, close)
            return (self._address, self._key)

    @staticmethod
    def get_connection(ident):
        '''Return connection from which to receive identified resource.'''
        from .connection import Client
        address, key = ident
        c = Client(address, authkey=process.current_process().authkey)
        c.send((key, os.getpid()))
        return c

    def stop(self, timeout=None):
        '''Stop the background thread and clear registered resources.'''
        from .connection import Client
        with self._lock:
            if self._address is not None:
                c = Client(self._address,
                           authkey=process.current_process().authkey)
                c.send(None)
                c.close()
                self._thread.join(timeout)
                if self._thread.is_alive():
                    util.sub_warning('_ResourceSharer thread did '
                                     'not stop when asked')
                self._listener.close()
                self._thread = None
                self._address = None
                self._listener = None
                for key, (send, close) in self._cache.items():
                    close()
                self._cache.clear()

    def _afterfork(self):
        for key, (send, close) in self._cache.items():
            close()
        self._cache.clear()
        # If self._lock was locked at the time of the fork, it may be broken
        # -- see issue 6721.  Replace it without letting it be gc'ed.
        self._old_locks.append(self._lock)
        self._lock = threading.Lock()
        if self._listener is not None:
            self._listener.close()
        self._listener = None
        self._address = None
        self._thread = None

    def _start(self):
        from .connection import Listener
        assert self._listener is None
        util.debug('starting listener and thread for sending handles')
        self._listener = Listener(authkey=process.current_process().authkey)
        self._address = self._listener.address
        t = threading.Thread(target=self._serve)
        t.daemon = True
        t.start()
        self._thread = t

    def _serve(self):
        if hasattr(signal, 'pthread_sigmask'):
            signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
        while 1:
            try:
                with self._listener.accept() as conn:
                    msg = conn.recv()
                    if msg is None:
                        break
                    key, destination_pid = msg
                    send, close = self._cache.pop(key)
                    try:
                        send(conn, destination_pid)
                    finally:
                        close()
            except:
                if not util.is_exiting():
                    sys.excepthook(*sys.exc_info())


_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
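# A minimal usage sketch (illustrative, not part of the module): on Unix,
# DupFd lets a file descriptor survive pickling by parking a dup of it
# with the background resource sharer until the receiver detaches it:
#
#     import os
#     r, w = os.pipe()
#     df = DupFd(r)       # registers a dup of r with _resource_sharer
#     fd = df.detach()    # in the unpickling process: fetch the fd
#     os.close(fd)
#
# detach() must be called exactly once per DupFd, since _serve() pops the
# cache entry and closes the duplicate after sending it.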
@@ -0,0 +1,146 @@
#
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe.  Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot.  Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#

import io
import os
import signal
import sys
import threading
import warnings
from ._ext import _billiard

from . import spawn
from . import util

from .compat import spawnv_passfds

__all__ = ['ensure_running', 'register', 'unregister']


class SemaphoreTracker:

    def __init__(self):
        self._lock = threading.Lock()
        self._fd = None

    def getfd(self):
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        '''Make sure that semaphore tracker process is running.

        This can be run from any process.  Usually a child process will use
        the semaphore created by its parent.'''
        with self._lock:
            if self._fd is not None:
                return
            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass
            cmd = 'from billiard.semaphore_tracker import main;main(%d)'
            r, w = os.pipe()
            try:
                fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe] + util._args_from_interpreter_flags()
                args += ['-c', cmd % r]
                spawnv_passfds(exe, args, fds_to_pass)
            except:
                os.close(w)
                raise
            else:
                self._fd = w
            finally:
                os.close(r)

    def register(self, name):
        '''Register name of semaphore with semaphore tracker.'''
        self._send('REGISTER', name)

    def unregister(self, name):
        '''Unregister name of semaphore with semaphore tracker.'''
        self._send('UNREGISTER', name)

    def _send(self, cmd, name):
        self.ensure_running()
        msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
        if len(name) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('name too long')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)


_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd


def main(fd):
    '''Run semaphore tracker.'''
    # protect the process from ^C and "killall python" etc
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    cache = set()
    try:
        # keep track of registered/unregistered semaphores
        with io.open(fd, 'rb') as f:
            for line in f:
                try:
                    cmd, name = line.strip().split(b':')
                    if cmd == b'REGISTER':
                        cache.add(name)
                    elif cmd == b'UNREGISTER':
                        cache.remove(name)
                    else:
                        raise RuntimeError('unrecognized command %r' % cmd)
                except Exception:
                    try:
                        sys.excepthook(*sys.exc_info())
                    except:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining semaphores
        if cache:
            try:
                warnings.warn('semaphore_tracker: There appear to be %d '
                              'leaked semaphores to clean up at shutdown' %
                              len(cache))
            except Exception:
                pass
        for name in cache:
            # For some reason the process which created and registered this
            # semaphore has failed to unregister it. Presumably it has died.
            # We therefore unlink it.
            try:
                name = name.decode('ascii')
                try:
                    _billiard.sem_unlink(name)
                except Exception as e:
                    warnings.warn('semaphore_tracker: %r: %s' % (name, e))
            finally:
                pass
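# A minimal sketch of the tracker's pipe protocol (illustrative only; the
# semaphore name below is hypothetical): each client writes a
# newline-terminated ASCII record, and relies on POSIX atomicity of short
# pipe writes so records from different processes never interleave:
#
#     import os
#     r, w = os.pipe()
#     os.write(w, b'REGISTER:/mp-abc123\n')      # what _send() emits
#     os.write(w, b'UNREGISTER:/mp-abc123\n')
#     os.close(w)                                # EOF ends main()'s loop
#     # main(r) would now see an empty cache and unlink nothing.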
@@ -0,0 +1,258 @@
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import ctypes
import sys
import weakref

from . import heap
from . import get_context
from .context import assert_spawning
from .reduction import ForkingPickler

__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']

PY3 = sys.version_info[0] == 3

typecode_to_type = {
    'c': ctypes.c_char, 'u': ctypes.c_wchar,
    'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
    'h': ctypes.c_short, 'H': ctypes.c_ushort,
    'i': ctypes.c_int, 'I': ctypes.c_uint,
    'l': ctypes.c_long, 'L': ctypes.c_ulong,
    'f': ctypes.c_float, 'd': ctypes.c_double
}


def _new_value(type_):
    size = ctypes.sizeof(type_)
    wrapper = heap.BufferWrapper(size)
    return rebuild_ctype(type_, wrapper, None)


def RawValue(typecode_or_type, *args):
    '''
    Returns a ctypes object allocated from shared memory
    '''
    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
    obj = _new_value(type_)
    ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
    obj.__init__(*args)
    return obj


def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a ctypes array allocated from shared memory
    '''
    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
    if isinstance(size_or_initializer, int):
        type_ = type_ * size_or_initializer
        obj = _new_value(type_)
        ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
        return obj
    else:
        type_ = type_ * len(size_or_initializer)
        result = _new_value(type_)
        result.__init__(*size_or_initializer)
        return result


def Value(typecode_or_type, *args, **kwds):
    '''
    Return a synchronization wrapper for a Value
    '''
    lock = kwds.pop('lock', None)
    ctx = kwds.pop('ctx', None)
    if kwds:
        raise ValueError(
            'unrecognized keyword argument(s): %s' % list(kwds.keys()))
    obj = RawValue(typecode_or_type, *args)
    if lock is False:
        return obj
    if lock in (True, None):
        ctx = ctx or get_context()
        lock = ctx.RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("'%r' has no method 'acquire'" % lock)
    return synchronized(obj, lock, ctx=ctx)


def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Return a synchronization wrapper for a RawArray
    '''
    lock = kwds.pop('lock', None)
    ctx = kwds.pop('ctx', None)
    if kwds:
        raise ValueError(
            'unrecognized keyword argument(s): %s' % list(kwds.keys()))
    obj = RawArray(typecode_or_type, size_or_initializer)
    if lock is False:
        return obj
    if lock in (True, None):
        ctx = ctx or get_context()
        lock = ctx.RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("'%r' has no method 'acquire'" % lock)
    return synchronized(obj, lock, ctx=ctx)
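# Usage sketch (illustrative only): Value/Array return synchronized
# wrappers around RawValue/RawArray, so concurrent updates go through
# the wrapper's lock:
#
#     from billiard.sharedctypes import Value, Array
#
#     counter = Value('i', 0)        # shared c_int, guarded by an RLock
#     with counter.get_lock():
#         counter.value += 1         # read-modify-write under the lock
#
#     buf = Array('d', [0.0] * 8)    # shared double array
#     buf[3] = 1.5                   # item access locks internally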

def copy(obj):
    new_obj = _new_value(type(obj))
    ctypes.pointer(new_obj)[0] = obj
    return new_obj


def synchronized(obj, lock=None, ctx=None):
    assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
    ctx = ctx or get_context()

    if isinstance(obj, ctypes._SimpleCData):
        return Synchronized(obj, lock, ctx)
    elif isinstance(obj, ctypes.Array):
        if obj._type_ is ctypes.c_char:
            return SynchronizedString(obj, lock, ctx)
        return SynchronizedArray(obj, lock, ctx)
    else:
        cls = type(obj)
        try:
            scls = class_cache[cls]
        except KeyError:
            names = [field[0] for field in cls._fields_]
            d = dict((name, make_property(name)) for name in names)
            classname = 'Synchronized' + cls.__name__
            scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
        return scls(obj, lock, ctx)

#
# Functions for pickling/unpickling
#


def reduce_ctype(obj):
    assert_spawning(obj)
    if isinstance(obj, ctypes.Array):
        return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
    else:
        return rebuild_ctype, (type(obj), obj._wrapper, None)


def rebuild_ctype(type_, wrapper, length):
    if length is not None:
        type_ = type_ * length
    ForkingPickler.register(type_, reduce_ctype)
    if PY3:
        buf = wrapper.create_memoryview()
        obj = type_.from_buffer(buf)
    else:
        obj = type_.from_address(wrapper.get_address())
    obj._wrapper = wrapper
    return obj

#
# Function to create properties
#


def make_property(name):
    try:
        return prop_cache[name]
    except KeyError:
        d = {}
        exec(template % ((name, ) * 7), d)
        prop_cache[name] = d[name]
        return d[name]


template = '''
def get%s(self):
    self.acquire()
    try:
        return self._obj.%s
    finally:
        self.release()
def set%s(self, value):
    self.acquire()
    try:
        self._obj.%s = value
    finally:
        self.release()
%s = property(get%s, set%s)
'''

prop_cache = {}
class_cache = weakref.WeakKeyDictionary()

#
# Synchronized wrappers
#


class SynchronizedBase:

    def __init__(self, obj, lock=None, ctx=None):
        self._obj = obj
        if lock:
            self._lock = lock
        else:
            ctx = ctx or get_context(force=True)
            self._lock = ctx.RLock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def __reduce__(self):
        assert_spawning(self)
        return synchronized, (self._obj, self._lock)

    def get_obj(self):
        return self._obj

    def get_lock(self):
        return self._lock

    def __repr__(self):
        return '<%s wrapper for %s>' % (type(self).__name__, self._obj)


class Synchronized(SynchronizedBase):
    value = make_property('value')


class SynchronizedArray(SynchronizedBase):

    def __len__(self):
        return len(self._obj)

    def __getitem__(self, i):
        with self:
            return self._obj[i]

    def __setitem__(self, i, value):
        with self:
            self._obj[i] = value

    def __getslice__(self, start, stop):
        with self:
            return self._obj[start:stop]

    def __setslice__(self, start, stop, values):
        with self:
            self._obj[start:stop] = values


class SynchronizedString(SynchronizedArray):
    value = make_property('value')
    raw = make_property('raw')
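# Illustrative note (not part of the module): for a ctypes Structure,
# synchronized() builds a Synchronized<Name> class on the fly, one locked
# property per field, and caches it in class_cache:
#
#     class Point(ctypes.Structure):
#         _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)]
#
#     p = Value(Point, lock=True)   # instance of generated SynchronizedPoint
#     with p.get_lock():
#         p.x += 1                  # each access goes through acquire/release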
389
ETB-API/venv/lib/python3.12/site-packages/billiard/spawn.py
Normal file
@@ -0,0 +1,389 @@
#
# Code used to start processes when using the spawn or forkserver
# start methods.
#
# multiprocessing/spawn.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import io
import os
import pickle
import sys
import runpy
import types
import warnings

from . import get_start_method, set_start_method
from . import process
from . import util

__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
           'get_preparation_data', 'get_command_line', 'import_main_path']

W_OLD_DJANGO_LAYOUT = """\
Will add directory %r to path! This is necessary to accommodate \
pre-Django 1.4 layouts using setup_environ.
You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \
environment variable.
"""

#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#

if sys.platform != 'win32':
    WINEXE = False
    WINSERVICE = False
else:
    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

if WINSERVICE:
    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
    _python_exe = sys.executable


def _module_parent_dir(mod):
    dir, filename = os.path.split(_module_dir(mod))
    if dir == os.curdir or not dir:
        dir = os.getcwd()
    return dir


def _module_dir(mod):
    if '__init__.py' in mod.__file__:
        return os.path.dirname(mod.__file__)
    return mod.__file__


def _Django_old_layout_hack__save():
    if 'DJANGO_PROJECT_DIR' not in os.environ:
        try:
            settings_name = os.environ['DJANGO_SETTINGS_MODULE']
        except KeyError:
            return  # not using Django.

        conf_settings = sys.modules.get('django.conf.settings')
        configured = conf_settings and conf_settings.configured
        try:
            project_name, _ = settings_name.split('.', 1)
        except ValueError:
            return  # not modified by setup_environ

        project = __import__(project_name)
        try:
            project_dir = os.path.normpath(_module_parent_dir(project))
        except AttributeError:
            return  # dynamically generated module (no __file__)
        if configured:
            warnings.warn(UserWarning(
                W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir)
            ))
        os.environ['DJANGO_PROJECT_DIR'] = project_dir


def _Django_old_layout_hack__load():
    try:
        sys.path.append(os.environ['DJANGO_PROJECT_DIR'])
    except KeyError:
        pass


def set_executable(exe):
    global _python_exe
    _python_exe = exe


def get_executable():
    return _python_exe

#
#
#


def is_forking(argv):
    '''
    Return whether commandline indicates we are forking
    '''
    if len(argv) >= 2 and argv[1] == '--billiard-fork':
        return True
    else:
        return False


def freeze_support():
    '''
    Run code for process object if this is not the main process
    '''
    if is_forking(sys.argv):
        kwds = {}
        for arg in sys.argv[2:]:
            name, value = arg.split('=')
            if value == 'None':
                kwds[name] = None
            else:
                kwds[name] = int(value)
        spawn_main(**kwds)
        sys.exit()


def get_command_line(**kwds):
    '''
    Returns prefix of command line used for spawning a child process
    '''
    if getattr(sys, 'frozen', False):
        return ([sys.executable, '--billiard-fork'] +
                ['%s=%r' % item for item in kwds.items()])
    else:
        prog = 'from billiard.spawn import spawn_main; spawn_main(%s)'
        prog %= ', '.join('%s=%r' % item for item in kwds.items())
        opts = util._args_from_interpreter_flags()
        return [_python_exe] + opts + ['-c', prog, '--billiard-fork']
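# Illustrative example (not part of the module; paths and handles are
# hypothetical): for a non-frozen interpreter,
# get_command_line(pipe_handle=11, parent_pid=1234) is expected to
# produce something like
#
#     ['/usr/bin/python3', '-c',
#      'from billiard.spawn import spawn_main; '
#      'spawn_main(pipe_handle=11, parent_pid=1234)',
#      '--billiard-fork']
#
# so the child re-enters spawn_main() and unpickles its Process object
# from the inherited pipe.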

def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
    '''
    Run code specified by data received over pipe
    '''
    assert is_forking(sys.argv)
    if sys.platform == 'win32':
        import msvcrt
        from .reduction import steal_handle
        new_handle = steal_handle(parent_pid, pipe_handle)
        fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
    else:
        from . import semaphore_tracker
        semaphore_tracker._semaphore_tracker._fd = tracker_fd
        fd = pipe_handle
    exitcode = _main(fd)
    sys.exit(exitcode)


def _setup_logging_in_child_hack():
    # Huge hack to make logging before Process.run work.
    try:
        os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
    except KeyError:
        pass
    except AttributeError:
        pass
    loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
    logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
    format = os.environ.get("_MP_FORK_LOGFORMAT_")
    if loglevel:
        from . import util
        import logging
        logger = util.get_logger()
        logger.setLevel(int(loglevel))
        if not logger.handlers:
            logger._rudimentary_setup = True
            logfile = logfile or sys.__stderr__
            if hasattr(logfile, "write"):
                handler = logging.StreamHandler(logfile)
            else:
                handler = logging.FileHandler(logfile)
            formatter = logging.Formatter(
                format or util.DEFAULT_LOGGING_FORMAT,
            )
            handler.setFormatter(formatter)
            logger.addHandler(handler)


def _main(fd):
    _Django_old_layout_hack__load()
    with io.open(fd, 'rb', closefd=True) as from_parent:
        process.current_process()._inheriting = True
        try:
            preparation_data = pickle.load(from_parent)
            prepare(preparation_data)
            _setup_logging_in_child_hack()
            self = pickle.load(from_parent)
        finally:
            del process.current_process()._inheriting
    return self._bootstrap()


def _check_not_importing_main():
    if getattr(process.current_process(), '_inheriting', False):
        raise RuntimeError('''
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.''')


def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=process.current_process().authkey,
    )

    if util._logger is not None:
        d['log_level'] = util._logger.getEffectiveLevel()

    sys_path = sys.path[:]
    try:
        i = sys_path.index('')
    except ValueError:
        pass
    else:
        sys_path[i] = process.ORIGINAL_DIR

    d.update(
        name=name,
        sys_path=sys_path,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd(),
        start_method=get_start_method(),
    )

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    main_module = sys.modules['__main__']
    try:
        main_mod_name = main_module.__spec__.name
    except AttributeError:
        main_mod_name = main_module.__name__
    if main_mod_name is not None:
        d['init_main_from_name'] = main_mod_name
    elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
        main_path = getattr(main_module, '__file__', None)
        if main_path is not None:
            if (not os.path.isabs(main_path) and
                    process.ORIGINAL_DIR is not None):
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            d['init_main_from_path'] = os.path.normpath(main_path)

    return d

#
# Prepare current process
#


old_main_modules = []


def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'start_method' in data:
        set_start_method(data['start_method'])

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])

# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses


def _fixup_main_from_name(mod_name):
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules['__main__']
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return

    # If this process was forked, __main__ may already be populated
    try:
        current_main_name = current_main.__spec__.name
    except AttributeError:
        current_main_name = current_main.__name__

    if current_main_name == mod_name:
        return

    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly.  We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(mod_name,
                                    run_name="__mp_main__",
                                    alter_sys=True)
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module


def _fixup_main_from_path(main_path):
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules['__main__']

    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == 'ipython':
        return

    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, '__file__', None) == main_path:
        return

    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path,
                                  run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module


def import_main_path(main_path):
    '''
    Set sys.modules['__main__'] to module at main_path
    '''
    _fixup_main_from_path(main_path)
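# Rough shape of the spawn handshake (illustrative only): the parent
# writes two pickles into the pipe and the child consumes them in _main():
#
#     parent side                         child side (spawn_main -> _main)
#     -----------                         --------------------------------
#     dump(get_preparation_data(name))    preparation_data = pickle.load(fp)
#     dump(process_obj)                   prepare(preparation_data)
#                                         self = pickle.load(fp)
#                                         exitcode = self._bootstrap()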
@@ -0,0 +1,436 @@
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import errno
import sys
import tempfile
import threading

from . import context
from . import process
from . import util

from ._ext import _billiard, ensure_SemLock
from time import monotonic

__all__ = [
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event',
]

# Try to import the mp.synchronize module cleanly; if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
ensure_SemLock()

#
# Constants
#

RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX

try:
    sem_unlink = _billiard.SemLock.sem_unlink
except AttributeError:  # pragma: no cover
    try:
        # Py3.4+ implements sem_unlink and the semaphore must be named
        from _multiprocessing import sem_unlink  # noqa
    except ImportError:
        sem_unlink = None  # noqa

#
# Base class for semaphores and mutexes; wraps `_billiard.SemLock`
#


def _semname(sl):
    try:
        return sl.name
    except AttributeError:
        pass


class SemLock:
    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, ctx=None):
        if ctx is None:
            ctx = context._default_context.get_context()
        name = ctx.get_start_method()
        unlink_now = sys.platform == 'win32' or name == 'fork'
        if sem_unlink:
            for i in range(100):
                try:
                    sl = self._semlock = _billiard.SemLock(
                        kind, value, maxvalue, self._make_name(), unlink_now,
                    )
                except (OSError, IOError) as exc:
                    if getattr(exc, 'errno', None) != errno.EEXIST:
                        raise
                else:
                    break
            else:
                exc = IOError('cannot find file for semaphore')
                exc.errno = errno.EEXIST
                raise exc
        else:
            sl = self._semlock = _billiard.SemLock(kind, value, maxvalue)

        util.debug('created semlock with handle %s', sl.handle)
        self._make_methods()

        if sem_unlink:

            if sys.platform != 'win32':
                def _after_fork(obj):
                    obj._semlock._after_fork()
                util.register_after_fork(self, _after_fork)

            if _semname(self._semlock) is not None:
                # We only get here if we are on Unix with forking
                # disabled.  When the object is garbage collected or the
                # process shuts down we unlink the semaphore name
                from .semaphore_tracker import register
                register(self._semlock.name)
                util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
                              exitpriority=0)

    @staticmethod
    def _cleanup(name):
        from .semaphore_tracker import unregister
        sem_unlink(name)
        unregister(name)

    def _make_methods(self):
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.__enter__()

    def __exit__(self, *args):
        return self._semlock.__exit__(*args)

    def __getstate__(self):
        context.assert_spawning(self)
        sl = self._semlock
        if sys.platform == 'win32':
            h = context.get_spawning_popen().duplicate_for_child(sl.handle)
        else:
            h = sl.handle
        state = (h, sl.kind, sl.maxvalue)
        try:
            state += (sl.name, )
        except AttributeError:
            pass
        return state

    def __setstate__(self, state):
        self._semlock = _billiard.SemLock._rebuild(*state)
        util.debug('recreated blocker with handle %r', state[0])
        self._make_methods()

    @staticmethod
    def _make_name():
        return '%s-%s' % (process.current_process()._config['semprefix'],
                          next(SemLock._rand))


class Semaphore(SemLock):

    def __init__(self, value=1, ctx=None):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)

    def get_value(self):
        return self._semlock._get_value()

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<%s(value=%s)>' % (self.__class__.__name__, value)


class BoundedSemaphore(Semaphore):

    def __init__(self, value=1, ctx=None):
        SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<%s(value=%s, maxvalue=%s)>' % (
            self.__class__.__name__, value, self._semlock.maxvalue)


class Lock(SemLock):
    '''
    Non-recursive lock.
    '''

    def __init__(self, ctx=None):
        SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)

    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
            elif self._semlock._get_value() == 1:
                name = 'None'
            elif self._semlock._count() > 0:
                name = 'SomeOtherThread'
            else:
                name = 'SomeOtherProcess'
        except Exception:
            name = 'unknown'
        return '<%s(owner=%s)>' % (self.__class__.__name__, name)


class RLock(SemLock):
    '''
    Recursive lock
    '''

    def __init__(self, ctx=None):
        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)

    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                name, count = 'None', 0
            elif self._semlock._count() > 0:
                name, count = 'SomeOtherThread', 'nonzero'
            else:
                name, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            name, count = 'unknown', 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)


class Condition:
    '''
    Condition variable
    '''

    def __init__(self, lock=None, ctx=None):
        assert ctx
        self._lock = lock or ctx.RLock()
        self._sleeping_count = ctx.Semaphore(0)
        self._woken_count = ctx.Semaphore(0)
        self._wait_semaphore = ctx.Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        context.assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            num_waiters = 'unknown'
        return '<%s(%s, %s)>' % (
            self.__class__.__name__, self._lock, num_waiters)

    def wait(self, timeout=None):
        assert self._lock._semlock._is_mine(), \
            'must acquire() condition before using wait()'

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock
        count = self._lock._semlock._count()
        for i in range(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for i in range(count):
                self._lock.acquire()

    def notify(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        if self._sleeping_count.acquire(False):  # try grabbing a sleeper
            self._wait_semaphore.release()       # wake up one sleeper
            self._woken_count.acquire()          # wait for sleeper to wake

            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()  # wake up one sleeper
            sleepers += 1

        if sleepers:
            for i in range(sleepers):
                self._woken_count.acquire()  # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def wait_for(self, predicate, timeout=None):
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = monotonic() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - monotonic()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result


class Event:

    def __init__(self, ctx=None):
        assert ctx
        self._cond = ctx.Condition(ctx.Lock())
        self._flag = ctx.Semaphore(0)

    def is_set(self):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

    def set(self):
        with self._cond:
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        with self._cond:
            self._flag.acquire(False)

    def wait(self, timeout=None):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
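# Usage sketch (illustrative): Event is built from a Condition plus a
# semaphore used as a one-bit flag, so set()/wait() behave like
# threading.Event across processes:
#
#     from billiard import get_context
#
#     ctx = get_context()
#     ev = ctx.Event()
#     ev.is_set()    # False
#     ev.set()
#     ev.wait(1.0)   # True, returns immediately
#     ev.clear()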

#
# Barrier
#


if hasattr(threading, 'Barrier'):

    class Barrier(threading.Barrier):

        def __init__(self, parties, action=None, timeout=None, ctx=None):
            assert ctx
            import struct
            from .heap import BufferWrapper
            wrapper = BufferWrapper(struct.calcsize('i') * 2)
            cond = ctx.Condition()
            self.__setstate__((parties, action, timeout, cond, wrapper))
            self._state = 0
            self._count = 0

        def __setstate__(self, state):
            (self._parties, self._action, self._timeout,
             self._cond, self._wrapper) = state
            self._array = self._wrapper.create_memoryview().cast('i')

        def __getstate__(self):
            return (self._parties, self._action, self._timeout,
                    self._cond, self._wrapper)

        @property
        def _state(self):
            return self._array[0]

        @_state.setter
        def _state(self, value):  # noqa
            self._array[0] = value

        @property
        def _count(self):
            return self._array[1]

        @_count.setter
        def _count(self, value):  # noqa
            self._array[1] = value


else:

    class Barrier:  # noqa

        def __init__(self, *args, **kwargs):
            raise NotImplementedError('Barrier only supported on Py3')
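# Design note (illustrative): Barrier subclasses threading.Barrier but
# moves its two ints of mutable state (_state, _count) into a shared
# memoryview via the property pairs above, so every process sees the
# same barrier state, e.g.:
#
#     ctx = get_context()       # assumes billiard.get_context()
#     barrier = ctx.Barrier(2)  # parties=2; both wait() calls return together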
237
ETB-API/venv/lib/python3.12/site-packages/billiard/util.py
Normal file
@@ -0,0 +1,237 @@
#
# Module providing various facilities to other parts of the package
#
# billiard/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
# Licensed to PSF under a Contributor Agreement.
#

import sys
import errno
import functools
import atexit

try:
    import cffi
except ImportError:
    import ctypes

try:
    from subprocess import _args_from_interpreter_flags  # noqa
except ImportError:  # pragma: no cover
    def _args_from_interpreter_flags():  # noqa
        """Return a list of command-line arguments reproducing the current
        settings in sys.flags and sys.warnoptions."""
        flag_opt_map = {
            'debug': 'd',
            'optimize': 'O',
            'dont_write_bytecode': 'B',
            'no_user_site': 's',
            'no_site': 'S',
            'ignore_environment': 'E',
            'verbose': 'v',
            'bytes_warning': 'b',
            'hash_randomization': 'R',
            'py3k_warning': '3',
        }
        args = []
        for flag, opt in flag_opt_map.items():
            v = getattr(sys.flags, flag)
            if v > 0:
                args.append('-' + opt * v)
        for opt in sys.warnoptions:
            args.append('-W' + opt)
        return args

from multiprocessing.util import (  # noqa
    _afterfork_registry,
    _afterfork_counter,
    _exit_function,
    _finalizer_registry,
    _finalizer_counter,
    Finalize,
    ForkAwareLocal,
    ForkAwareThreadLock,
    get_temp_dir,
    is_exiting,
    register_after_fork,
    _run_after_forkers,
    _run_finalizers,
)

from .compat import get_errno

__all__ = [
    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
    'SUBDEBUG', 'SUBWARNING',
]


# Constants from prctl.h
PR_GET_PDEATHSIG = 2
PR_SET_PDEATHSIG = 1

#
# Logging
#

NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
WARNING = 30
ERROR = 40

LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'

_logger = None
_log_to_stderr = False


def sub_debug(msg, *args, **kwargs):
    if _logger:
        _logger.log(SUBDEBUG, msg, *args, **kwargs)


def debug(msg, *args, **kwargs):
    if _logger:
        _logger.log(DEBUG, msg, *args, **kwargs)


def info(msg, *args, **kwargs):
    if _logger:
        _logger.log(INFO, msg, *args, **kwargs)


def sub_warning(msg, *args, **kwargs):
    if _logger:
        _logger.log(SUBWARNING, msg, *args, **kwargs)


def warning(msg, *args, **kwargs):
    if _logger:
        _logger.log(WARNING, msg, *args, **kwargs)


def error(msg, *args, **kwargs):
    if _logger:
        _logger.log(ERROR, msg, *args, **kwargs)


def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging

    try:
        # Python 3.13+
        acquire, release = logging._prepareFork, logging._afterFork
    except AttributeError:
        acquire, release = logging._acquireLock, logging._releaseLock
    acquire()
    try:
        if not _logger:
            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))
    finally:
        release()

    return _logger


def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging

    logger = get_logger()
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    return _logger
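# Usage sketch (illustrative): log_to_stderr() is how billiard's extra
# SUBDEBUG/SUBWARNING levels become visible during debugging:
#
#     from billiard import util
#
#     util.log_to_stderr(util.SUBDEBUG)
#     util.sub_debug('fd %d registered', 7)
#     # -> [SUBDEBUG/MainProcess] fd 7 registered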

def get_pdeathsig():
    """
    Return the current value of the parent process death signal
    """
    if not sys.platform.startswith('linux'):
        # currently we support only linux platform.
        raise OSError()
    try:
        if 'cffi' in sys.modules:
            ffi = cffi.FFI()
            ffi.cdef("int prctl (int __option, ...);")
            arg = ffi.new("int *")
            C = ffi.dlopen(None)
            C.prctl(PR_GET_PDEATHSIG, arg)
            return arg[0]
        else:
            sig = ctypes.c_int()
            libc = ctypes.cdll.LoadLibrary("libc.so.6")
            libc.prctl(PR_GET_PDEATHSIG, ctypes.byref(sig))
            return sig.value
    except Exception:
        raise OSError()


def set_pdeathsig(sig):
    """
    Set the parent process death signal of the calling process to sig
    (either a signal value in the range 1..maxsig, or 0 to clear).
    This is the signal that the calling process will get when its parent dies.
    This value is cleared for the child of a fork(2) and
    (since Linux 2.4.36 / 2.6.23) when executing a set-user-ID or
    set-group-ID binary.
    """
    if not sys.platform.startswith('linux'):
        # currently we support only linux platform.
        raise OSError("pdeathsig is only supported on linux")
    try:
        if 'cffi' in sys.modules:
            ffi = cffi.FFI()
            ffi.cdef("int prctl (int __option, ...);")
            C = ffi.dlopen(None)
            C.prctl(PR_SET_PDEATHSIG, ffi.cast("int", sig))
        else:
            libc = ctypes.cdll.LoadLibrary("libc.so.6")
            libc.prctl(PR_SET_PDEATHSIG, ctypes.c_int(sig))
    except Exception as e:
        raise OSError("An error occurred while setting pdeathsig") from e


def _eintr_retry(func):
    '''
    Automatic retry after EINTR.
    '''

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        while 1:
            try:
                return func(*args, **kwargs)
            except OSError as exc:
                if get_errno(exc) != errno.EINTR:
                    raise
    return wrapped
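# Usage sketch (illustrative): _eintr_retry wraps a syscall-like callable
# so it transparently restarts when interrupted by a signal (largely
# redundant on Python 3.5+ thanks to PEP 475, but kept for compatibility):
#
#     import os
#
#     read_retrying = _eintr_retry(os.read)
#     data = read_retrying(fd, 4096)   # loops on EINTR instead of raising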