updates
This commit is contained in:
163
Backend/venv/lib/python3.12/site-packages/joblib/__init__.py
Normal file
163
Backend/venv/lib/python3.12/site-packages/joblib/__init__.py
Normal file
@@ -0,0 +1,163 @@
|
||||
"""Joblib is a set of tools to provide **lightweight pipelining in
|
||||
Python**. In particular:
|
||||
|
||||
1. transparent disk-caching of functions and lazy re-evaluation
|
||||
(memoize pattern)
|
||||
|
||||
2. easy simple parallel computing
|
||||
|
||||
Joblib is optimized to be **fast** and **robust** on large
|
||||
data in particular and has specific optimizations for `numpy` arrays. It is
|
||||
**BSD-licensed**.
|
||||
|
||||
|
||||
==================== ===============================================
|
||||
**Documentation:** https://joblib.readthedocs.io
|
||||
|
||||
**Download:** https://pypi.python.org/pypi/joblib#downloads
|
||||
|
||||
**Source code:** https://github.com/joblib/joblib
|
||||
|
||||
**Report issues:** https://github.com/joblib/joblib/issues
|
||||
==================== ===============================================
|
||||
|
||||
|
||||
Vision
|
||||
--------
|
||||
|
||||
The vision is to provide tools to easily achieve better performance and
|
||||
reproducibility when working with long running jobs.
|
||||
|
||||
* **Avoid computing the same thing twice**: code is often rerun again and
|
||||
again, for instance when prototyping computational-heavy jobs (as in
|
||||
scientific development), but hand-crafted solutions to alleviate this
|
||||
issue are error-prone and often lead to unreproducible results.
|
||||
|
||||
* **Persist to disk transparently**: efficiently persisting
|
||||
arbitrary objects containing large data is hard. Using
|
||||
joblib's caching mechanism avoids hand-written persistence and
|
||||
implicitly links the file on disk to the execution context of
|
||||
the original Python object. As a result, joblib's persistence is
|
||||
good for resuming an application status or computational job, eg
|
||||
after a crash.
|
||||
|
||||
Joblib addresses these problems while **leaving your code and your flow
|
||||
control as unmodified as possible** (no framework, no new paradigms).
|
||||
|
||||
Main features
|
||||
------------------
|
||||
|
||||
1) **Transparent and fast disk-caching of output value:** a memoize or
|
||||
make-like functionality for Python functions that works well for
|
||||
arbitrary Python objects, including very large numpy arrays. Separate
|
||||
persistence and flow-execution logic from domain logic or algorithmic
|
||||
code by writing the operations as a set of steps with well-defined
|
||||
inputs and outputs: Python functions. Joblib can save their
|
||||
computation to disk and rerun it only if necessary::
|
||||
|
||||
>>> from joblib import Memory
|
||||
>>> location = 'your_cache_dir_goes_here'
|
||||
>>> mem = Memory(location, verbose=1)
|
||||
>>> import numpy as np
|
||||
>>> a = np.vander(np.arange(3)).astype(float)
|
||||
>>> square = mem.cache(np.square)
|
||||
>>> b = square(a) # doctest: +ELLIPSIS
|
||||
______________________________________________________________________...
|
||||
[Memory] Calling ...square...
|
||||
square(array([[0., 0., 1.],
|
||||
[1., 1., 1.],
|
||||
[4., 2., 1.]]))
|
||||
_________________________________________________...square - ...s, 0.0min
|
||||
|
||||
>>> c = square(a)
|
||||
>>> # The above call did not trigger an evaluation
|
||||
|
||||
2) **Embarrassingly parallel helper:** to make it easy to write readable
|
||||
parallel code and debug it quickly::
|
||||
|
||||
>>> from joblib import Parallel, delayed
|
||||
>>> from math import sqrt
|
||||
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
|
||||
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
|
||||
|
||||
|
||||
3) **Fast compressed Persistence**: a replacement for pickle to work
|
||||
efficiently on Python objects containing large data (
|
||||
*joblib.dump* & *joblib.load* ).
|
||||
|
||||
..
|
||||
>>> import shutil ; shutil.rmtree(location)
|
||||
|
||||
"""
|
||||
|
||||
# PEP0440 compatible formatted version, see:
|
||||
# https://www.python.org/dev/peps/pep-0440/
|
||||
#
|
||||
# Generic release markers:
|
||||
# X.Y
|
||||
# X.Y.Z # For bugfix releases
|
||||
#
|
||||
# Admissible pre-release markers:
|
||||
# X.YaN # Alpha release
|
||||
# X.YbN # Beta release
|
||||
# X.YrcN # Release Candidate
|
||||
# X.Y # Final release
|
||||
#
|
||||
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
|
||||
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
|
||||
#
|
||||
__version__ = "1.5.2"
|
||||
|
||||
|
||||
import os
|
||||
|
||||
from ._cloudpickle_wrapper import wrap_non_picklable_objects
|
||||
from ._parallel_backends import ParallelBackendBase
|
||||
from ._store_backends import StoreBackendBase
|
||||
from .compressor import register_compressor
|
||||
from .hashing import hash
|
||||
from .logger import Logger, PrintTime
|
||||
from .memory import MemorizedResult, Memory, expires_after, register_store_backend
|
||||
from .numpy_pickle import dump, load
|
||||
from .parallel import (
|
||||
Parallel,
|
||||
cpu_count,
|
||||
delayed,
|
||||
effective_n_jobs,
|
||||
parallel_backend,
|
||||
parallel_config,
|
||||
register_parallel_backend,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# On-disk result caching
|
||||
"Memory",
|
||||
"MemorizedResult",
|
||||
"expires_after",
|
||||
# Parallel code execution
|
||||
"Parallel",
|
||||
"delayed",
|
||||
"cpu_count",
|
||||
"effective_n_jobs",
|
||||
"wrap_non_picklable_objects",
|
||||
# Context to change the backend globally
|
||||
"parallel_config",
|
||||
"parallel_backend",
|
||||
# Helpers to define and register store/parallel backends
|
||||
"ParallelBackendBase",
|
||||
"StoreBackendBase",
|
||||
"register_compressor",
|
||||
"register_parallel_backend",
|
||||
"register_store_backend",
|
||||
# Helpers kept for backward compatibility
|
||||
"PrintTime",
|
||||
"Logger",
|
||||
"hash",
|
||||
"dump",
|
||||
"load",
|
||||
]
|
||||
|
||||
|
||||
# Workaround issue discovered in intel-openmp 2019.5:
|
||||
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
|
||||
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,18 @@
|
||||
"""
|
||||
Small shim of loky's cloudpickle_wrapper to avoid failure when
|
||||
multiprocessing is not available.
|
||||
"""
|
||||
|
||||
from ._multiprocessing_helpers import mp
|
||||
|
||||
|
||||
def _my_wrap_non_picklable_objects(obj, keep_wrapper=True):
|
||||
return obj
|
||||
|
||||
|
||||
if mp is not None:
|
||||
from .externals.loky import wrap_non_picklable_objects
|
||||
else:
|
||||
wrap_non_picklable_objects = _my_wrap_non_picklable_objects
|
||||
|
||||
__all__ = ["wrap_non_picklable_objects"]
|
||||
381
Backend/venv/lib/python3.12/site-packages/joblib/_dask.py
Normal file
381
Backend/venv/lib/python3.12/site-packages/joblib/_dask.py
Normal file
@@ -0,0 +1,381 @@
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import asyncio
|
||||
import concurrent.futures
|
||||
import contextlib
|
||||
import time
|
||||
import weakref
|
||||
from uuid import uuid4
|
||||
|
||||
from ._utils import (
|
||||
_retrieve_traceback_capturing_wrapped_call,
|
||||
_TracebackCapturingWrapper,
|
||||
)
|
||||
from .parallel import AutoBatchingMixin, ParallelBackendBase, parallel_config
|
||||
|
||||
try:
|
||||
import dask
|
||||
import distributed
|
||||
except ImportError:
|
||||
dask = None
|
||||
distributed = None
|
||||
|
||||
if dask is not None and distributed is not None:
|
||||
from dask.distributed import (
|
||||
Client,
|
||||
as_completed,
|
||||
get_client,
|
||||
rejoin,
|
||||
secede,
|
||||
)
|
||||
from dask.sizeof import sizeof
|
||||
from dask.utils import funcname
|
||||
from distributed.utils import thread_state
|
||||
|
||||
try:
|
||||
# asyncio.TimeoutError, Python3-only error thrown by recent versions of
|
||||
# distributed
|
||||
from distributed.utils import TimeoutError as _TimeoutError
|
||||
except ImportError:
|
||||
from tornado.gen import TimeoutError as _TimeoutError
|
||||
|
||||
|
||||
def is_weakrefable(obj):
|
||||
try:
|
||||
weakref.ref(obj)
|
||||
return True
|
||||
except TypeError:
|
||||
return False
|
||||
|
||||
|
||||
class _WeakKeyDictionary:
|
||||
"""A variant of weakref.WeakKeyDictionary for unhashable objects.
|
||||
|
||||
This datastructure is used to store futures for broadcasted data objects
|
||||
such as large numpy arrays or pandas dataframes that are not hashable and
|
||||
therefore cannot be used as keys of traditional python dicts.
|
||||
|
||||
Furthermore using a dict with id(array) as key is not safe because the
|
||||
Python is likely to reuse id of recently collected arrays.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._data = {}
|
||||
|
||||
def __getitem__(self, obj):
|
||||
ref, val = self._data[id(obj)]
|
||||
if ref() is not obj:
|
||||
# In case of a race condition with on_destroy.
|
||||
raise KeyError(obj)
|
||||
return val
|
||||
|
||||
def __setitem__(self, obj, value):
|
||||
key = id(obj)
|
||||
try:
|
||||
ref, _ = self._data[key]
|
||||
if ref() is not obj:
|
||||
# In case of race condition with on_destroy.
|
||||
raise KeyError(obj)
|
||||
except KeyError:
|
||||
# Insert the new entry in the mapping along with a weakref
|
||||
# callback to automatically delete the entry from the mapping
|
||||
# as soon as the object used as key is garbage collected.
|
||||
def on_destroy(_):
|
||||
del self._data[key]
|
||||
|
||||
ref = weakref.ref(obj, on_destroy)
|
||||
self._data[key] = ref, value
|
||||
|
||||
def __len__(self):
|
||||
return len(self._data)
|
||||
|
||||
def clear(self):
|
||||
self._data.clear()
|
||||
|
||||
|
||||
def _funcname(x):
|
||||
try:
|
||||
if isinstance(x, list):
|
||||
x = x[0][0]
|
||||
except Exception:
|
||||
pass
|
||||
return funcname(x)
|
||||
|
||||
|
||||
def _make_tasks_summary(tasks):
|
||||
"""Summarize of list of (func, args, kwargs) function calls"""
|
||||
unique_funcs = {func for func, args, kwargs in tasks}
|
||||
|
||||
if len(unique_funcs) == 1:
|
||||
mixed = False
|
||||
else:
|
||||
mixed = True
|
||||
return len(tasks), mixed, _funcname(tasks)
|
||||
|
||||
|
||||
class Batch:
|
||||
"""dask-compatible wrapper that executes a batch of tasks"""
|
||||
|
||||
def __init__(self, tasks):
|
||||
# collect some metadata from the tasks to ease Batch calls
|
||||
# introspection when debugging
|
||||
self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(tasks)
|
||||
|
||||
def __call__(self, tasks=None):
|
||||
results = []
|
||||
with parallel_config(backend="dask"):
|
||||
for func, args, kwargs in tasks:
|
||||
results.append(func(*args, **kwargs))
|
||||
return results
|
||||
|
||||
def __repr__(self):
|
||||
descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls"
|
||||
if self._mixed:
|
||||
descr = "mixed_" + descr
|
||||
return descr
|
||||
|
||||
|
||||
def _joblib_probe_task():
|
||||
# Noop used by the joblib connector to probe when workers are ready.
|
||||
pass
|
||||
|
||||
|
||||
class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
|
||||
MIN_IDEAL_BATCH_DURATION = 0.2
|
||||
MAX_IDEAL_BATCH_DURATION = 1.0
|
||||
supports_retrieve_callback = True
|
||||
default_n_jobs = -1
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
scheduler_host=None,
|
||||
scatter=None,
|
||||
client=None,
|
||||
loop=None,
|
||||
wait_for_workers_timeout=10,
|
||||
**submit_kwargs,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
if distributed is None:
|
||||
msg = (
|
||||
"You are trying to use 'dask' as a joblib parallel backend "
|
||||
"but dask is not installed. Please install dask "
|
||||
"to fix this error."
|
||||
)
|
||||
raise ValueError(msg)
|
||||
|
||||
if client is None:
|
||||
if scheduler_host:
|
||||
client = Client(scheduler_host, loop=loop, set_as_default=False)
|
||||
else:
|
||||
try:
|
||||
client = get_client()
|
||||
except ValueError as e:
|
||||
msg = (
|
||||
"To use Joblib with Dask first create a Dask Client"
|
||||
"\n\n"
|
||||
" from dask.distributed import Client\n"
|
||||
" client = Client()\n"
|
||||
"or\n"
|
||||
" client = Client('scheduler-address:8786')"
|
||||
)
|
||||
raise ValueError(msg) from e
|
||||
|
||||
self.client = client
|
||||
|
||||
if scatter is not None and not isinstance(scatter, (list, tuple)):
|
||||
raise TypeError(
|
||||
"scatter must be a list/tuple, got `%s`" % type(scatter).__name__
|
||||
)
|
||||
|
||||
if scatter is not None and len(scatter) > 0:
|
||||
# Keep a reference to the scattered data to keep the ids the same
|
||||
self._scatter = list(scatter)
|
||||
scattered = self.client.scatter(scatter, broadcast=True)
|
||||
self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
|
||||
else:
|
||||
self._scatter = []
|
||||
self.data_futures = {}
|
||||
self.wait_for_workers_timeout = wait_for_workers_timeout
|
||||
self.submit_kwargs = submit_kwargs
|
||||
self.waiting_futures = as_completed(
|
||||
[], loop=client.loop, with_results=True, raise_errors=False
|
||||
)
|
||||
self._results = {}
|
||||
self._callbacks = {}
|
||||
|
||||
async def _collect(self):
|
||||
while self._continue:
|
||||
async for future, result in self.waiting_futures:
|
||||
cf_future = self._results.pop(future)
|
||||
callback = self._callbacks.pop(future)
|
||||
if future.status == "error":
|
||||
typ, exc, tb = result
|
||||
cf_future.set_exception(exc)
|
||||
else:
|
||||
cf_future.set_result(result)
|
||||
callback(result)
|
||||
await asyncio.sleep(0.01)
|
||||
|
||||
def __reduce__(self):
|
||||
return (DaskDistributedBackend, ())
|
||||
|
||||
def get_nested_backend(self):
|
||||
return DaskDistributedBackend(client=self.client), -1
|
||||
|
||||
def configure(self, n_jobs=1, parallel=None, **backend_args):
|
||||
self.parallel = parallel
|
||||
return self.effective_n_jobs(n_jobs)
|
||||
|
||||
def start_call(self):
|
||||
self._continue = True
|
||||
self.client.loop.add_callback(self._collect)
|
||||
self.call_data_futures = _WeakKeyDictionary()
|
||||
|
||||
def stop_call(self):
|
||||
# The explicit call to clear is required to break a cycling reference
|
||||
# to the futures.
|
||||
self._continue = False
|
||||
# wait for the future collection routine (self._backend._collect) to
|
||||
# finish in order to limit asyncio warnings due to aborting _collect
|
||||
# during a following backend termination call
|
||||
time.sleep(0.01)
|
||||
self.call_data_futures.clear()
|
||||
|
||||
def effective_n_jobs(self, n_jobs):
|
||||
effective_n_jobs = sum(self.client.ncores().values())
|
||||
if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
|
||||
return effective_n_jobs
|
||||
|
||||
# If there is no worker, schedule a probe task to wait for the workers
|
||||
# to come up and be available. If the dask cluster is in adaptive mode
|
||||
# task might cause the cluster to provision some workers.
|
||||
try:
|
||||
self.client.submit(_joblib_probe_task).result(
|
||||
timeout=self.wait_for_workers_timeout
|
||||
)
|
||||
except _TimeoutError as e:
|
||||
error_msg = (
|
||||
"DaskDistributedBackend has no worker after {} seconds. "
|
||||
"Make sure that workers are started and can properly connect "
|
||||
"to the scheduler and increase the joblib/dask connection "
|
||||
"timeout with:\n\n"
|
||||
"parallel_config(backend='dask', wait_for_workers_timeout={})"
|
||||
).format(
|
||||
self.wait_for_workers_timeout,
|
||||
max(10, 2 * self.wait_for_workers_timeout),
|
||||
)
|
||||
raise TimeoutError(error_msg) from e
|
||||
return sum(self.client.ncores().values())
|
||||
|
||||
async def _to_func_args(self, func):
|
||||
itemgetters = dict()
|
||||
|
||||
# Futures that are dynamically generated during a single call to
|
||||
# Parallel.__call__.
|
||||
call_data_futures = getattr(self, "call_data_futures", None)
|
||||
|
||||
async def maybe_to_futures(args):
|
||||
out = []
|
||||
for arg in args:
|
||||
arg_id = id(arg)
|
||||
if arg_id in itemgetters:
|
||||
out.append(itemgetters[arg_id])
|
||||
continue
|
||||
|
||||
f = self.data_futures.get(arg_id, None)
|
||||
if f is None and call_data_futures is not None:
|
||||
try:
|
||||
f = await call_data_futures[arg]
|
||||
except KeyError:
|
||||
pass
|
||||
if f is None:
|
||||
if is_weakrefable(arg) and sizeof(arg) > 1e3:
|
||||
# Automatically scatter large objects to some of
|
||||
# the workers to avoid duplicated data transfers.
|
||||
# Rely on automated inter-worker data stealing if
|
||||
# more workers need to reuse this data
|
||||
# concurrently.
|
||||
# set hash=False - nested scatter calls (i.e
|
||||
# calling client.scatter inside a dask worker)
|
||||
# using hash=True often raise CancelledError,
|
||||
# see dask/distributed#3703
|
||||
_coro = self.client.scatter(
|
||||
arg, asynchronous=True, hash=False
|
||||
)
|
||||
# Centralize the scattering of identical arguments
|
||||
# between concurrent apply_async callbacks by
|
||||
# exposing the running coroutine in
|
||||
# call_data_futures before it completes.
|
||||
t = asyncio.Task(_coro)
|
||||
call_data_futures[arg] = t
|
||||
|
||||
f = await t
|
||||
|
||||
if f is not None:
|
||||
out.append(f)
|
||||
else:
|
||||
out.append(arg)
|
||||
return out
|
||||
|
||||
tasks = []
|
||||
for f, args, kwargs in func.items:
|
||||
args = list(await maybe_to_futures(args))
|
||||
kwargs = dict(zip(kwargs.keys(), await maybe_to_futures(kwargs.values())))
|
||||
tasks.append((f, args, kwargs))
|
||||
|
||||
return (Batch(tasks), tasks)
|
||||
|
||||
def apply_async(self, func, callback=None):
|
||||
cf_future = concurrent.futures.Future()
|
||||
cf_future.get = cf_future.result # achieve AsyncResult API
|
||||
|
||||
async def f(func, callback):
|
||||
batch, tasks = await self._to_func_args(func)
|
||||
key = f"{repr(batch)}-{uuid4().hex}"
|
||||
|
||||
dask_future = self.client.submit(
|
||||
_TracebackCapturingWrapper(batch),
|
||||
tasks=tasks,
|
||||
key=key,
|
||||
**self.submit_kwargs,
|
||||
)
|
||||
self.waiting_futures.add(dask_future)
|
||||
self._callbacks[dask_future] = callback
|
||||
self._results[dask_future] = cf_future
|
||||
|
||||
self.client.loop.add_callback(f, func, callback)
|
||||
|
||||
return cf_future
|
||||
|
||||
def retrieve_result_callback(self, out):
|
||||
return _retrieve_traceback_capturing_wrapped_call(out)
|
||||
|
||||
def abort_everything(self, ensure_ready=True):
|
||||
"""Tell the client to cancel any task submitted via this instance
|
||||
|
||||
joblib.Parallel will never access those results
|
||||
"""
|
||||
with self.waiting_futures.lock:
|
||||
self.waiting_futures.futures.clear()
|
||||
while not self.waiting_futures.queue.empty():
|
||||
self.waiting_futures.queue.get()
|
||||
|
||||
@contextlib.contextmanager
|
||||
def retrieval_context(self):
|
||||
"""Override ParallelBackendBase.retrieval_context to avoid deadlocks.
|
||||
|
||||
This removes thread from the worker's thread pool (using 'secede').
|
||||
Seceding avoids deadlock in nested parallelism settings.
|
||||
"""
|
||||
# See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
|
||||
# this is used.
|
||||
if hasattr(thread_state, "execution_state"):
|
||||
# we are in a worker. Secede to avoid deadlock.
|
||||
secede()
|
||||
|
||||
yield
|
||||
|
||||
if hasattr(thread_state, "execution_state"):
|
||||
rejoin()
|
||||
@@ -0,0 +1,715 @@
|
||||
"""
|
||||
Reducer using memory mapping for numpy arrays
|
||||
"""
|
||||
# Author: Thomas Moreau <thomas.moreau.2010@gmail.com>
|
||||
# Copyright: 2017, Thomas Moreau
|
||||
# License: BSD 3 clause
|
||||
|
||||
import atexit
|
||||
import errno
|
||||
import os
|
||||
import stat
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import warnings
|
||||
import weakref
|
||||
from mmap import mmap
|
||||
from multiprocessing import util
|
||||
from pickle import HIGHEST_PROTOCOL, PicklingError, dumps, loads, whichmodule
|
||||
from uuid import uuid4
|
||||
|
||||
try:
|
||||
WindowsError
|
||||
except NameError:
|
||||
WindowsError = type(None)
|
||||
|
||||
try:
|
||||
import numpy as np
|
||||
from numpy.lib.stride_tricks import as_strided
|
||||
except ImportError:
|
||||
np = None
|
||||
|
||||
from .backports import make_memmap
|
||||
from .disk import delete_folder
|
||||
from .externals.loky.backend import resource_tracker
|
||||
from .numpy_pickle import dump, load, load_temporary_memmap
|
||||
|
||||
# Some system have a ramdisk mounted by default, we can use it instead of /tmp
|
||||
# as the default folder to dump big arrays to share with subprocesses.
|
||||
SYSTEM_SHARED_MEM_FS = "/dev/shm"
|
||||
|
||||
# Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using
|
||||
# it as the default folder to dump big arrays to share with subprocesses.
|
||||
SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9)
|
||||
|
||||
# Folder and file permissions to chmod temporary files generated by the
|
||||
# memmapping pool. Only the owner of the Python process can access the
|
||||
# temporary files and folder.
|
||||
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
|
||||
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
|
||||
|
||||
# Set used in joblib workers, referencing the filenames of temporary memmaps
|
||||
# created by joblib to speed up data communication. In child processes, we add
|
||||
# a finalizer to these memmaps that sends a maybe_unlink call to the
|
||||
# resource_tracker, in order to free main memory as fast as possible.
|
||||
JOBLIB_MMAPS = set()
|
||||
|
||||
|
||||
def _log_and_unlink(filename):
|
||||
from .externals.loky.backend.resource_tracker import _resource_tracker
|
||||
|
||||
util.debug(
|
||||
"[FINALIZER CALL] object mapping to {} about to be deleted,"
|
||||
" decrementing the refcount of the file (pid: {})".format(
|
||||
os.path.basename(filename), os.getpid()
|
||||
)
|
||||
)
|
||||
_resource_tracker.maybe_unlink(filename, "file")
|
||||
|
||||
|
||||
def add_maybe_unlink_finalizer(memmap):
|
||||
util.debug(
|
||||
"[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid {})".format(
|
||||
type(memmap), id(memmap), os.path.basename(memmap.filename), os.getpid()
|
||||
)
|
||||
)
|
||||
weakref.finalize(memmap, _log_and_unlink, memmap.filename)
|
||||
|
||||
|
||||
def unlink_file(filename):
|
||||
"""Wrapper around os.unlink with a retry mechanism.
|
||||
|
||||
The retry mechanism has been implemented primarily to overcome a race
|
||||
condition happening during the finalizer of a np.memmap: when a process
|
||||
holding the last reference to a mmap-backed np.memmap/np.array is about to
|
||||
delete this array (and close the reference), it sends a maybe_unlink
|
||||
request to the resource_tracker. This request can be processed faster than
|
||||
it takes for the last reference of the memmap to be closed, yielding (on
|
||||
Windows) a PermissionError in the resource_tracker loop.
|
||||
"""
|
||||
NUM_RETRIES = 10
|
||||
for retry_no in range(1, NUM_RETRIES + 1):
|
||||
try:
|
||||
os.unlink(filename)
|
||||
break
|
||||
except PermissionError:
|
||||
util.debug(
|
||||
"[ResourceTracker] tried to unlink {}, got PermissionError".format(
|
||||
filename
|
||||
)
|
||||
)
|
||||
if retry_no == NUM_RETRIES:
|
||||
raise
|
||||
else:
|
||||
time.sleep(0.2)
|
||||
except FileNotFoundError:
|
||||
# In case of a race condition when deleting the temporary folder,
|
||||
# avoid noisy FileNotFoundError exception in the resource tracker.
|
||||
pass
|
||||
|
||||
|
||||
resource_tracker._CLEANUP_FUNCS["file"] = unlink_file
|
||||
|
||||
|
||||
class _WeakArrayKeyMap:
|
||||
"""A variant of weakref.WeakKeyDictionary for unhashable numpy arrays.
|
||||
|
||||
This datastructure will be used with numpy arrays as obj keys, therefore we
|
||||
do not use the __get__ / __set__ methods to avoid any conflict with the
|
||||
numpy fancy indexing syntax.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._data = {}
|
||||
|
||||
def get(self, obj):
|
||||
ref, val = self._data[id(obj)]
|
||||
if ref() is not obj:
|
||||
# In case of race condition with on_destroy: could never be
|
||||
# triggered by the joblib tests with CPython.
|
||||
raise KeyError(obj)
|
||||
return val
|
||||
|
||||
def set(self, obj, value):
|
||||
key = id(obj)
|
||||
try:
|
||||
ref, _ = self._data[key]
|
||||
if ref() is not obj:
|
||||
# In case of race condition with on_destroy: could never be
|
||||
# triggered by the joblib tests with CPython.
|
||||
raise KeyError(obj)
|
||||
except KeyError:
|
||||
# Insert the new entry in the mapping along with a weakref
|
||||
# callback to automatically delete the entry from the mapping
|
||||
# as soon as the object used as key is garbage collected.
|
||||
def on_destroy(_):
|
||||
del self._data[key]
|
||||
|
||||
ref = weakref.ref(obj, on_destroy)
|
||||
self._data[key] = ref, value
|
||||
|
||||
def __getstate__(self):
|
||||
raise PicklingError("_WeakArrayKeyMap is not pickleable")
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Support for efficient transient pickling of numpy data structures
|
||||
|
||||
|
||||
def _get_backing_memmap(a):
|
||||
"""Recursively look up the original np.memmap instance base if any."""
|
||||
b = getattr(a, "base", None)
|
||||
if b is None:
|
||||
# TODO: check scipy sparse datastructure if scipy is installed
|
||||
# a nor its descendants do not have a memmap base
|
||||
return None
|
||||
|
||||
elif isinstance(b, mmap):
|
||||
# a is already a real memmap instance.
|
||||
return a
|
||||
|
||||
else:
|
||||
# Recursive exploration of the base ancestry
|
||||
return _get_backing_memmap(b)
|
||||
|
||||
|
||||
def _get_temp_dir(pool_folder_name, temp_folder=None):
|
||||
"""Get the full path to a subfolder inside the temporary folder.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
pool_folder_name : str
|
||||
Sub-folder name used for the serialization of a pool instance.
|
||||
|
||||
temp_folder: str, optional
|
||||
Folder to be used by the pool for memmapping large arrays
|
||||
for sharing memory with worker processes. If None, this will try in
|
||||
order:
|
||||
|
||||
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
|
||||
variable,
|
||||
- /dev/shm if the folder exists and is writable: this is a
|
||||
RAMdisk filesystem available by default on modern Linux
|
||||
distributions,
|
||||
- the default system temporary folder that can be
|
||||
overridden with TMP, TMPDIR or TEMP environment
|
||||
variables, typically /tmp under Unix operating systems.
|
||||
|
||||
Returns
|
||||
-------
|
||||
pool_folder : str
|
||||
full path to the temporary folder
|
||||
use_shared_mem : bool
|
||||
whether the temporary folder is written to the system shared memory
|
||||
folder or some other temporary folder.
|
||||
"""
|
||||
use_shared_mem = False
|
||||
if temp_folder is None:
|
||||
temp_folder = os.environ.get("JOBLIB_TEMP_FOLDER", None)
|
||||
if temp_folder is None:
|
||||
if os.path.exists(SYSTEM_SHARED_MEM_FS) and hasattr(os, "statvfs"):
|
||||
try:
|
||||
shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS)
|
||||
available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail
|
||||
if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE:
|
||||
# Try to see if we have write access to the shared mem
|
||||
# folder only if it is reasonably large (that is 2GB or
|
||||
# more).
|
||||
temp_folder = SYSTEM_SHARED_MEM_FS
|
||||
pool_folder = os.path.join(temp_folder, pool_folder_name)
|
||||
if not os.path.exists(pool_folder):
|
||||
os.makedirs(pool_folder)
|
||||
use_shared_mem = True
|
||||
except (IOError, OSError):
|
||||
# Missing rights in the /dev/shm partition, fallback to regular
|
||||
# temp folder.
|
||||
temp_folder = None
|
||||
if temp_folder is None:
|
||||
# Fallback to the default tmp folder, typically /tmp
|
||||
temp_folder = tempfile.gettempdir()
|
||||
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
|
||||
pool_folder = os.path.join(temp_folder, pool_folder_name)
|
||||
return pool_folder, use_shared_mem
|
||||
|
||||
|
||||
def has_shareable_memory(a):
|
||||
"""Return True if a is backed by some mmap buffer directly or not."""
|
||||
return _get_backing_memmap(a) is not None
|
||||
|
||||
|
||||
def _strided_from_memmap(
|
||||
filename,
|
||||
dtype,
|
||||
mode,
|
||||
offset,
|
||||
order,
|
||||
shape,
|
||||
strides,
|
||||
total_buffer_len,
|
||||
unlink_on_gc_collect,
|
||||
):
|
||||
"""Reconstruct an array view on a memory mapped file."""
|
||||
if mode == "w+":
|
||||
# Do not zero the original data when unpickling
|
||||
mode = "r+"
|
||||
|
||||
if strides is None:
|
||||
# Simple, contiguous memmap
|
||||
return make_memmap(
|
||||
filename,
|
||||
dtype=dtype,
|
||||
shape=shape,
|
||||
mode=mode,
|
||||
offset=offset,
|
||||
order=order,
|
||||
unlink_on_gc_collect=unlink_on_gc_collect,
|
||||
)
|
||||
else:
|
||||
# For non-contiguous data, memmap the total enclosing buffer and then
|
||||
# extract the non-contiguous view with the stride-tricks API
|
||||
base = make_memmap(
|
||||
filename,
|
||||
dtype=dtype,
|
||||
shape=total_buffer_len,
|
||||
offset=offset,
|
||||
mode=mode,
|
||||
order=order,
|
||||
unlink_on_gc_collect=unlink_on_gc_collect,
|
||||
)
|
||||
return as_strided(base, shape=shape, strides=strides)
|
||||
|
||||
|
||||
def _reduce_memmap_backed(a, m):
|
||||
"""Pickling reduction for memmap backed arrays.
|
||||
|
||||
a is expected to be an instance of np.ndarray (or np.memmap)
|
||||
m is expected to be an instance of np.memmap on the top of the ``base``
|
||||
attribute ancestry of a. ``m.base`` should be the real python mmap object.
|
||||
"""
|
||||
# offset that comes from the striding differences between a and m
|
||||
util.debug(
|
||||
"[MEMMAP REDUCE] reducing a memmap-backed array (shape, {}, pid: {})".format(
|
||||
a.shape, os.getpid()
|
||||
)
|
||||
)
|
||||
try:
|
||||
from numpy.lib.array_utils import byte_bounds
|
||||
except (ModuleNotFoundError, ImportError):
|
||||
# Backward-compat for numpy < 2.0
|
||||
from numpy import byte_bounds
|
||||
a_start, a_end = byte_bounds(a)
|
||||
m_start = byte_bounds(m)[0]
|
||||
offset = a_start - m_start
|
||||
|
||||
# offset from the backing memmap
|
||||
offset += m.offset
|
||||
|
||||
# 1D arrays are both F and C contiguous, so only set the flag in
|
||||
# higher dimensions. See https://github.com/joblib/joblib/pull/1704.
|
||||
if m.ndim > 1 and m.flags["F_CONTIGUOUS"]:
|
||||
order = "F"
|
||||
else:
|
||||
# The backing memmap buffer is necessarily contiguous hence C if not
|
||||
# Fortran
|
||||
order = "C"
|
||||
|
||||
if a.flags["F_CONTIGUOUS"] or a.flags["C_CONTIGUOUS"]:
|
||||
# If the array is a contiguous view, no need to pass the strides
|
||||
strides = None
|
||||
total_buffer_len = None
|
||||
else:
|
||||
# Compute the total number of items to map from which the strided
|
||||
# view will be extracted.
|
||||
strides = a.strides
|
||||
total_buffer_len = (a_end - a_start) // a.itemsize
|
||||
|
||||
return (
|
||||
_strided_from_memmap,
|
||||
(
|
||||
m.filename,
|
||||
a.dtype,
|
||||
m.mode,
|
||||
offset,
|
||||
order,
|
||||
a.shape,
|
||||
strides,
|
||||
total_buffer_len,
|
||||
False,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def reduce_array_memmap_backward(a):
    """reduce a np.array or a np.memmap from a child process"""
    backing = _get_backing_memmap(a)
    if isinstance(backing, np.memmap) and backing.filename not in JOBLIB_MMAPS:
        # a is backed by a memmapped file provided by the user: reconstruct
        # it in the parent directly from that file.
        return _reduce_memmap_backed(a, backing)
    # a is either a regular (not memmap-backed) numpy array, or an array
    # backed by a shared temporary file created by joblib. In the latter
    # case, in order to limit the lifespan of these temporary files, we
    # serialize the memmap as a regular numpy array, and decref the
    # file backing the memmap (done implicitly in a previously registered
    # finalizer, see ``unlink_on_gc_collect`` for more details).
    payload = dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL)
    return (loads, (payload,))
|
||||
|
||||
|
||||
class ArrayMemmapForwardReducer(object):
    """Reducer callable to dump large arrays to memmap files.

    Parameters
    ----------
    max_nbytes: int
        Threshold to trigger memmapping of large arrays to files created
        a folder.
    temp_folder_resolver: callable
        An callable in charge of resolving a temporary folder name where files
        for backing memmapped arrays are created.
    mmap_mode: 'r', 'r+' or 'c'
        Mode for the created memmap datastructure. See the documentation of
        numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
        automatically to avoid zeroing the data on unpickling.
    verbose: int, optional, 0 by default
        If verbose > 0, memmap creations are logged.
        If verbose > 1, both memmap creations, reuse and array pickling are
        logged.
    prewarm: bool, optional, False by default.
        Force a read on newly memmapped array to make sure that OS pre-cache it
        memory. This can be useful to avoid concurrent disk access when the
        same data array is passed to different worker processes.
    """

    def __init__(
        self,
        max_nbytes,
        temp_folder_resolver,
        mmap_mode,
        unlink_on_gc_collect,
        verbose=0,
        prewarm=True,
    ):
        self._max_nbytes = max_nbytes
        self._temp_folder_resolver = temp_folder_resolver
        self._mmap_mode = mmap_mode
        self.verbose = int(verbose)
        # BUGFIX: a stray ``self._prewarm = prewarm`` used to follow this
        # if/else and unconditionally clobbered the value computed for the
        # "auto" case; it has been removed so "auto" is honored.
        if prewarm == "auto":
            # Prewarming only pays off when the temp folder lives on a real
            # disk: shared-memory filesystems have no disk access to hide.
            self._prewarm = not self._temp_folder.startswith(SYSTEM_SHARED_MEM_FS)
        else:
            self._prewarm = prewarm
        self._memmaped_arrays = _WeakArrayKeyMap()
        self._temporary_memmaped_filenames = set()
        self._unlink_on_gc_collect = unlink_on_gc_collect

    @property
    def _temp_folder(self):
        # Resolved lazily so each pickling call sees the folder of the
        # currently active context.
        return self._temp_folder_resolver()

    def __reduce__(self):
        # The ArrayMemmapForwardReducer is passed to the children processes: it
        # needs to be pickled but the _WeakArrayKeyMap need to be skipped as
        # it's only guaranteed to be consistent with the parent process memory
        # garbage collection.
        # Although this reducer is pickled, it is not needed in its destination
        # process (child processes), as we only use this reducer to send
        # memmaps from the parent process to the children processes. For this
        # reason, we can afford skipping the resolver, (which would otherwise
        # be unpicklable), and pass it as None instead.
        args = (self._max_nbytes, None, self._mmap_mode, self._unlink_on_gc_collect)
        kwargs = {
            "verbose": self.verbose,
            "prewarm": self._prewarm,
        }
        return ArrayMemmapForwardReducer, args, kwargs

    def __call__(self, a):
        """Reduce array ``a`` for pickling, dumping it to a memmap if large.

        Returns a standard ``__reduce__``-style (callable, args) pair.
        """
        m = _get_backing_memmap(a)
        # isinstance already rejects None, so no separate None check needed.
        if isinstance(m, np.memmap):
            # a is already backed by a memmap file, let's reuse it directly
            return _reduce_memmap_backed(a, m)

        if (
            not a.dtype.hasobject
            and self._max_nbytes is not None
            and a.nbytes > self._max_nbytes
        ):
            # check that the folder exists (lazily create the pool temp folder
            # if required)
            try:
                os.makedirs(self._temp_folder)
                os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
            except OSError as e:
                # EEXIST means another worker created it first: benign race.
                if e.errno != errno.EEXIST:
                    raise e

            try:
                basename = self._memmaped_arrays.get(a)
            except KeyError:
                # Generate a new unique random filename. The process and thread
                # ids are only useful for debugging purpose and to make it
                # easier to cleanup orphaned files in case of hard process
                # kill (e.g. by "kill -9" or segfault).
                basename = "{}-{}-{}.pkl".format(
                    os.getpid(), id(threading.current_thread()), uuid4().hex
                )
                self._memmaped_arrays.set(a, basename)
            filename = os.path.join(self._temp_folder, basename)

            # In case the same array with the same content is passed several
            # times to the pool subprocess children, serialize it only once
            is_new_memmap = filename not in self._temporary_memmaped_filenames

            # add the memmap to the list of temporary memmaps created by joblib
            self._temporary_memmaped_filenames.add(filename)

            if self._unlink_on_gc_collect:
                # Bump reference count of the memmap by 1 to account for
                # shared usage of the memmap by a child process. The
                # corresponding decref call will be executed upon calling
                # resource_tracker.maybe_unlink, registered as a finalizer in
                # the child.
                # the incref/decref calls here are only possible when the child
                # and the parent share the same resource_tracker. It is not the
                # case for the multiprocessing backend, but it does not matter
                # because unlinking a memmap from a child process is only
                # useful to control the memory usage of long-lasting child
                # processes, while the multiprocessing-based pools terminate
                # their workers at the end of a map() call.
                resource_tracker.register(filename, "file")

            if is_new_memmap:
                # Incref each temporary memmap created by joblib one extra
                # time. This means that these memmaps will only be deleted
                # once an extra maybe_unlink() is called, which is done once
                # all the jobs have completed (or been canceled) in the
                # Parallel._terminate_backend() method.
                resource_tracker.register(filename, "file")

            if not os.path.exists(filename):
                util.debug(
                    "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) "
                    "creating a new memmap at {}".format(a.shape, a.dtype, filename)
                )
                for dumped_filename in dump(a, filename):
                    os.chmod(dumped_filename, FILE_PERMISSIONS)

                if self._prewarm:
                    # Warm up the data by accessing it. This operation ensures
                    # that the disk access required to create the memmapping
                    # file are performed in the reducing process and avoids
                    # concurrent memmap creation in multiple children
                    # processes.
                    load(filename, mmap_mode=self._mmap_mode).max()

            else:
                util.debug(
                    "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
                    "reusing memmap file: {}".format(
                        a.shape, a.dtype, os.path.basename(filename)
                    )
                )

            # The worker process will use joblib.load to memmap the data
            return (
                load_temporary_memmap,
                (filename, self._mmap_mode, self._unlink_on_gc_collect),
            )
        else:
            # do not convert a into memmap, let pickler do its usual copy with
            # the default system pickler
            util.debug(
                "[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, "
                " dtype={}).".format(a.shape, a.dtype)
            )
            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
|
||||
|
||||
|
||||
def get_memmapping_reducers(
    forward_reducers=None,
    backward_reducers=None,
    temp_folder_resolver=None,
    max_nbytes=1e6,
    mmap_mode="r",
    verbose=0,
    prewarm=False,
    unlink_on_gc_collect=True,
    **kwargs,
):
    """Construct a pair of memmapping reducer linked to a tmpdir.

    This function manage the creation and the clean up of the temporary folders
    underlying the memory maps and should be use to get the reducers necessary
    to construct joblib pool or executor.
    """
    forward_reducers = {} if forward_reducers is None else forward_reducers
    backward_reducers = {} if backward_reducers is None else backward_reducers

    if np is not None:
        # Smart forward reducer: detects memmap backed arrays and dumps
        # in-memory arrays larger than max_nbytes to memmap files.
        reduce_ndarray = ArrayMemmapForwardReducer(
            max_nbytes,
            temp_folder_resolver,
            mmap_mode,
            unlink_on_gc_collect,
            verbose,
            prewarm=prewarm,
        )
        for array_type in (np.ndarray, np.memmap):
            forward_reducers[array_type] = reduce_ndarray
            # Child-to-parent communication always pickles in-memory arrays
            # (never memmaps) to avoid confusing the caller and making the
            # temporary folder tricky to collect.
            backward_reducers[array_type] = reduce_array_memmap_backward

    return forward_reducers, backward_reducers
|
||||
|
||||
|
||||
class TemporaryResourcesManager(object):
    """Stateful object able to manage temporary folder and pickles

    It exposes:
    - a per-context folder name resolving API that memmap-based reducers will
      rely on to know where to pickle the temporary memmaps
    - a temporary file/folder management API that internally uses the
      resource_tracker.
    """

    def __init__(self, temp_folder_root=None, context_id=None):
        """Initialize the manager and activate an initial context.

        Parameters
        ----------
        temp_folder_root: str or None
            Root directory under which per-context sub-folders are created
            (forwarded to ``_get_temp_dir``).
        context_id: str or None
            Identifier of the initial context; a random hex id is generated
            when omitted.
        """
        self._current_temp_folder = None
        self._temp_folder_root = temp_folder_root
        self._use_shared_mem = None
        # context_id -> resolved temp folder path
        self._cached_temp_folders = dict()
        # Unique per-manager id, mixed into folder names to disambiguate
        # successive managers tied to the same Parallel object.
        self._id = uuid4().hex
        # context_id -> atexit-registered cleanup callback
        self._finalizers = {}
        if context_id is None:
            # It would be safer to not assign a default context id (less silent
            # bugs), but doing this while maintaining backward compatibility
            # with the previous, context-unaware version get_memmaping_executor
            # exposes too many low-level details.
            context_id = uuid4().hex
        self.set_current_context(context_id)

    def set_current_context(self, context_id):
        """Make ``context_id`` the active context, registering it if new."""
        self._current_context_id = context_id
        self.register_new_context(context_id)

    def register_new_context(self, context_id):
        """Reserve a folder name for ``context_id`` (no-op if already known)."""
        # Prepare a sub-folder name specific to a context (usually a unique id
        # generated by each instance of the Parallel class). Do not create in
        # advance to spare FS write access if no array is to be dumped).
        if context_id in self._cached_temp_folders:
            return
        else:
            # During its lifecycle, one Parallel object can have several
            # executors associated to it (for instance, if a loky worker raises
            # an exception, joblib shutdowns the executor and instantly
            # recreates a new one before raising the error - see
            # ``ensure_ready``. Because we don't want two executors tied to
            # the same Parallel object (and thus the same context id) to
            # register/use/delete the same folder, we also add an id specific
            # to the current Manager (and thus specific to its associated
            # executor) to the folder name.
            new_folder_name = "joblib_memmapping_folder_{}_{}_{}".format(
                os.getpid(), self._id, context_id
            )
            new_folder_path, _ = _get_temp_dir(new_folder_name, self._temp_folder_root)
            self.register_folder_finalizer(new_folder_path, context_id)
            self._cached_temp_folders[context_id] = new_folder_path

    def resolve_temp_folder_name(self):
        """Return a folder name specific to the currently activated context"""
        return self._cached_temp_folders[self._current_context_id]

    # resource management API

    def register_folder_finalizer(self, pool_subfolder, context_id):
        """Track ``pool_subfolder`` and schedule its deletion at exit."""
        # Register the garbage collector at program exit in case caller forgets
        # to call terminate explicitly: note we do not pass any reference to
        # ensure that this callback won't prevent garbage collection of
        # parallel instance and related file handler resources such as POSIX
        # semaphores and pipes
        pool_module_name = whichmodule(delete_folder, "delete_folder")
        resource_tracker.register(pool_subfolder, "folder")

        def _cleanup():
            # In some cases the Python runtime seems to set delete_folder to
            # None just before exiting when accessing the delete_folder
            # function from the closure namespace. So instead we reimport
            # the delete_folder function explicitly.
            # https://github.com/joblib/joblib/issues/328
            # We cannot just use from 'joblib.pool import delete_folder'
            # because joblib should only use relative imports to allow
            # easy vendoring.
            delete_folder = __import__(
                pool_module_name, fromlist=["delete_folder"]
            ).delete_folder
            try:
                delete_folder(pool_subfolder, allow_non_empty=True)
                resource_tracker.unregister(pool_subfolder, "folder")
            except OSError:
                warnings.warn(
                    "Failed to delete temporary folder: {}".format(pool_subfolder)
                )

        self._finalizers[context_id] = atexit.register(_cleanup)

    def _clean_temporary_resources(
        self, context_id=None, force=False, allow_non_empty=False
    ):
        """Clean temporary resources created by a process-based pool"""
        if context_id is None:
            # Iterates over a copy of the cache keys to avoid Error due to
            # iterating over a changing size dictionary.
            for context_id in list(self._cached_temp_folders):
                self._clean_temporary_resources(
                    context_id, force=force, allow_non_empty=allow_non_empty
                )
        else:
            temp_folder = self._cached_temp_folders.get(context_id)
            if temp_folder and os.path.exists(temp_folder):
                for filename in os.listdir(temp_folder):
                    if force:
                        # Some workers have failed and the ref counted might
                        # be off. The workers should have shut down by this
                        # time so forcefully clean up the files.
                        resource_tracker.unregister(
                            os.path.join(temp_folder, filename), "file"
                        )
                    else:
                        # Decref only: the file is unlinked once its refcount
                        # reaches zero.
                        resource_tracker.maybe_unlink(
                            os.path.join(temp_folder, filename), "file"
                        )

                # When forcing clean-up, try to delete the folder even if some
                # files are still in it. Otherwise, try to delete the folder
                allow_non_empty |= force

                # Clean up the folder if possible, either if it is empty or
                # if none of the files in it are in used and allow_non_empty.
                try:
                    delete_folder(temp_folder, allow_non_empty=allow_non_empty)
                    # Forget the folder once it has been deleted
                    self._cached_temp_folders.pop(context_id, None)
                    resource_tracker.unregister(temp_folder, "folder")

                    # Also cancel the finalizers that gets triggered at gc.
                    finalizer = self._finalizers.pop(context_id, None)
                    if finalizer is not None:
                        atexit.unregister(finalizer)

                except OSError:
                    # Temporary folder cannot be deleted right now.
                    # This folder will be cleaned up by an atexit
                    # finalizer registered by the memmapping_reducer.
                    pass
|
||||
@@ -0,0 +1,51 @@
|
||||
"""Helper module to factorize the conditional multiprocessing import logic
|
||||
|
||||
We use a distinct module to simplify import statements and avoid introducing
|
||||
circular dependencies (for instance for the assert_spawning name).
|
||||
"""
|
||||
|
||||
import os
|
||||
import warnings
|
||||
|
||||
# Obtain possible configuration from the environment, assuming 1 (on)
|
||||
# by default, upon 0 set to None. Should instructively fail if some non
|
||||
# 0/1 value is set.
|
||||
mp = int(os.environ.get("JOBLIB_MULTIPROCESSING", 1)) or None
|
||||
if mp:
|
||||
try:
|
||||
import _multiprocessing # noqa
|
||||
import multiprocessing as mp
|
||||
except ImportError:
|
||||
mp = None
|
||||
|
||||
# 2nd stage: validate that locking is available on the system and
|
||||
# issue a warning if not
|
||||
if mp is not None:
|
||||
try:
|
||||
# try to create a named semaphore using SemLock to make sure they are
|
||||
# available on this platform. We use the low level object
|
||||
# _multiprocessing.SemLock to avoid spawning a resource tracker on
|
||||
# Unix system or changing the default backend.
|
||||
import tempfile
|
||||
from _multiprocessing import SemLock
|
||||
|
||||
_rand = tempfile._RandomNameSequence()
|
||||
for i in range(100):
|
||||
try:
|
||||
name = "/joblib-{}-{}".format(os.getpid(), next(_rand))
|
||||
_sem = SemLock(0, 0, 1, name=name, unlink=True)
|
||||
del _sem # cleanup
|
||||
break
|
||||
except FileExistsError as e: # pragma: no cover
|
||||
if i >= 99:
|
||||
raise FileExistsError("cannot find name for semaphore") from e
|
||||
except (FileExistsError, AttributeError, ImportError, OSError) as e:
|
||||
mp = None
|
||||
warnings.warn("%s. joblib will operate in serial mode" % (e,))
|
||||
|
||||
|
||||
# 3rd stage: backward compat for the assert_spawning helper
|
||||
if mp is not None:
|
||||
from multiprocessing.context import assert_spawning
|
||||
else:
|
||||
assert_spawning = None
|
||||
@@ -0,0 +1,753 @@
|
||||
"""
|
||||
Backends for embarrassingly parallel code.
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import gc
|
||||
import os
|
||||
import threading
|
||||
import warnings
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
from ._multiprocessing_helpers import mp
|
||||
from ._utils import (
|
||||
_retrieve_traceback_capturing_wrapped_call,
|
||||
_TracebackCapturingWrapper,
|
||||
)
|
||||
|
||||
if mp is not None:
|
||||
from multiprocessing.pool import ThreadPool
|
||||
|
||||
from .executor import get_memmapping_executor
|
||||
|
||||
# Import loky only if multiprocessing is present
|
||||
from .externals.loky import cpu_count, process_executor
|
||||
from .externals.loky.process_executor import ShutdownExecutorError
|
||||
from .pool import MemmappingPool
|
||||
|
||||
|
||||
class ParallelBackendBase(metaclass=ABCMeta):
    """Helper abc which defines all methods a ParallelBackend must implement"""

    # Number of workers used when the caller does not specify n_jobs.
    default_n_jobs = 1

    # Whether the backend can limit the thread count of nested native
    # threadpools (BLAS, OpenMP, ...) in its workers.
    supports_inner_max_num_threads = False

    # This flag was introduced for backward compatibility reasons.
    # New backends should always set it to True and implement the
    # `retrieve_result_callback` method.
    supports_retrieve_callback = False

    @property
    def supports_return_generator(self):
        # Both capabilities follow from callback-based retrieval.
        return self.supports_retrieve_callback

    @property
    def supports_timeout(self):
        return self.supports_retrieve_callback

    # Depth of nested parallelism this backend instance runs at; set in
    # __init__ and read by get_nested_backend.
    nesting_level = None

    def __init__(
        self, nesting_level=None, inner_max_num_threads=None, **backend_kwargs
    ):
        super().__init__()
        self.nesting_level = nesting_level
        self.inner_max_num_threads = inner_max_num_threads
        self.backend_kwargs = backend_kwargs

    # Environment variables consulted/overridden by _prepare_worker_env to
    # cap native threadpool sizes in worker processes.
    MAX_NUM_THREADS_VARS = [
        "OMP_NUM_THREADS",
        "OPENBLAS_NUM_THREADS",
        "MKL_NUM_THREADS",
        "BLIS_NUM_THREADS",
        "VECLIB_MAXIMUM_THREADS",
        "NUMBA_NUM_THREADS",
        "NUMEXPR_NUM_THREADS",
    ]

    TBB_ENABLE_IPC_VAR = "ENABLE_IPC"

    @abstractmethod
    def effective_n_jobs(self, n_jobs):
        """Determine the number of jobs that can actually run in parallel

        n_jobs is the number of workers requested by the callers. Passing
        n_jobs=-1 means requesting all available workers for instance matching
        the number of CPU cores on the worker host(s).

        This method should return a guesstimate of the number of workers that
        can actually perform work concurrently. The primary use case is to make
        it possible for the caller to know in how many chunks to slice the
        work.

        In general working on larger data chunks is more efficient (less
        scheduling overhead and better use of CPU cache prefetching heuristics)
        as long as all the workers have enough work to do.
        """

    def apply_async(self, func, callback=None):
        """Deprecated: implement `submit` instead."""
        raise NotImplementedError("Implement `submit` instead.")

    def submit(self, func, callback=None):
        """Schedule a function to be run and return a future-like object.

        This method should return a future-like object that allow tracking
        the progress of the task.

        If ``supports_retrieve_callback`` is False, the return value of this
        method is passed to ``retrieve_result`` instead of calling
        ``retrieve_result_callback``.

        Parameters
        ----------
        func: callable
            The function to be run in parallel.

        callback: callable
            A callable that will be called when the task is completed. This callable
            is a wrapper around ``retrieve_result_callback``. This should be added
            to the future-like object returned by this method, so that the callback
            is called when the task is completed.

            For future-like backends, this can be achieved with something like
            ``future.add_done_callback(callback)``.

        Returns
        -------
        future: future-like
            A future-like object to track the execution of the submitted function.
        """
        # Default implementation bridges to the legacy ``apply_async`` API for
        # backends that have not implemented ``submit`` yet.
        warnings.warn(
            "`apply_async` is deprecated, implement and use `submit` instead.",
            DeprecationWarning,
        )
        return self.apply_async(func, callback)

    def retrieve_result_callback(self, out):
        """Called within the callback function passed to `submit`.

        This method can customise how the result of the function is retrieved
        from the future-like object.

        Parameters
        ----------
        out: future-like
            The future-like object returned by the `submit` method.

        Returns
        -------
        result: object
            The result of the function executed in parallel.
        """

    def retrieve_result(self, out, timeout=None):
        """Hook to retrieve the result when support_retrieve_callback=False.

        The argument `out` is the result of the `submit` call. This method
        should return the result of the computation or raise an exception if
        the computation failed.
        """
        if self.supports_timeout:
            return out.get(timeout=timeout)
        else:
            return out.get()

    def configure(
        self, n_jobs=1, parallel=None, prefer=None, require=None, **backend_kwargs
    ):
        """Reconfigure the backend and return the number of workers.

        This makes it possible to reuse an existing backend instance for
        successive independent calls to Parallel with different parameters.

        ``prefer`` and ``require`` are accepted for API compatibility; this
        base implementation does not use them.
        """
        self.parallel = parallel
        return self.effective_n_jobs(n_jobs)

    def start_call(self):
        """Call-back method called at the beginning of a Parallel call"""

    def stop_call(self):
        """Call-back method called at the end of a Parallel call"""

    def terminate(self):
        """Shutdown the workers and free the shared memory."""

    def compute_batch_size(self):
        """Determine the optimal batch size"""
        return 1

    def batch_completed(self, batch_size, duration):
        """Callback indicate how long it took to run a batch"""

    def abort_everything(self, ensure_ready=True):
        """Abort any running tasks

        This is called when an exception has been raised when executing a task
        and all the remaining tasks will be ignored and can therefore be
        aborted to spare computation resources.

        If ensure_ready is True, the backend should be left in an operating
        state as future tasks might be re-submitted via that same backend
        instance.

        If ensure_ready is False, the implementer of this method can decide
        to leave the backend in a closed / terminated state as no new task
        are expected to be submitted to this backend.

        Setting ensure_ready to False is an optimization that can be leveraged
        when aborting tasks via killing processes from a local process pool
        managed by the backend it-self: if we expect no new tasks, there is no
        point in re-creating new workers.
        """
        # Does nothing by default: to be overridden in subclasses when
        # canceling tasks is possible.
        pass

    def get_nested_backend(self):
        """Backend instance to be used by nested Parallel calls.

        By default a thread-based backend is used for the first level of
        nesting. Beyond, switch to sequential backend to avoid spawning too
        many threads on the host.
        """
        nesting_level = getattr(self, "nesting_level", 0) + 1
        if nesting_level > 1:
            return SequentialBackend(nesting_level=nesting_level), None
        else:
            return ThreadingBackend(nesting_level=nesting_level), None

    def _prepare_worker_env(self, n_jobs):
        """Return environment variables limiting threadpools in external libs.

        This function return a dict containing environment variables to pass
        when creating a pool of process. These environment variables limit the
        number of threads to `n_threads` for OpenMP, MKL, Accelerated and
        OpenBLAS libraries in the child processes.
        """
        explicit_n_threads = self.inner_max_num_threads
        default_n_threads = max(cpu_count() // n_jobs, 1)

        # Set the inner environment variables to self.inner_max_num_threads if
        # it is given. Else, default to cpu_count // n_jobs unless the variable
        # is already present in the parent process environment.
        env = {}
        for var in self.MAX_NUM_THREADS_VARS:
            if explicit_n_threads is None:
                var_value = os.environ.get(var, default_n_threads)
            else:
                var_value = explicit_n_threads

            env[var] = str(var_value)

        if self.TBB_ENABLE_IPC_VAR not in os.environ:
            # To avoid over-subscription when using TBB, let the TBB schedulers
            # use Inter Process Communication to coordinate:
            env[self.TBB_ENABLE_IPC_VAR] = "1"
        return env

    @contextlib.contextmanager
    def retrieval_context(self):
        """Context manager to manage an execution context.

        Calls to Parallel.retrieve will be made inside this context.

        By default, this does nothing. It may be useful for subclasses to
        handle nested parallelism. In particular, it may be required to avoid
        deadlocks if a backend manages a fixed number of workers, when those
        workers may be asked to do nested Parallel calls. Without
        'retrieval_context' this could lead to deadlock, as all the workers
        managed by the backend may be "busy" waiting for the nested parallel
        calls to finish, but the backend has no free workers to execute those
        tasks.
        """
        yield

    @staticmethod
    def in_main_thread():
        return isinstance(threading.current_thread(), threading._MainThread)
|
||||
|
||||
|
||||
class SequentialBackend(ParallelBackendBase):
    """A ParallelBackend which will execute all batches sequentially.

    Does not use/create any threading objects, and hence has minimal
    overhead. Used when n_jobs == 1.
    """

    uses_threads = True
    supports_timeout = False
    supports_retrieve_callback = False
    supports_sharedmem = True

    def effective_n_jobs(self, n_jobs):
        """Determine the number of jobs which are going to run in parallel"""
        if n_jobs != 0:
            # Sequential execution always amounts to a single worker.
            return 1
        raise ValueError("n_jobs == 0 in Parallel has no meaning")

    def submit(self, func, callback=None):
        """Schedule a func to be run"""
        # Batches are executed inline by Parallel itself for this backend.
        raise RuntimeError("Should never be called for SequentialBackend.")

    def retrieve_result_callback(self, out):
        raise RuntimeError("Should never be called for SequentialBackend.")

    def get_nested_backend(self):
        # import is not top level to avoid cyclic import errors.
        from .parallel import get_active_backend

        # SequentialBackend should neither change the nesting level, the
        # default backend or the number of jobs. Just return the current one.
        return get_active_backend()
|
||||
|
||||
|
||||
class PoolManagerMixin(object):
    """A helper class for managing pool of workers."""

    _pool = None

    def effective_n_jobs(self, n_jobs):
        """Determine the number of jobs which are going to run in parallel"""
        if n_jobs == 0:
            raise ValueError("n_jobs == 0 in Parallel has no meaning")
        if mp is None or n_jobs is None:
            # multiprocessing is not available or disabled: fall back to
            # sequential mode.
            return 1
        if n_jobs < 0:
            # Negative values count back from the total CPU count (-1 => all).
            n_jobs = max(cpu_count() + 1 + n_jobs, 1)
        return n_jobs

    def terminate(self):
        """Shutdown the process or thread pool"""
        if self._pool is None:
            return
        self._pool.close()
        self._pool.terminate()  # terminate does a join()
        self._pool = None

    def _get_pool(self):
        """Used by `submit` to make it possible to implement lazy init"""
        return self._pool

    def submit(self, func, callback=None):
        """Schedule a func to be run"""
        # Wrap func so tracebacks survive the process boundary and so the
        # callback also fires on error (e.g. KeyboardInterrupt in a worker),
        # keeping the pool from waiting forever on crashed jobs.
        wrapped = _TracebackCapturingWrapper(func)
        return self._get_pool().apply_async(
            wrapped,
            (),
            callback=callback,
            error_callback=callback,
        )

    def retrieve_result_callback(self, result):
        """Mimic concurrent.futures results, raising an error if needed."""
        # multiprocessing.Pool invokes callbacks with the value of job.get():
        # either the wrapped call's result or the exception captured while
        # producing it. Unwrap and re-raise as appropriate.
        return _retrieve_traceback_capturing_wrapped_call(result)

    def abort_everything(self, ensure_ready=True):
        """Shutdown the pool and restart a new one with the same parameters"""
        self.terminate()
        if ensure_ready:
            self.configure(
                n_jobs=self.parallel.n_jobs,
                parallel=self.parallel,
                **self.parallel._backend_kwargs,
            )
|
||||
|
||||
|
||||
class AutoBatchingMixin(object):
    """Mixin that dynamically tunes how many tasks are dispatched per batch."""

    # In seconds: large enough to hide the dispatching overhead of the
    # underlying multiprocessing machinery. Value found by running
    # benchmarks/bench_auto_batching.py with various parameters on
    # various platforms.
    MIN_IDEAL_BATCH_DURATION = 0.2

    # Kept low enough to avoid stragglers: long batches running alone on a
    # single worker while the other workers have nothing left to process.
    MAX_IDEAL_BATCH_DURATION = 2

    # Default values for the batching counters.
    _DEFAULT_EFFECTIVE_BATCH_SIZE = 1
    _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
        self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION

    def compute_batch_size(self):
        """Determine the optimal batch size"""
        prev_size = self._effective_batch_size
        duration = self._smoothed_batch_duration

        if 0 < duration < self.MIN_IDEAL_BATCH_DURATION:
            # Batches complete too quickly: the per-batch scheduling
            # overhead is not amortized. Grow the batch; the ideal size is
            # doubled to damp oscillations between the min and max bounds.
            ideal = 2 * int(prev_size * self.MIN_IDEAL_BATCH_DURATION / duration)
            # Cap growth at 2x per adjustment so a transiently fast batch
            # cannot produce huge batches that starve the other workers.
            new_size = max(min(2 * prev_size, ideal), 1)
            self._effective_batch_size = new_size
            if self.parallel.verbose >= 10:
                self.parallel._print(
                    f"Batch computation too fast ({duration}s.) "
                    f"Setting batch_size={new_size}."
                )
        elif duration > self.MAX_IDEAL_BATCH_DURATION and prev_size >= 2:
            # Batches run too long: some CPUs may sit idle while a couple
            # of workers finish overly long batches. Shrink quickly to
            # limit the likelihood of such stragglers.
            ideal = int(prev_size * self.MIN_IDEAL_BATCH_DURATION / duration)
            # Multiply by two to limit oscillations between min and max.
            new_size = max(2 * ideal, 1)
            self._effective_batch_size = new_size
            if self.parallel.verbose >= 10:
                self.parallel._print(
                    f"Batch computation too slow ({duration}s.) "
                    f"Setting batch_size={new_size}."
                )
        else:
            # Duration is inside the ideal window (or not measured yet):
            # keep the current batch size.
            new_size = prev_size

        if new_size != prev_size:
            # The smoothed duration estimate is only meaningful for a fixed
            # batch size (it is updated by the apply_async callback while
            # the size stays constant), so reset it on every re-tune.
            self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION

        return new_size

    def batch_completed(self, batch_size, duration):
        """Callback indicate how long it took to run a batch"""
        if batch_size != self._effective_batch_size:
            # Measurement taken under a previous batch size: ignore it.
            return
        # Exponentially weighted moving average of the dispatch-to-completion
        # time, seeded with the first measurement after a reset.
        previous = self._smoothed_batch_duration
        if previous == self._DEFAULT_SMOOTHED_BATCH_DURATION:
            self._smoothed_batch_duration = duration
        else:
            self._smoothed_batch_duration = 0.8 * previous + 0.2 * duration

    def reset_batch_stats(self):
        """Reset batch statistics to default values.

        This avoids interferences with future jobs.
        """
        self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
        self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
|
||||
|
||||
|
||||
class ThreadingBackend(PoolManagerMixin, ParallelBackendBase):
    """A ParallelBackend which will use a thread pool to execute batches in.

    This is a low-overhead backend but it suffers from the Python Global
    Interpreter Lock if the called function relies a lot on Python objects.
    Mostly useful when the execution bottleneck is a compiled extension that
    explicitly releases the GIL (for instance a Cython loop wrapped in a "with
    nogil" block or an expensive call to a library such as NumPy).

    The actual thread pool is lazily initialized: its construction is delayed
    to the first call to apply_async.

    ThreadingBackend is used as the default backend for nested calls.
    """

    supports_retrieve_callback = True
    uses_threads = True
    supports_sharedmem = True

    def configure(self, n_jobs=1, parallel=None, **backend_kwargs):
        """Record the pool parameters and return the number of workers."""
        n_jobs = self.effective_n_jobs(n_jobs)
        if n_jobs == 1:
            # A single worker gains nothing from a thread pool: hand over
            # to the sequential backend and skip the pool overhead.
            raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))
        self.parallel = parallel
        self._n_jobs = n_jobs
        return n_jobs

    def _get_pool(self):
        """Return the thread pool, creating it on first use.

        Lazy construction delays the cost of spawning the worker threads
        until the first call to apply_async actually needs them.
        """
        pool = self._pool
        if pool is None:
            pool = self._pool = ThreadPool(self._n_jobs)
        return pool
|
||||
|
||||
|
||||
class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin, ParallelBackendBase):
    """A ParallelBackend which will use a multiprocessing.Pool.

    Will introduce some communication and memory overhead when exchanging
    input and output data with the worker Python processes.
    However, does not suffer from the Python Global Interpreter Lock.
    """

    supports_retrieve_callback = True
    supports_return_generator = False

    def effective_n_jobs(self, n_jobs):
        """Determine the number of jobs which are going to run in parallel.

        This also checks if we are attempting to create a nested parallel
        loop.
        """
        if n_jobs != 1 and mp is None:
            pass
        if mp is None:
            # multiprocessing is not available on this interpreter/platform:
            # run sequentially.
            return 1

        if mp.current_process().daemon:
            # Daemonic processes cannot have children
            if n_jobs != 1:
                if inside_dask_worker():
                    # NOTE(review): this message appears to be missing a '\n'
                    # after the dask.config.set(...) work-around — compare
                    # with the LokyBackend copy of the same message; verify.
                    msg = (
                        "Inside a Dask worker with daemon=True, "
                        "setting n_jobs=1.\nPossible work-arounds:\n"
                        "- dask.config.set("
                        "{'distributed.worker.daemon': False})"
                        "- set the environment variable "
                        "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
                        "before creating your Dask cluster."
                    )
                else:
                    msg = (
                        "Multiprocessing-backed parallel loops "
                        "cannot be nested, setting n_jobs=1"
                    )
                warnings.warn(msg, stacklevel=3)
            return 1

        if process_executor._CURRENT_DEPTH > 0:
            # Mixing loky and multiprocessing in nested loop is not supported
            if n_jobs != 1:
                warnings.warn(
                    "Multiprocessing-backed parallel loops cannot be nested,"
                    " below loky, setting n_jobs=1",
                    stacklevel=3,
                )
            return 1

        elif not (self.in_main_thread() or self.nesting_level == 0):
            # Prevent posix fork inside in non-main posix threads
            if n_jobs != 1:
                warnings.warn(
                    "Multiprocessing-backed parallel loops cannot be nested"
                    " below threads, setting n_jobs=1",
                    stacklevel=3,
                )
            return 1

        # No nesting issue detected: let the base class resolve n_jobs
        # (e.g. negative values) against the CPU count.
        return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)

    def configure(
        self,
        n_jobs=1,
        parallel=None,
        prefer=None,
        require=None,
        **memmapping_pool_kwargs,
    ):
        """Build a process or thread pool and return the number of workers"""
        n_jobs = self.effective_n_jobs(n_jobs)
        if n_jobs == 1:
            # Nothing to parallelize: switch to the sequential backend.
            raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))

        # Call-site kwargs take precedence over the backend-level defaults.
        memmapping_pool_kwargs = {
            **self.backend_kwargs,
            **memmapping_pool_kwargs,
        }

        # Make sure to free as much memory as possible before forking
        gc.collect()
        self._pool = MemmappingPool(n_jobs, **memmapping_pool_kwargs)
        self.parallel = parallel
        return n_jobs

    def terminate(self):
        """Shutdown the process or thread pool"""
        super(MultiprocessingBackend, self).terminate()
        # Drop the auto-batching statistics so a later run starts fresh.
        self.reset_batch_stats()
|
||||
|
||||
|
||||
class LokyBackend(AutoBatchingMixin, ParallelBackendBase):
    """Managing pool of workers with loky instead of multiprocessing."""

    supports_retrieve_callback = True
    supports_inner_max_num_threads = True

    def configure(
        self,
        n_jobs=1,
        parallel=None,
        prefer=None,
        require=None,
        idle_worker_timeout=None,
        **memmapping_executor_kwargs,
    ):
        """Build a process executor and return the number of workers"""
        n_jobs = self.effective_n_jobs(n_jobs)
        if n_jobs == 1:
            # Nothing to parallelize: switch to the sequential backend
            # instead of paying the executor overhead.
            raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))

        # Call-site kwargs take precedence over the backend-level defaults.
        memmapping_executor_kwargs = {
            **self.backend_kwargs,
            **memmapping_executor_kwargs,
        }

        # Prohibit the use of 'timeout' in the LokyBackend, as 'idle_worker_timeout'
        # better describes the backend's behavior.
        if "timeout" in memmapping_executor_kwargs:
            raise ValueError(
                "The 'timeout' parameter is not supported by the LokyBackend. "
                "Please use the `idle_worker_timeout` parameter instead."
            )
        if idle_worker_timeout is None:
            # Default: shut down idle workers after 300 seconds.
            idle_worker_timeout = self.backend_kwargs.get("idle_worker_timeout", 300)

        self._workers = get_memmapping_executor(
            n_jobs,
            timeout=idle_worker_timeout,
            env=self._prepare_worker_env(n_jobs=n_jobs),
            context_id=parallel._id,
            **memmapping_executor_kwargs,
        )
        self.parallel = parallel
        return n_jobs

    def effective_n_jobs(self, n_jobs):
        """Determine the number of jobs which are going to run in parallel"""
        if n_jobs == 0:
            raise ValueError("n_jobs == 0 in Parallel has no meaning")
        elif mp is None or n_jobs is None:
            # multiprocessing is not available or disabled, fallback
            # to sequential mode
            return 1
        elif mp.current_process().daemon:
            # Daemonic processes cannot have children
            if n_jobs != 1:
                if inside_dask_worker():
                    msg = (
                        "Inside a Dask worker with daemon=True, "
                        "setting n_jobs=1.\nPossible work-arounds:\n"
                        "- dask.config.set("
                        "{'distributed.worker.daemon': False})\n"
                        "- set the environment variable "
                        "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
                        "before creating your Dask cluster."
                    )
                else:
                    msg = (
                        "Loky-backed parallel loops cannot be called in a"
                        " multiprocessing, setting n_jobs=1"
                    )
                warnings.warn(msg, stacklevel=3)

            return 1
        elif not (self.in_main_thread() or self.nesting_level == 0):
            # Prevent posix fork inside in non-main posix threads
            if n_jobs != 1:
                warnings.warn(
                    "Loky-backed parallel loops cannot be nested below "
                    "threads, setting n_jobs=1",
                    stacklevel=3,
                )
            return 1
        elif n_jobs < 0:
            # Negative n_jobs counts backwards from the CPU count:
            # -1 means all CPUs, -2 all but one, etc.
            n_jobs = max(cpu_count() + 1 + n_jobs, 1)
        return n_jobs

    def submit(self, func, callback=None):
        """Schedule a func to be run"""
        future = self._workers.submit(func)
        if callback is not None:
            future.add_done_callback(callback)
        return future

    def retrieve_result_callback(self, future):
        """Retrieve the result, here out is the future given by submit"""
        try:
            return future.result()
        except ShutdownExecutorError:
            # Translate the loky-internal error into an actionable message
            # for the user of Parallel.
            raise RuntimeError(
                "The executor underlying Parallel has been shutdown. "
                "This is likely due to the garbage collection of a previous "
                "generator from a call to Parallel with return_as='generator'."
                " Make sure the generator is not garbage collected when "
                "submitting a new job or that it is first properly exhausted."
            )

    def terminate(self):
        """Release per-call temporary resources, keeping workers for reuse."""
        if self._workers is not None:
            # Don't terminate the workers as we want to reuse them in later
            # calls, but cleanup the temporary resources that the Parallel call
            # created. This 'hack' requires a private, low-level operation.
            self._workers._temp_folder_manager._clean_temporary_resources(
                context_id=self.parallel._id, force=False
            )
            self._workers = None

        self.reset_batch_stats()

    def abort_everything(self, ensure_ready=True):
        """Shutdown the workers and restart a new one with the same parameters"""
        self._workers.terminate(kill_workers=True)
        self._workers = None

        if ensure_ready:
            self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)
|
||||
|
||||
|
||||
class FallbackToBackend(Exception):
    """Raised when configuration should fallback to another backend"""

    def __init__(self, backend):
        # The backend instance the caller should switch to.
        self.backend = backend
|
||||
|
||||
|
||||
def inside_dask_worker():
    """Check whether the current function is executed inside a Dask worker."""
    # Implemented here rather than in joblib._dask to avoid a circular
    # import: _dask imports the parallel-backend module which imports _dask.
    try:
        from distributed import get_worker
    except ImportError:
        # distributed is not installed, so we cannot be inside a worker.
        return False

    try:
        get_worker()
    except ValueError:
        # get_worker raises ValueError when called outside a worker.
        return False
    return True
|
||||
@@ -0,0 +1,495 @@
|
||||
"""Storage providers backends for Memory caching."""
|
||||
|
||||
import collections
|
||||
import datetime
|
||||
import json
|
||||
import operator
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
import warnings
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from pickle import PicklingError
|
||||
|
||||
from . import numpy_pickle
|
||||
from .backports import concurrency_safe_rename
|
||||
from .disk import memstr_to_bytes, mkdirp, rm_subdirs
|
||||
from .logger import format_time
|
||||
|
||||
# Lightweight record describing one cache entry: its directory path, its
# total size in bytes and its last access time (used for eviction ordering).
CacheItemInfo = collections.namedtuple("CacheItemInfo", "path size last_access")
|
||||
|
||||
|
||||
class CacheWarning(Warning):
    """Warning category for cache dump failures other than PicklingError."""
|
||||
|
||||
|
||||
def concurrency_safe_write(object_to_write, filename, write_func):
    """Write an object into a unique temporary file in a concurrency-safe way.

    The temporary file name is derived from ``filename`` so it is created in
    the same directory as the final destination, which lets the caller move
    it into place afterwards on the same filesystem.

    Parameters
    ----------
    object_to_write : object
        The object handed to ``write_func`` for serialization.
    filename : str
        The final destination path; used as the prefix of the temporary file.
    write_func : callable
        Invoked as ``write_func(object_to_write, temporary_filename)`` to
        perform the actual write.

    Returns
    -------
    str
        The path of the temporary file that was written.
    """
    # The temporary name combines the destination with a UUID, the process id
    # and the thread id to avoid collisions due to concurrent writes. The
    # UUID is unique across nodes and time, which protects even a cache
    # folder shared by several Python processes having the same pid and
    # thread id on different nodes of a cluster.
    # BUGFIX: the prefix previously was the literal string "(unknown)",
    # ignoring `filename`, so every temporary file was created in the current
    # working directory instead of next to its destination.
    thread_id = id(threading.current_thread())
    temporary_filename = f"{filename}.{uuid.uuid4().hex}-{os.getpid()}-{thread_id}"

    write_func(object_to_write, temporary_filename)

    return temporary_filename
|
||||
|
||||
|
||||
class StoreBackendBase(metaclass=ABCMeta):
    """Abstract base class declaring the interface a StorageBackend must implement.

    Concrete backends provide the low-level item primitives (_open_item,
    _item_exists, _move_item) and the location management operations
    (create_location, clear_location, get_items, configure); the high-level
    caching logic is layered on top by StoreBackendMixin.
    """

    # Base location of the store (e.g. a directory path on a filesystem
    # backend); set by configure().
    location = None

    @abstractmethod
    def _open_item(self, f, mode):
        """Open an item of the store and return a file-like object.

        Private primitive, only called by StoreBackendMixin.

        Parameters
        ----------
        f: a file-like object
            The file-like object where an item is stored and retrieved
        mode: string, optional
            Opening mode of the file-like object; allowed values are
            'rb' and 'wb'.

        Returns
        -------
        a file-like object
        """

    @abstractmethod
    def _item_exists(self, location):
        """Return whether an item location exists in the store.

        Private primitive, only called by StoreBackendMixin.

        Parameters
        ----------
        location: string
            The location of an item. On a filesystem, this corresponds to
            the absolute path of a file, including its filename.

        Returns
        -------
        True if the item exists, False otherwise
        """

    @abstractmethod
    def _move_item(self, src, dst):
        """Move an item from src to dst inside the store.

        Private primitive, only called by StoreBackendMixin.

        Parameters
        ----------
        src: string
            The source location of an item
        dst: string
            The destination location of an item
        """

    @abstractmethod
    def create_location(self, location):
        """Create a location in the store.

        Parameters
        ----------
        location: string
            The location in the store. On a filesystem, this corresponds
            to a directory.
        """

    @abstractmethod
    def clear_location(self, location):
        """Clear a location in the store.

        Parameters
        ----------
        location: string
            The location in the store. On a filesystem, this corresponds
            to a directory or to the absolute path of a file.
        """

    @abstractmethod
    def get_items(self):
        """Return the whole list of items available in the store.

        Returns
        -------
        The list of items identified by their ids (e.g filename in a
        filesystem).
        """

    @abstractmethod
    def configure(self, location, verbose=0, backend_options=dict()):
        """Configure the store.

        Parameters
        ----------
        location: string
            The base location used by the store. On a filesystem, this
            corresponds to a directory.
        verbose: int
            The level of verbosity of the store
        backend_options: dict
            Named parameters used to configure the store backend.
        """
|
||||
|
||||
|
||||
class StoreBackendMixin(object):
    """Class providing all logic for managing the store in a generic way.

    The StoreBackend subclass has to implement 3 methods: create_location,
    clear_location and configure. The StoreBackend also has to provide
    a private _open_item, _item_exists and _move_item methods. The _open_item
    method has to have the same signature as the builtin open and return a
    file-like object.
    """

    def load_item(self, call_id, verbose=1, timestamp=None, metadata=None):
        """Load an item from the store given its id as a list of str."""
        full_path = os.path.join(self.location, *call_id)

        if verbose > 1:
            # Build a "[Memory]<elapsed>: Loading func(args)" trace message.
            ts_string = (
                "{: <16}".format(format_time(time.time() - timestamp))
                if timestamp is not None
                else ""
            )
            signature = os.path.basename(call_id[0])
            if metadata is not None and "input_args" in metadata:
                kwargs = ", ".join(
                    "{}={}".format(*item) for item in metadata["input_args"].items()
                )
                signature += "({})".format(kwargs)
            msg = "[Memory]{}: Loading {}".format(ts_string, signature)
            if verbose < 10:
                print("{0}...".format(msg))
            else:
                print("{0} from {1}".format(msg, full_path))

        # mmap_mode is only defined by backends that support memory mapping.
        mmap_mode = None if not hasattr(self, "mmap_mode") else self.mmap_mode

        filename = os.path.join(full_path, "output.pkl")
        if not self._item_exists(filename):
            raise KeyError(
                "Non-existing item (may have been "
                "cleared).\nFile %s does not exist" % filename
            )

        # file-like object cannot be used when mmap_mode is set
        if mmap_mode is None:
            with self._open_item(filename, "rb") as f:
                item = numpy_pickle.load(f)
        else:
            item = numpy_pickle.load(filename, mmap_mode=mmap_mode)
        return item

    def dump_item(self, call_id, item, verbose=1):
        """Dump an item in the store at the id given as a list of str."""
        try:
            item_path = os.path.join(self.location, *call_id)
            if not self._item_exists(item_path):
                self.create_location(item_path)
            filename = os.path.join(item_path, "output.pkl")
            if verbose > 10:
                print("Persisting in %s" % item_path)

            def write_func(to_write, dest_filename):
                with self._open_item(dest_filename, "wb") as f:
                    try:
                        numpy_pickle.dump(to_write, f, compress=self.compress)
                    except PicklingError as e:
                        # TODO(1.5) turn into error
                        warnings.warn(
                            "Unable to cache to disk: failed to pickle "
                            "output. In version 1.5 this will raise an "
                            f"exception. Exception: {e}.",
                            FutureWarning,
                        )

            self._concurrency_safe_write(item, filename, write_func)
        except Exception as e:  # noqa: E722
            # Dumping is best-effort: a failure (e.g. a race on the directory
            # creation) must not break the cached function call itself.
            warnings.warn(
                "Unable to cache to disk. Possibly a race condition in the "
                f"creation of the directory. Exception: {e}.",
                CacheWarning,
            )

    def clear_item(self, call_id):
        """Clear the item at the id, given as a list of str."""
        item_path = os.path.join(self.location, *call_id)
        if self._item_exists(item_path):
            self.clear_location(item_path)

    def contains_item(self, call_id):
        """Check if there is an item at the id, given as a list of str."""
        item_path = os.path.join(self.location, *call_id)
        filename = os.path.join(item_path, "output.pkl")

        return self._item_exists(filename)

    def get_item_info(self, call_id):
        """Return information about item."""
        return {"location": os.path.join(self.location, *call_id)}

    def get_metadata(self, call_id):
        """Return actual metadata of an item."""
        try:
            item_path = os.path.join(self.location, *call_id)
            filename = os.path.join(item_path, "metadata.json")
            with self._open_item(filename, "rb") as f:
                return json.loads(f.read().decode("utf-8"))
        except:  # noqa: E722
            # Metadata is optional: any failure (missing file, invalid JSON)
            # degrades to an empty dict instead of raising.
            return {}

    def store_metadata(self, call_id, metadata):
        """Store metadata of a computation."""
        try:
            item_path = os.path.join(self.location, *call_id)
            self.create_location(item_path)
            filename = os.path.join(item_path, "metadata.json")

            def write_func(to_write, dest_filename):
                with self._open_item(dest_filename, "wb") as f:
                    f.write(json.dumps(to_write).encode("utf-8"))

            self._concurrency_safe_write(metadata, filename, write_func)
        except:  # noqa: E722
            # Storing metadata is best-effort; failures are silently ignored.
            pass

    def contains_path(self, call_id):
        """Check cached function is available in store."""
        func_path = os.path.join(self.location, *call_id)
        # NOTE(review): `object_exists` is not defined in this module nor in
        # StoreBackendBase — presumably supplied by the concrete backend;
        # verify (other methods use `_item_exists` for the same purpose).
        return self.object_exists(func_path)

    def clear_path(self, call_id):
        """Clear all items with a common path in the store."""
        func_path = os.path.join(self.location, *call_id)
        if self._item_exists(func_path):
            self.clear_location(func_path)

    def store_cached_func_code(self, call_id, func_code=None):
        """Store the code of the cached function."""
        func_path = os.path.join(self.location, *call_id)
        if not self._item_exists(func_path):
            self.create_location(func_path)

        if func_code is not None:
            filename = os.path.join(func_path, "func_code.py")
            with self._open_item(filename, "wb") as f:
                f.write(func_code.encode("utf-8"))

    def get_cached_func_code(self, call_id):
        """Return the stored source code of the cached function."""
        filename = os.path.join(self.location, *call_id, "func_code.py")
        # NOTE(review): this try/except re-raises unchanged and is therefore
        # a no-op; possibly kept as a hook for backend-specific handling.
        try:
            with self._open_item(filename, "rb") as f:
                return f.read().decode("utf-8")
        except:  # noqa: E722
            raise

    def get_cached_func_info(self, call_id):
        """Return information related to the cached function if it exists."""
        return {"location": os.path.join(self.location, *call_id)}

    def clear(self):
        """Clear the whole store content."""
        self.clear_location(self.location)

    def enforce_store_limits(self, bytes_limit, items_limit=None, age_limit=None):
        """
        Remove the store's oldest files to enforce item, byte, and age limits.
        """
        items_to_delete = self._get_items_to_delete(bytes_limit, items_limit, age_limit)

        for item in items_to_delete:
            if self.verbose > 10:
                print("Deleting item {0}".format(item))
            try:
                self.clear_location(item.path)
            except OSError:
                # Even with ignore_errors=True shutil.rmtree can raise OSError
                # with:
                # [Errno 116] Stale file handle if another process has deleted
                # the folder already.
                pass

    def _get_items_to_delete(self, bytes_limit, items_limit=None, age_limit=None):
        """
        Get items to delete to keep the store under size, file, & age limits.
        """
        if isinstance(bytes_limit, str):
            # Convert human-readable sizes (e.g. "1G") into a byte count.
            bytes_limit = memstr_to_bytes(bytes_limit)

        items = self.get_items()
        if not items:
            return []

        size = sum(item.size for item in items)

        if bytes_limit is not None:
            to_delete_size = size - bytes_limit
        else:
            to_delete_size = 0

        if items_limit is not None:
            to_delete_items = len(items) - items_limit
        else:
            to_delete_items = 0

        if age_limit is not None:
            older_item = min(item.last_access for item in items)
            if age_limit.total_seconds() < 0:
                raise ValueError("age_limit has to be a positive timedelta")
            deadline = datetime.datetime.now() - age_limit
        else:
            deadline = None

        if (
            to_delete_size <= 0
            and to_delete_items <= 0
            and (deadline is None or older_item > deadline)
        ):
            # All limits are already satisfied: nothing to delete.
            return []

        # We want to delete first the cache items that were accessed a
        # long time ago
        items.sort(key=operator.attrgetter("last_access"))

        items_to_delete = []
        size_so_far = 0
        items_so_far = 0

        for item in items:
            if (
                (size_so_far >= to_delete_size)
                and items_so_far >= to_delete_items
                and (deadline is None or deadline < item.last_access)
            ):
                # Every limit is satisfied by the deletions selected so far.
                break

            items_to_delete.append(item)
            size_so_far += item.size
            items_so_far += 1

        return items_to_delete

    def _concurrency_safe_write(self, to_write, filename, write_func):
        """Writes an object into a file in a concurrency-safe way."""
        temporary_filename = concurrency_safe_write(to_write, filename, write_func)
        # Move the fully-written temporary file into its final place.
        self._move_item(temporary_filename, filename)

    def __repr__(self):
        """Printable representation of the store location."""
        return '{class_name}(location="{location}")'.format(
            class_name=self.__class__.__name__, location=self.location
        )
|
||||
|
||||
|
||||
class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):
    """A StoreBackend used with local or network file systems."""

    # Bind the low-level primitives directly to filesystem functions.
    _open_item = staticmethod(open)
    _item_exists = staticmethod(os.path.exists)
    _move_item = staticmethod(concurrency_safe_rename)

    def clear_location(self, location):
        """Delete location on store."""
        if location == self.location:
            # Clearing the store root: remove only its subdirectories so
            # the root directory itself survives.
            rm_subdirs(location)
        else:
            shutil.rmtree(location, ignore_errors=True)

    def create_location(self, location):
        """Create object location on store"""
        mkdirp(location)

    def get_items(self):
        """Returns the whole list of items available in the store."""
        items = []

        for dirpath, _, filenames in os.walk(self.location):
            # Cache entries live in directories whose basename is a
            # 32-hex-digit hash.
            is_cache_hash_dir = re.match("[a-f0-9]{32}", os.path.basename(dirpath))

            if is_cache_hash_dir:
                output_filename = os.path.join(dirpath, "output.pkl")
                try:
                    last_access = os.path.getatime(output_filename)
                except OSError:
                    # Fall back to the directory's own access time when
                    # output.pkl is missing.
                    try:
                        last_access = os.path.getatime(dirpath)
                    except OSError:
                        # The directory has already been deleted
                        continue

                last_access = datetime.datetime.fromtimestamp(last_access)
                try:
                    full_filenames = [os.path.join(dirpath, fn) for fn in filenames]
                    dirsize = sum(os.path.getsize(fn) for fn in full_filenames)
                except OSError:
                    # Either output_filename or one of the files in
                    # dirpath does not exist any more. We assume this
                    # directory is being cleaned by another process already
                    continue

                items.append(CacheItemInfo(dirpath, dirsize, last_access))

        return items

    def configure(self, location, verbose=1, backend_options=None):
        """Configure the store backend.

        For this backend, valid store options are 'compress' and 'mmap_mode'
        """
        if backend_options is None:
            backend_options = {}

        # setup location directory
        self.location = location
        if not os.path.exists(self.location):
            mkdirp(self.location)

        # Automatically add `.gitignore` file to the cache folder.
        # XXX: the condition is necessary because in `Memory.__init__`, the user
        # passed `location` param is modified to be either `{location}` or
        # `{location}/joblib` depending on input type (`pathlib.Path` vs `str`).
        # The proper resolution of this inconsistency is tracked in:
        # https://github.com/joblib/joblib/issues/1684
        cache_directory = (
            os.path.dirname(location)
            if os.path.dirname(location) and os.path.basename(location) == "joblib"
            else location
        )
        with open(os.path.join(cache_directory, ".gitignore"), "w") as file:
            file.write("# Created by joblib automatically.\n")
            file.write("*\n")

        # item can be stored compressed for faster I/O
        self.compress = backend_options.get("compress", False)

        # FileSystemStoreBackend can be used with mmap_mode options under
        # certain conditions.
        mmap_mode = backend_options.get("mmap_mode")
        if self.compress and mmap_mode is not None:
            warnings.warn(
                "Compressed items cannot be memmapped in a "
                "filesystem store. Option will be ignored.",
                stacklevel=2,
            )

        self.mmap_mode = mmap_mode
        self.verbose = verbose
|
||||
83
Backend/venv/lib/python3.12/site-packages/joblib/_utils.py
Normal file
83
Backend/venv/lib/python3.12/site-packages/joblib/_utils.py
Normal file
@@ -0,0 +1,83 @@
|
||||
# Adapted from https://stackoverflow.com/a/9558001/2536294
|
||||
|
||||
import ast
|
||||
import operator as op
|
||||
from dataclasses import dataclass
|
||||
|
||||
from ._multiprocessing_helpers import mp
|
||||
|
||||
if mp is not None:
|
||||
from .externals.loky.process_executor import _ExceptionWithTraceback
|
||||
|
||||
|
||||
# Supported operators: maps AST operator node types to the corresponding
# functions of the `operator` module. Any node type absent from this table
# makes eval_expr reject the expression.
operators = {
    ast.Add: op.add,
    ast.Sub: op.sub,
    ast.Mult: op.mul,
    ast.Div: op.truediv,
    ast.FloorDiv: op.floordiv,
    ast.Mod: op.mod,
    ast.Pow: op.pow,
    ast.USub: op.neg,
}


def eval_expr(expr):
    """Safely evaluate a purely arithmetic expression string.

    >>> eval_expr('2*6')
    12
    >>> eval_expr('2**6')
    64
    >>> eval_expr('1 + 2*3**(4) / (6 + -7)')
    -161.0
    """
    try:
        tree = ast.parse(expr, mode="eval")
        return eval_(tree.body)
    except (TypeError, SyntaxError, KeyError) as e:
        # TypeError: unsupported node kind; SyntaxError: unparsable input;
        # KeyError: operator missing from the table.
        raise ValueError(
            f"{expr!r} is not a valid or supported arithmetic expression."
        ) from e


def eval_(node):
    """Recursively evaluate one arithmetic AST node."""
    if isinstance(node, ast.Constant):  # literal value
        return node.value
    if isinstance(node, ast.BinOp):  # <left> <operator> <right>
        apply = operators[type(node.op)]
        return apply(eval_(node.left), eval_(node.right))
    if isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
        return operators[type(node.op)](eval_(node.operand))
    # Anything else (names, calls, attributes, ...) is deliberately rejected.
    raise TypeError(node)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class _Sentinel:
|
||||
"""A sentinel to mark a parameter as not explicitly set"""
|
||||
|
||||
default_value: object
|
||||
|
||||
def __repr__(self):
|
||||
return f"default({self.default_value!r})"
|
||||
|
||||
|
||||
class _TracebackCapturingWrapper:
|
||||
"""Protect function call and return error with traceback."""
|
||||
|
||||
def __init__(self, func):
|
||||
self.func = func
|
||||
|
||||
def __call__(self, **kwargs):
|
||||
try:
|
||||
return self.func(**kwargs)
|
||||
except BaseException as e:
|
||||
return _ExceptionWithTraceback(e)
|
||||
|
||||
|
||||
def _retrieve_traceback_capturing_wrapped_call(out):
    """Unwrap the result of a ``_TracebackCapturingWrapper`` call.

    If the wrapped call failed, ``out`` is an ``_ExceptionWithTraceback``:
    rebuild the original exception (with its captured traceback attached)
    and re-raise it in the current process.  Otherwise return ``out``
    unchanged.
    """
    if isinstance(out, _ExceptionWithTraceback):
        # __reduce__ yields the rebuild helper and its arguments; calling it
        # reconstructs the original exception with the remote traceback.
        rebuild, args = out.__reduce__()
        out = rebuild(*args)
    if isinstance(out, BaseException):
        raise out
    return out
|
||||
195
Backend/venv/lib/python3.12/site-packages/joblib/backports.py
Normal file
195
Backend/venv/lib/python3.12/site-packages/joblib/backports.py
Normal file
@@ -0,0 +1,195 @@
|
||||
"""
|
||||
Backports of fixes for joblib dependencies
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from multiprocessing import util
|
||||
from os.path import basename
|
||||
|
||||
|
||||
class Version:
    """Backport from deprecated distutils

    We maintain this backport to avoid introducing a new dependency on
    `packaging`.

    We might re-explore this choice in the future if all major Python
    projects introduce a dependency on packaging anyway.
    """

    # NOTE(review): this base class only supplies the rich-comparison
    # protocol on top of ``_cmp``; subclasses are expected to implement
    # ``parse`` (called from __init__) and ``_cmp`` (returning -1/0/1, or
    # NotImplemented for unsupported operand types).

    def __init__(self, vstring=None):
        # Parsing is delegated to the subclass; a falsy vstring leaves the
        # instance un-parsed.
        if vstring:
            self.parse(vstring)

    def __repr__(self):
        return "%s ('%s')" % (self.__class__.__name__, str(self))

    def __eq__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c == 0

    def __lt__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c < 0

    def __le__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c <= 0

    def __gt__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c > 0

    def __ge__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c >= 0
|
||||
|
||||
|
||||
class LooseVersion(Version):
    """Backport from deprecated distutils

    We maintain this backport to avoid introducing a new dependency on
    `packaging`.

    We might re-explore this choice in the future if all major Python
    projects introduce a dependency on packaging anyway.
    """

    # Splits a version string into runs of digits, runs of lowercase
    # letters, and dots (the dots are discarded by ``parse``).
    component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)

    def __init__(self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse(self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        components = [x for x in self.component_re.split(vstring) if x and x != "."]
        for i, obj in enumerate(components):
            try:
                # Numeric components compare as integers so "1.10" > "1.9".
                components[i] = int(obj)
            except ValueError:
                # Non-numeric components (e.g. "rc", "a") stay as strings.
                pass

        self.version = components

    def __str__(self):
        return self.vstring

    def __repr__(self):
        return "LooseVersion ('%s')" % str(self)

    def _cmp(self, other):
        # Plain strings are accepted for convenience (e.g. ``v < "1.13"``).
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented

        # NOTE(review): comparing versions whose component lists mix ints and
        # strings at the same position (e.g. "1.2" vs "1.2a") raises
        # TypeError on Python 3 — same behavior as the distutils original.
        if self.version == other.version:
            return 0
        if self.version < other.version:
            return -1
        if self.version > other.version:
            return 1
|
||||
|
||||
|
||||
try:
    import numpy as np

    def make_memmap(
        filename,
        dtype="uint8",
        mode="r+",
        offset=0,
        shape=None,
        order="C",
        unlink_on_gc_collect=False,
    ):
        """Custom memmap constructor compatible with numpy.memmap.

        This function:
        - is a backport the numpy memmap offset fix (See
          https://github.com/numpy/numpy/pull/8443 for more details.
          The numpy fix is available starting numpy 1.13)
        - adds ``unlink_on_gc_collect``, which specifies explicitly whether
          the process re-constructing the memmap owns a reference to the
          underlying file. If set to True, it adds a finalizer to the
          newly-created memmap that sends a maybe_unlink request for the
          memmaped file to resource_tracker.
        """
        util.debug(
            "[MEMMAP READ] creating a memmap (shape {}, filename {}, pid {})".format(
                shape, basename(filename), os.getpid()
            )
        )

        mm = np.memmap(
            filename, dtype=dtype, mode=mode, offset=offset, shape=shape, order=order
        )
        # Pre-1.13 numpy dropped the requested offset; restore it manually.
        if LooseVersion(np.__version__) < "1.13":
            mm.offset = offset
        if unlink_on_gc_collect:
            # Imported lazily to avoid a circular import at module load time.
            from ._memmapping_reducer import add_maybe_unlink_finalizer

            add_maybe_unlink_finalizer(mm)
        return mm
except ImportError:
    # numpy is optional: keep the same signature but fail loudly when called.
    def make_memmap(
        filename,
        dtype="uint8",
        mode="r+",
        offset=0,
        shape=None,
        order="C",
        unlink_on_gc_collect=False,
    ):
        raise NotImplementedError(
            "'joblib.backports.make_memmap' should not be used "
            "if numpy is not installed."
        )
|
||||
|
||||
|
||||
if os.name == "nt":
    # On Windows, os.replace can transiently fail with access-denied errors
    # when two processes touch the same file.
    # https://github.com/joblib/joblib/issues/540
    access_denied_errors = (5, 13)
    from os import replace

    def concurrency_safe_rename(src, dst):
        """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists.

        On Windows os.replace can yield permission errors if executed by two
        different processes.
        """
        max_sleep_time = 1  # give up after ~1s of cumulative waiting
        total_sleep_time = 0
        sleep_time = 0.001  # exponential backoff starting at 1 ms
        while total_sleep_time < max_sleep_time:
            try:
                replace(src, dst)
                break
            except Exception as exc:
                if getattr(exc, "winerror", None) in access_denied_errors:
                    # Another process likely holds the file; back off, retry.
                    time.sleep(sleep_time)
                    total_sleep_time += sleep_time
                    sleep_time *= 2
                else:
                    raise
        else:
            # Retry budget exhausted without a successful rename.
            # NOTE(review): this bare ``raise`` executes outside any active
            # exception handler, so it raises RuntimeError("No active
            # exception to re-raise") rather than the last PermissionError —
            # confirm whether that is the intended failure mode.
            raise
else:
    # POSIX rename is already atomic and overwrites the destination.
    from os import replace as concurrency_safe_rename  # noqa
|
||||
572
Backend/venv/lib/python3.12/site-packages/joblib/compressor.py
Normal file
572
Backend/venv/lib/python3.12/site-packages/joblib/compressor.py
Normal file
@@ -0,0 +1,572 @@
|
||||
"""Classes and functions for managing compressors."""
|
||||
|
||||
import io
|
||||
import zlib
|
||||
|
||||
from joblib.backports import LooseVersion
|
||||
|
||||
try:
|
||||
from threading import RLock
|
||||
except ImportError:
|
||||
from dummy_threading import RLock
|
||||
|
||||
try:
|
||||
import bz2
|
||||
except ImportError:
|
||||
bz2 = None
|
||||
|
||||
try:
|
||||
import lz4
|
||||
from lz4.frame import LZ4FrameFile
|
||||
except ImportError:
|
||||
lz4 = None
|
||||
|
||||
try:
|
||||
import lzma
|
||||
except ImportError:
|
||||
lzma = None
|
||||
|
||||
|
||||
LZ4_NOT_INSTALLED_ERROR = (
|
||||
"LZ4 is not installed. Install it with pip: https://python-lz4.readthedocs.io/"
|
||||
)
|
||||
|
||||
# Registered compressors
|
||||
_COMPRESSORS = {}
|
||||
|
||||
# Magic numbers of supported compression file formats.
|
||||
_ZFILE_PREFIX = b"ZF" # used with pickle files created before 0.9.3.
|
||||
_ZLIB_PREFIX = b"\x78"
|
||||
_GZIP_PREFIX = b"\x1f\x8b"
|
||||
_BZ2_PREFIX = b"BZ"
|
||||
_XZ_PREFIX = b"\xfd\x37\x7a\x58\x5a"
|
||||
_LZMA_PREFIX = b"\x5d\x00"
|
||||
_LZ4_PREFIX = b"\x04\x22\x4d\x18"
|
||||
|
||||
|
||||
def register_compressor(compressor_name, compressor, force=False):
    """Register a new compressor.

    Parameters
    ----------
    compressor_name: str.
        The name of the compressor.
    compressor: CompressorWrapper
        An instance of a 'CompressorWrapper'.
    force: bool, default=False
        If True, overwrite a compressor already registered under the same
        name instead of raising a ValueError.
    """
    global _COMPRESSORS
    if not isinstance(compressor_name, str):
        raise ValueError(
            "Compressor name should be a string, '{}' given.".format(compressor_name)
        )

    if not isinstance(compressor, CompressorWrapper):
        raise ValueError(
            "Compressor should implement the CompressorWrapper "
            "interface, '{}' given.".format(compressor)
        )

    # NOTE(review): these hasattr checks run on the factory itself (usually a
    # file class such as BZ2File), not on an instance, so they rely on the
    # file-object methods being visible as class attributes — confirm intent.
    if compressor.fileobj_factory is not None and (
        not hasattr(compressor.fileobj_factory, "read")
        or not hasattr(compressor.fileobj_factory, "write")
        or not hasattr(compressor.fileobj_factory, "seek")
        or not hasattr(compressor.fileobj_factory, "tell")
    ):
        raise ValueError(
            "Compressor 'fileobj_factory' attribute should "
            "implement the file object interface, '{}' given.".format(
                compressor.fileobj_factory
            )
        )

    if compressor_name in _COMPRESSORS and not force:
        raise ValueError("Compressor '{}' already registered.".format(compressor_name))

    _COMPRESSORS[compressor_name] = compressor
|
||||
|
||||
|
||||
class CompressorWrapper:
    """A wrapper around a compressor file object.

    Attributes
    ----------
    obj: a file-like object
        The object must implement the buffer interface and will be used
        internally to compress/decompress the data.
    prefix: bytestring
        A bytestring corresponding to the magic number that identifies the
        file format associated to the compressor.
    extension: str
        The file extension used to automatically select this compressor during
        a dump to a file.
    """

    def __init__(self, obj, prefix=b"", extension=""):
        self.fileobj_factory = obj
        self.prefix = prefix
        self.extension = extension

    def compressor_file(self, fileobj, compresslevel=None):
        """Returns an instance of a compressor file object."""
        if compresslevel is None:
            # Let the underlying factory pick its default compression level.
            return self.fileobj_factory(fileobj, "wb")
        return self.fileobj_factory(fileobj, "wb", compresslevel=compresslevel)

    def decompressor_file(self, fileobj):
        """Returns an instance of a decompressor file object."""
        return self.fileobj_factory(fileobj, "rb")
|
||||
|
||||
|
||||
class BZ2CompressorWrapper(CompressorWrapper):
    """CompressorWrapper for the bzip2 format (``.bz2`` files)."""

    prefix = _BZ2_PREFIX
    extension = ".bz2"

    def __init__(self):
        # bz2 may be absent from some Python builds; detected lazily in
        # _check_versions when the wrapper is actually used.
        if bz2 is not None:
            self.fileobj_factory = bz2.BZ2File
        else:
            self.fileobj_factory = None

    def _check_versions(self):
        if bz2 is None:
            raise ValueError(
                "bz2 module is not compiled on your python standard library."
            )

    def compressor_file(self, fileobj, compresslevel=None):
        """Returns an instance of a compressor file object."""
        self._check_versions()
        if compresslevel is None:
            return self.fileobj_factory(fileobj, "wb")
        else:
            return self.fileobj_factory(fileobj, "wb", compresslevel=compresslevel)

    def decompressor_file(self, fileobj):
        """Returns an instance of a decompressor file object."""
        self._check_versions()
        fileobj = self.fileobj_factory(fileobj, "rb")
        return fileobj
|
||||
|
||||
|
||||
class LZMACompressorWrapper(CompressorWrapper):
    """CompressorWrapper for the raw LZMA ("alone") format (``.lzma``)."""

    prefix = _LZMA_PREFIX
    extension = ".lzma"
    # Name of the lzma container-format constant; overridden by the XZ
    # subclass to select FORMAT_XZ instead.
    _lzma_format_name = "FORMAT_ALONE"

    def __init__(self):
        # lzma may be absent from some Python builds; detected lazily in
        # _check_versions when the wrapper is actually used.
        if lzma is not None:
            self.fileobj_factory = lzma.LZMAFile
            self._lzma_format = getattr(lzma, self._lzma_format_name)
        else:
            self.fileobj_factory = None

    def _check_versions(self):
        if lzma is None:
            raise ValueError(
                "lzma module is not compiled on your python standard library."
            )

    def compressor_file(self, fileobj, compresslevel=None):
        """Returns an instance of a compressor file object."""
        # Consistency fix: fail with an informative ValueError when lzma is
        # unavailable, like the BZ2 and LZ4 wrappers do (previously this
        # crashed with an opaque TypeError because fileobj_factory is None).
        self._check_versions()
        if compresslevel is None:
            return self.fileobj_factory(fileobj, "wb", format=self._lzma_format)
        else:
            return self.fileobj_factory(
                fileobj, "wb", format=self._lzma_format, preset=compresslevel
            )

    def decompressor_file(self, fileobj):
        """Returns an instance of a decompressor file object."""
        self._check_versions()
        # Use the stored factory (lzma.LZMAFile) rather than referencing the
        # lzma module directly, for consistency with the other wrappers.
        return self.fileobj_factory(fileobj, "rb")
|
||||
|
||||
|
||||
class XZCompressorWrapper(LZMACompressorWrapper):
    """LZMACompressorWrapper variant using the xz container format."""

    prefix = _XZ_PREFIX
    extension = ".xz"
    # Selects lzma.FORMAT_XZ instead of FORMAT_ALONE in the parent class.
    _lzma_format_name = "FORMAT_XZ"
|
||||
|
||||
|
||||
class LZ4CompressorWrapper(CompressorWrapper):
    """CompressorWrapper for the LZ4 frame format (``.lz4`` files)."""

    prefix = _LZ4_PREFIX
    extension = ".lz4"

    def __init__(self):
        # lz4 is an optional third-party dependency; checked lazily in
        # _check_versions when the wrapper is actually used.
        if lz4 is not None:
            self.fileobj_factory = LZ4FrameFile
        else:
            self.fileobj_factory = None

    def _check_versions(self):
        if lz4 is None:
            raise ValueError(LZ4_NOT_INSTALLED_ERROR)
        lz4_version = lz4.__version__
        # Some releases prefix the version string with "v" (e.g. "v0.19").
        if lz4_version.startswith("v"):
            lz4_version = lz4_version[1:]
        # Versions older than 0.19 lack the frame API used here; treat them
        # the same as not installed.
        if LooseVersion(lz4_version) < LooseVersion("0.19"):
            raise ValueError(LZ4_NOT_INSTALLED_ERROR)

    def compressor_file(self, fileobj, compresslevel=None):
        """Returns an instance of a compressor file object."""
        self._check_versions()
        if compresslevel is None:
            return self.fileobj_factory(fileobj, "wb")
        else:
            # NOTE: lz4 names the keyword ``compression_level``, unlike the
            # stdlib compressors' ``compresslevel``.
            return self.fileobj_factory(fileobj, "wb", compression_level=compresslevel)

    def decompressor_file(self, fileobj):
        """Returns an instance of a decompressor file object."""
        self._check_versions()
        return self.fileobj_factory(fileobj, "rb")
|
||||
|
||||
|
||||
###############################################################################
|
||||
# base file compression/decompression object definition
|
||||
_MODE_CLOSED = 0
|
||||
_MODE_READ = 1
|
||||
_MODE_READ_EOF = 2
|
||||
_MODE_WRITE = 3
|
||||
_BUFFER_SIZE = 8192
|
||||
|
||||
|
||||
class BinaryZlibFile(io.BufferedIOBase):
    """A file object providing transparent zlib (de)compression.

    TODO python2_drop: is it still needed since we dropped Python 2 support?

    A BinaryZlibFile can act as a wrapper for an existing file object, or
    refer directly to a named file on disk.

    Note that BinaryZlibFile provides only a *binary* file interface: data read
    is returned as bytes, and data to be written should be given as bytes.

    This object is an adaptation of the BZ2File object and is compatible with
    versions of python >= 2.7.

    If filename is a str or bytes object, it gives the name
    of the file to be opened. Otherwise, it should be a file object,
    which will be used to read or write the compressed data.

    mode can be 'rb' for reading (default) or 'wb' for (over)writing

    If mode is 'wb', compresslevel can be a number between 1
    and 9 specifying the level of compression: 1 produces the least
    compression, and 9 produces the most compression. 3 is the default.
    """

    # Raw zlib stream; BinaryGzipFile overrides this to select gzip framing.
    wbits = zlib.MAX_WBITS

    def __init__(self, filename, mode="rb", compresslevel=3):
        # This lock must be recursive, so that BufferedIOBase's
        # readline(), readlines() and writelines() don't deadlock.
        self._lock = RLock()
        self._fp = None  # underlying raw file object
        self._closefp = False  # whether close() should also close _fp
        self._mode = _MODE_CLOSED
        self._pos = 0  # current position in the *uncompressed* stream
        self._size = -1  # uncompressed size; unknown (-1) until EOF is seen
        self.compresslevel = compresslevel

        if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9):
            raise ValueError(
                "'compresslevel' must be an integer "
                "between 1 and 9. You provided 'compresslevel={}'".format(compresslevel)
            )

        if mode == "rb":
            self._mode = _MODE_READ
            self._decompressor = zlib.decompressobj(self.wbits)
            # Readahead buffer of decompressed-but-unconsumed bytes.
            self._buffer = b""
            self._buffer_offset = 0
        elif mode == "wb":
            self._mode = _MODE_WRITE
            self._compressor = zlib.compressobj(
                self.compresslevel, zlib.DEFLATED, self.wbits, zlib.DEF_MEM_LEVEL, 0
            )
        else:
            raise ValueError("Invalid mode: %r" % (mode,))

        if isinstance(filename, str):
            self._fp = io.open(filename, mode)
            self._closefp = True
        elif hasattr(filename, "read") or hasattr(filename, "write"):
            self._fp = filename
        else:
            raise TypeError("filename must be a str or bytes object, or a file")

    def close(self):
        """Flush and close the file.

        May be called more than once without error. Once the file is
        closed, any other operation on it will raise a ValueError.
        """
        with self._lock:
            if self._mode == _MODE_CLOSED:
                return
            try:
                if self._mode in (_MODE_READ, _MODE_READ_EOF):
                    self._decompressor = None
                elif self._mode == _MODE_WRITE:
                    # Flush any data still buffered inside the compressor.
                    self._fp.write(self._compressor.flush())
                    self._compressor = None
            finally:
                try:
                    if self._closefp:
                        self._fp.close()
                finally:
                    # Always reset state, even if closing the file failed.
                    self._fp = None
                    self._closefp = False
                    self._mode = _MODE_CLOSED
                    self._buffer = b""
                    self._buffer_offset = 0

    @property
    def closed(self):
        """True if this file is closed."""
        return self._mode == _MODE_CLOSED

    def fileno(self):
        """Return the file descriptor for the underlying file."""
        self._check_not_closed()
        return self._fp.fileno()

    def seekable(self):
        """Return whether the file supports seeking."""
        return self.readable() and self._fp.seekable()

    def readable(self):
        """Return whether the file was opened for reading."""
        self._check_not_closed()
        return self._mode in (_MODE_READ, _MODE_READ_EOF)

    def writable(self):
        """Return whether the file was opened for writing."""
        self._check_not_closed()
        return self._mode == _MODE_WRITE

    # Mode-checking helper functions.

    def _check_not_closed(self):
        if self.closed:
            fname = getattr(self._fp, "name", None)
            msg = "I/O operation on closed file"
            if fname is not None:
                msg += " {}".format(fname)
            msg += "."
            raise ValueError(msg)

    def _check_can_read(self):
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for reading")

    def _check_can_write(self):
        if self._mode != _MODE_WRITE:
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for writing")

    def _check_can_seek(self):
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            self._check_not_closed()
            raise io.UnsupportedOperation(
                "Seeking is only supported on files open for reading"
            )
        if not self._fp.seekable():
            raise io.UnsupportedOperation(
                "The underlying file object does not support seeking"
            )

    # Fill the readahead buffer if it is empty. Returns False on EOF.
    def _fill_buffer(self):
        if self._mode == _MODE_READ_EOF:
            return False
        # Depending on the input data, our call to the decompressor may not
        # return any data. In this case, try again after reading another block.
        while self._buffer_offset == len(self._buffer):
            try:
                rawblock = self._decompressor.unused_data or self._fp.read(_BUFFER_SIZE)
                if not rawblock:
                    raise EOFError
            except EOFError:
                # End-of-stream marker and end of file. We're good.
                self._mode = _MODE_READ_EOF
                self._size = self._pos
                return False
            else:
                self._buffer = self._decompressor.decompress(rawblock)
        self._buffer_offset = 0
        return True

    # Read data until EOF.
    # If return_data is false, consume the data without returning it.
    def _read_all(self, return_data=True):
        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset :]
        self._buffer_offset = 0

        blocks = []
        while self._fill_buffer():
            if return_data:
                blocks.append(self._buffer)
            self._pos += len(self._buffer)
            self._buffer = b""
        if return_data:
            return b"".join(blocks)

    # Read a block of up to n bytes.
    # If return_data is false, consume the data without returning it.
    def _read_block(self, n_bytes, return_data=True):
        # If we have enough data buffered, return immediately.
        end = self._buffer_offset + n_bytes
        if end <= len(self._buffer):
            data = self._buffer[self._buffer_offset : end]
            self._buffer_offset = end
            self._pos += len(data)
            return data if return_data else None

        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset :]
        self._buffer_offset = 0

        blocks = []
        while n_bytes > 0 and self._fill_buffer():
            if n_bytes < len(self._buffer):
                data = self._buffer[:n_bytes]
                self._buffer_offset = n_bytes
            else:
                data = self._buffer
                self._buffer = b""
            if return_data:
                blocks.append(data)
            self._pos += len(data)
            n_bytes -= len(data)
        if return_data:
            return b"".join(blocks)

    def read(self, size=-1):
        """Read up to size uncompressed bytes from the file.

        If size is negative or omitted, read until EOF is reached.
        Returns b'' if the file is already at EOF.
        """
        with self._lock:
            self._check_can_read()
            if size == 0:
                return b""
            elif size < 0:
                return self._read_all()
            else:
                return self._read_block(size)

    def readinto(self, b):
        """Read up to len(b) bytes into b.

        Returns the number of bytes read (0 for EOF).
        """
        with self._lock:
            # Delegate to the BufferedIOBase implementation, which calls
            # self.read() under our recursive lock.
            return io.BufferedIOBase.readinto(self, b)

    def write(self, data):
        """Write a byte string to the file.

        Returns the number of uncompressed bytes written, which is
        always len(data). Note that due to buffering, the file on disk
        may not reflect the data written until close() is called.
        """
        with self._lock:
            self._check_can_write()
            # Convert data type if called by io.BufferedWriter.
            if isinstance(data, memoryview):
                data = data.tobytes()

            compressed = self._compressor.compress(data)
            self._fp.write(compressed)
            # _pos tracks the uncompressed stream position.
            self._pos += len(data)
            return len(data)

    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        self._fp.seek(0, 0)
        self._mode = _MODE_READ
        self._pos = 0
        # A fresh decompressor is required to re-read the stream from scratch.
        self._decompressor = zlib.decompressobj(self.wbits)
        self._buffer = b""
        self._buffer_offset = 0

    def seek(self, offset, whence=0):
        """Change the file position.

        The new position is specified by offset, relative to the
        position indicated by whence. Values for whence are:

        0: start of stream (default); offset must not be negative
        1: current stream position
        2: end of stream; offset must not be positive

        Returns the new file position.

        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        with self._lock:
            self._check_can_seek()

            # Recalculate offset as an absolute file position.
            if whence == 0:
                pass
            elif whence == 1:
                offset = self._pos + offset
            elif whence == 2:
                # Seeking relative to EOF - we need to know the file's size.
                if self._size < 0:
                    self._read_all(return_data=False)
                offset = self._size + offset
            else:
                raise ValueError("Invalid value for whence: %s" % (whence,))

            # Make it so that offset is the number of bytes to skip forward.
            if offset < self._pos:
                # Cannot seek backwards in a compressed stream: restart and
                # decompress forward from the beginning.
                self._rewind()
            else:
                offset -= self._pos

            # Read and discard data until we reach the desired position.
            self._read_block(offset, return_data=False)

            return self._pos

    def tell(self):
        """Return the current file position."""
        with self._lock:
            self._check_not_closed()
            return self._pos
|
||||
|
||||
|
||||
class ZlibCompressorWrapper(CompressorWrapper):
    """CompressorWrapper using BinaryZlibFile (raw zlib streams, ``.z``)."""

    def __init__(self):
        CompressorWrapper.__init__(
            self, obj=BinaryZlibFile, prefix=_ZLIB_PREFIX, extension=".z"
        )
|
||||
|
||||
|
||||
class BinaryGzipFile(BinaryZlibFile):
    """A file object providing transparent gzip (de)compression.

    If filename is a str or bytes object, it gives the name
    of the file to be opened. Otherwise, it should be a file object,
    which will be used to read or write the compressed data.

    mode can be 'rb' for reading (default) or 'wb' for (over)writing

    If mode is 'wb', compresslevel can be a number between 1
    and 9 specifying the level of compression: 1 produces the least
    compression, and 9 produces the most compression. 3 is the default.
    """

    wbits = 31  # zlib compressor/decompressor wbits value for gzip format.
|
||||
|
||||
|
||||
class GzipCompressorWrapper(CompressorWrapper):
    """CompressorWrapper using BinaryGzipFile (gzip framing, ``.gz``)."""

    def __init__(self):
        CompressorWrapper.__init__(
            self, obj=BinaryGzipFile, prefix=_GZIP_PREFIX, extension=".gz"
        )
|
||||
131
Backend/venv/lib/python3.12/site-packages/joblib/disk.py
Normal file
131
Backend/venv/lib/python3.12/site-packages/joblib/disk.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""
|
||||
Disk management utilities.
|
||||
"""
|
||||
|
||||
# Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
|
||||
# Lars Buitinck
|
||||
# Copyright (c) 2010 Gael Varoquaux
|
||||
# License: BSD Style, 3 clauses.
|
||||
|
||||
import errno
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from multiprocessing import util
|
||||
|
||||
try:
|
||||
WindowsError
|
||||
except NameError:
|
||||
WindowsError = OSError
|
||||
|
||||
|
||||
def disk_used(path):
    """Return the disk usage of directory *path*, in kilobytes."""
    total_bytes = 0
    # Include the directory entry itself (".") along with its children.
    for entry in os.listdir(path) + ["."]:
        info = os.stat(os.path.join(path, entry))
        if hasattr(info, "st_blocks"):
            # POSIX exposes the number of 512-byte blocks actually allocated.
            total_bytes += info.st_blocks * 512
        else:
            # on some platform st_blocks is not available (e.g., Windows):
            # approximate by rounding up to the next multiple of 512.
            total_bytes += (info.st_size // 512 + 1) * 512
    # We need to convert to int to avoid having longs on some systems (we
    # don't want longs to avoid problems we SQLite)
    return int(total_bytes / 1024.0)
|
||||
|
||||
|
||||
def memstr_to_bytes(text):
    """Convert a memory size string to its value in bytes.

    Parameters
    ----------
    text: str
        A size literal such as ``'10G'``, ``'500M'`` or ``'50K'``: a number
        followed by a binary unit suffix (K=2**10, M=2**20, G=2**30 bytes).

    Returns
    -------
    int
        The size in bytes.

    Raises
    ------
    ValueError
        If ``text`` is empty, does not end with a known unit suffix, or its
        numeric part cannot be parsed as a float.
    """
    kilo = 1024
    units = dict(K=kilo, M=kilo**2, G=kilo**3)
    try:
        size = int(units[text[-1]] * float(text[:-1]))
    # IndexError added: an empty string previously escaped as a raw
    # IndexError instead of the documented ValueError.
    except (KeyError, ValueError, IndexError) as e:
        # Message fix: previously read "size give" instead of "size given".
        raise ValueError(
            "Invalid literal for size given: %s (type %s) should be "
            "alike '10G', '500M', '50K'." % (text, type(text))
        ) from e
    return size
|
||||
|
||||
|
||||
def mkdirp(d):
    """Ensure directory *d* exists (like ``mkdir -p`` on Unix).

    No guarantee that the directory is writable.
    """
    try:
        os.makedirs(d)
    except OSError as exc:
        # A pre-existing path is fine; anything else (permissions, invalid
        # component, ...) is re-raised.
        already_exists = exc.errno == errno.EEXIST
        if not already_exists:
            raise
|
||||
|
||||
|
||||
# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
|
||||
# then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the
|
||||
# exception. this mechanism ensures that the sub-process gc have the time to
|
||||
# collect and close the memmaps before we fail.
|
||||
RM_SUBDIRS_RETRY_TIME = 0.1
|
||||
RM_SUBDIRS_N_RETRY = 10
|
||||
|
||||
|
||||
def rm_subdirs(path, onerror=None):
    """Remove all subdirectories in this path.

    The directory indicated by `path` is left in place, and its subdirectories
    are erased.

    If onerror is set, it is called to handle the error with arguments (func,
    path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
    path is the argument to that function that caused it to fail; and
    exc_info is a tuple returned by sys.exc_info(). If onerror is None,
    an exception is raised.
    """

    # NOTE this code is adapted from the one in shutil.rmtree, and is
    # just as fast

    names = []
    try:
        names = os.listdir(path)
    except os.error:
        if onerror is not None:
            onerror(os.listdir, path, sys.exc_info())
        else:
            raise

    # Each entry is removed through delete_folder, which retries to let
    # sub-process garbage collection release memmapped files first.
    for name in names:
        fullname = os.path.join(path, name)
        delete_folder(fullname, onerror=onerror)
|
||||
|
||||
|
||||
def delete_folder(folder_path, onerror=None, allow_non_empty=True):
    """Utility function to cleanup a temporary folder if it still exists.

    Parameters
    ----------
    folder_path: str
        Path of the folder to remove; a non-existent path is a no-op.
    onerror: callable or None
        Passed through to shutil.rmtree when provided; in that case no
        retrying is performed.
    allow_non_empty: bool, default=True
        When False, a non-empty folder is treated as an error (retried,
        then raised) instead of being removed.
    """
    if os.path.isdir(folder_path):
        if onerror is not None:
            # Caller handles failures itself: single attempt, no retries.
            shutil.rmtree(folder_path, False, onerror)
        else:
            # allow the rmtree to fail once, wait and re-try.
            # if the error is raised again, fail
            err_count = 0
            while True:
                files = os.listdir(folder_path)
                try:
                    if len(files) == 0 or allow_non_empty:
                        shutil.rmtree(folder_path, ignore_errors=False, onerror=None)
                        util.debug("Successfully deleted {}".format(folder_path))
                        break
                    else:
                        raise OSError(
                            "Expected empty folder {} but got {} files.".format(
                                folder_path, len(files)
                            )
                        )
                except (OSError, WindowsError):
                    err_count += 1
                    if err_count > RM_SUBDIRS_N_RETRY:
                        # the folder cannot be deleted right now. It maybe
                        # because some temporary files have not been deleted
                        # yet.
                        raise
                    # Give sub-process garbage collection time to close the
                    # memmaps backed by files in this folder, then retry.
                    time.sleep(RM_SUBDIRS_RETRY_TIME)
|
||||
131
Backend/venv/lib/python3.12/site-packages/joblib/executor.py
Normal file
131
Backend/venv/lib/python3.12/site-packages/joblib/executor.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""Utility function to construct a loky.ReusableExecutor with custom pickler.
|
||||
|
||||
This module provides efficient ways of working with data stored in
|
||||
shared memory with numpy.memmap arrays without inducing any memory
|
||||
copy between the parent and child processes.
|
||||
"""
|
||||
# Author: Thomas Moreau <thomas.moreau.2010@gmail.com>
|
||||
# Copyright: 2017, Thomas Moreau
|
||||
# License: BSD 3 clause
|
||||
|
||||
from ._memmapping_reducer import TemporaryResourcesManager, get_memmapping_reducers
|
||||
from .externals.loky.reusable_executor import _ReusablePoolExecutor
|
||||
|
||||
_executor_args = None
|
||||
|
||||
|
||||
def get_memmapping_executor(n_jobs, **kwargs):
|
||||
return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
|
||||
|
||||
|
||||
class MemmappingExecutor(_ReusablePoolExecutor):
|
||||
@classmethod
|
||||
def get_memmapping_executor(
|
||||
cls,
|
||||
n_jobs,
|
||||
timeout=300,
|
||||
initializer=None,
|
||||
initargs=(),
|
||||
env=None,
|
||||
temp_folder=None,
|
||||
context_id=None,
|
||||
**backend_args,
|
||||
):
|
||||
"""Factory for ReusableExecutor with automatic memmapping for large
|
||||
numpy arrays.
|
||||
"""
|
||||
global _executor_args
|
||||
# Check if we can reuse the executor here instead of deferring the test
|
||||
# to loky as the reducers are objects that changes at each call.
|
||||
executor_args = backend_args.copy()
|
||||
executor_args.update(env if env else {})
|
||||
executor_args.update(
|
||||
dict(timeout=timeout, initializer=initializer, initargs=initargs)
|
||||
)
|
||||
reuse = _executor_args is None or _executor_args == executor_args
|
||||
_executor_args = executor_args
|
||||
|
||||
manager = TemporaryResourcesManager(temp_folder)
|
||||
|
||||
# reducers access the temporary folder in which to store temporary
|
||||
# pickles through a call to manager.resolve_temp_folder_name. resolving
|
||||
# the folder name dynamically is useful to use different folders across
|
||||
# calls of a same reusable executor
|
||||
job_reducers, result_reducers = get_memmapping_reducers(
|
||||
unlink_on_gc_collect=True,
|
||||
temp_folder_resolver=manager.resolve_temp_folder_name,
|
||||
**backend_args,
|
||||
)
|
||||
_executor, executor_is_reused = super().get_reusable_executor(
|
||||
n_jobs,
|
||||
job_reducers=job_reducers,
|
||||
result_reducers=result_reducers,
|
||||
reuse=reuse,
|
||||
timeout=timeout,
|
||||
initializer=initializer,
|
||||
initargs=initargs,
|
||||
env=env,
|
||||
)
|
||||
|
||||
if not executor_is_reused:
|
||||
# Only set a _temp_folder_manager for new executors. Reused
|
||||
# executors already have a _temporary_folder_manager that must not
|
||||
# be re-assigned like that because it is referenced in various
|
||||
# places in the reducing machinery of the executor.
|
||||
_executor._temp_folder_manager = manager
|
||||
|
||||
if context_id is not None:
|
||||
# Only register the specified context once we know which manager
|
||||
# the current executor is using, in order to not register an atexit
|
||||
# finalizer twice for the same folder.
|
||||
_executor._temp_folder_manager.register_new_context(context_id)
|
||||
|
||||
return _executor
|
||||
|
||||
def terminate(self, kill_workers=False):
|
||||
self.shutdown(kill_workers=kill_workers)
|
||||
|
||||
# When workers are killed in a brutal manner, they cannot execute the
|
||||
# finalizer of their shared memmaps. The refcount of those memmaps may
|
||||
# be off by an unknown number, so instead of decref'ing them, we force
|
||||
# delete the whole temporary folder, and unregister them. There is no
|
||||
# risk of PermissionError at folder deletion because at this
|
||||
# point, all child processes are dead, so all references to temporary
|
||||
# memmaps are closed. Otherwise, just try to delete as much as possible
|
||||
# with allow_non_empty=True but if we can't, it will be clean up later
|
||||
# on by the resource_tracker.
|
||||
with self._submit_resize_lock:
|
||||
self._temp_folder_manager._clean_temporary_resources(
|
||||
force=kill_workers, allow_non_empty=True
|
||||
)
|
||||
|
||||
@property
|
||||
def _temp_folder(self):
|
||||
# Legacy property in tests. could be removed if we refactored the
|
||||
# memmapping tests. SHOULD ONLY BE USED IN TESTS!
|
||||
# We cache this property because it is called late in the tests - at
|
||||
# this point, all context have been unregistered, and
|
||||
# resolve_temp_folder_name raises an error.
|
||||
if getattr(self, "_cached_temp_folder", None) is not None:
|
||||
return self._cached_temp_folder
|
||||
else:
|
||||
self._cached_temp_folder = (
|
||||
self._temp_folder_manager.resolve_temp_folder_name()
|
||||
) # noqa
|
||||
return self._cached_temp_folder
|
||||
|
||||
|
||||
class _TestingMemmappingExecutor(MemmappingExecutor):
|
||||
"""Wrapper around ReusableExecutor to ease memmapping testing with Pool
|
||||
and Executor. This is only for testing purposes.
|
||||
|
||||
"""
|
||||
|
||||
def apply_async(self, func, args):
|
||||
"""Schedule a func to be run"""
|
||||
future = self.submit(func, *args)
|
||||
future.get = future.result
|
||||
return future
|
||||
|
||||
def map(self, f, *args):
|
||||
return list(super().map(f, *args))
|
||||
0
Backend/venv/lib/python3.12/site-packages/joblib/externals/__init__.py
vendored
Normal file
0
Backend/venv/lib/python3.12/site-packages/joblib/externals/__init__.py
vendored
Normal file
BIN
Backend/venv/lib/python3.12/site-packages/joblib/externals/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
Backend/venv/lib/python3.12/site-packages/joblib/externals/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
18
Backend/venv/lib/python3.12/site-packages/joblib/externals/cloudpickle/__init__.py
vendored
Normal file
18
Backend/venv/lib/python3.12/site-packages/joblib/externals/cloudpickle/__init__.py
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
from . import cloudpickle
|
||||
from .cloudpickle import * # noqa
|
||||
|
||||
__doc__ = cloudpickle.__doc__
|
||||
|
||||
__version__ = "3.1.1"
|
||||
|
||||
__all__ = [ # noqa
|
||||
"__version__",
|
||||
"Pickler",
|
||||
"CloudPickler",
|
||||
"dumps",
|
||||
"loads",
|
||||
"dump",
|
||||
"load",
|
||||
"register_pickle_by_value",
|
||||
"unregister_pickle_by_value",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
1545
Backend/venv/lib/python3.12/site-packages/joblib/externals/cloudpickle/cloudpickle.py
vendored
Normal file
1545
Backend/venv/lib/python3.12/site-packages/joblib/externals/cloudpickle/cloudpickle.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
14
Backend/venv/lib/python3.12/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
vendored
Normal file
14
Backend/venv/lib/python3.12/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
"""Compatibility module.
|
||||
|
||||
It can be necessary to load files generated by previous versions of cloudpickle
|
||||
that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
|
||||
namespace.
|
||||
|
||||
See: tests/test_backward_compat.py
|
||||
"""
|
||||
|
||||
from . import cloudpickle
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
return getattr(cloudpickle, name)
|
||||
45
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/__init__.py
vendored
Normal file
45
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/__init__.py
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
r"""The :mod:`loky` module manages a pool of worker that can be re-used across time.
|
||||
It provides a robust and dynamic implementation os the
|
||||
:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
|
||||
hide the pool management under the hood.
|
||||
"""
|
||||
|
||||
from concurrent.futures import (
|
||||
ALL_COMPLETED,
|
||||
FIRST_COMPLETED,
|
||||
FIRST_EXCEPTION,
|
||||
CancelledError,
|
||||
Executor,
|
||||
TimeoutError,
|
||||
as_completed,
|
||||
wait,
|
||||
)
|
||||
|
||||
from ._base import Future
|
||||
from .backend.context import cpu_count
|
||||
from .backend.reduction import set_loky_pickler
|
||||
from .reusable_executor import get_reusable_executor
|
||||
from .cloudpickle_wrapper import wrap_non_picklable_objects
|
||||
from .process_executor import BrokenProcessPool, ProcessPoolExecutor
|
||||
|
||||
|
||||
__all__ = [
|
||||
"get_reusable_executor",
|
||||
"cpu_count",
|
||||
"wait",
|
||||
"as_completed",
|
||||
"Future",
|
||||
"Executor",
|
||||
"ProcessPoolExecutor",
|
||||
"BrokenProcessPool",
|
||||
"CancelledError",
|
||||
"TimeoutError",
|
||||
"FIRST_COMPLETED",
|
||||
"FIRST_EXCEPTION",
|
||||
"ALL_COMPLETED",
|
||||
"wrap_non_picklable_objects",
|
||||
"set_loky_pickler",
|
||||
]
|
||||
|
||||
|
||||
__version__ = "3.5.6"
|
||||
BIN
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
BIN
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-312.pyc
vendored
Normal file
Binary file not shown.
BIN
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/__pycache__/_base.cpython-312.pyc
vendored
Normal file
BIN
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/__pycache__/_base.cpython-312.pyc
vendored
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
28
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/_base.py
vendored
Normal file
28
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/_base.py
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
###############################################################################
|
||||
# Modification of concurrent.futures.Future
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# adapted from concurrent/futures/_base.py (17/02/2017)
|
||||
# * Do not use yield from
|
||||
# * Use old super syntax
|
||||
#
|
||||
# Copyright 2009 Brian Quinlan. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
from concurrent.futures import Future as _BaseFuture
|
||||
from concurrent.futures._base import LOGGER
|
||||
|
||||
|
||||
# To make loky._base.Future instances awaitable by concurrent.futures.wait,
|
||||
# derive our custom Future class from _BaseFuture. _invoke_callback is the only
|
||||
# modification made to this class in loky.
|
||||
# TODO investigate why using `concurrent.futures.Future` directly does not
|
||||
# always work in our test suite.
|
||||
class Future(_BaseFuture):
|
||||
def _invoke_callbacks(self):
|
||||
for callback in self._done_callbacks:
|
||||
try:
|
||||
callback(self)
|
||||
except BaseException:
|
||||
LOGGER.exception(f"exception calling callback for {self!r}")
|
||||
14
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/__init__.py
vendored
Normal file
14
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/__init__.py
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
import os
|
||||
from multiprocessing import synchronize
|
||||
|
||||
from .context import get_context
|
||||
|
||||
|
||||
def _make_name():
|
||||
return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}"
|
||||
|
||||
|
||||
# monkey patch the name creation for multiprocessing
|
||||
synchronize.SemLock._make_name = staticmethod(_make_name)
|
||||
|
||||
__all__ = ["get_context"]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
67
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/_posix_reduction.py
vendored
Normal file
67
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/_posix_reduction.py
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
###############################################################################
|
||||
# Extra reducers for Unix based system and connections objects
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# adapted from multiprocessing/reduction.py (17/02/2017)
|
||||
# * Add adapted reduction for LokyProcesses and socket/Connection
|
||||
#
|
||||
import os
|
||||
import socket
|
||||
import _socket
|
||||
from multiprocessing.connection import Connection
|
||||
from multiprocessing.context import get_spawning_popen
|
||||
|
||||
from .reduction import register
|
||||
|
||||
HAVE_SEND_HANDLE = (
|
||||
hasattr(socket, "CMSG_LEN")
|
||||
and hasattr(socket, "SCM_RIGHTS")
|
||||
and hasattr(socket.socket, "sendmsg")
|
||||
)
|
||||
|
||||
|
||||
def _mk_inheritable(fd):
|
||||
os.set_inheritable(fd, True)
|
||||
return fd
|
||||
|
||||
|
||||
def DupFd(fd):
|
||||
"""Return a wrapper for an fd."""
|
||||
popen_obj = get_spawning_popen()
|
||||
if popen_obj is not None:
|
||||
return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
|
||||
elif HAVE_SEND_HANDLE:
|
||||
from multiprocessing import resource_sharer
|
||||
|
||||
return resource_sharer.DupFd(fd)
|
||||
else:
|
||||
raise TypeError(
|
||||
"Cannot pickle connection object. This object can only be "
|
||||
"passed when spawning a new process"
|
||||
)
|
||||
|
||||
|
||||
def _reduce_socket(s):
|
||||
df = DupFd(s.fileno())
|
||||
return _rebuild_socket, (df, s.family, s.type, s.proto)
|
||||
|
||||
|
||||
def _rebuild_socket(df, family, type, proto):
|
||||
fd = df.detach()
|
||||
return socket.fromfd(fd, family, type, proto)
|
||||
|
||||
|
||||
def rebuild_connection(df, readable, writable):
|
||||
fd = df.detach()
|
||||
return Connection(fd, readable, writable)
|
||||
|
||||
|
||||
def reduce_connection(conn):
|
||||
df = DupFd(conn.fileno())
|
||||
return rebuild_connection, (df, conn.readable, conn.writable)
|
||||
|
||||
|
||||
register(socket.socket, _reduce_socket)
|
||||
register(_socket.socket, _reduce_socket)
|
||||
register(Connection, reduce_connection)
|
||||
18
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/_win_reduction.py
vendored
Normal file
18
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/_win_reduction.py
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
###############################################################################
|
||||
# Extra reducers for Windows system and connections objects
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# adapted from multiprocessing/reduction.py (17/02/2017)
|
||||
# * Add adapted reduction for LokyProcesses and socket/PipeConnection
|
||||
#
|
||||
import socket
|
||||
from multiprocessing import connection
|
||||
from multiprocessing.reduction import _reduce_socket
|
||||
|
||||
from .reduction import register
|
||||
|
||||
# register reduction for win32 communication objects
|
||||
register(socket.socket, _reduce_socket)
|
||||
register(connection.Connection, connection.reduce_connection)
|
||||
register(connection.PipeConnection, connection.reduce_pipe_connection)
|
||||
405
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/context.py
vendored
Normal file
405
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/context.py
vendored
Normal file
@@ -0,0 +1,405 @@
|
||||
###############################################################################
|
||||
# Basic context management with LokyContext
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# adapted from multiprocessing/context.py
|
||||
# * Create a context ensuring loky uses only objects that are compatible
|
||||
# * Add LokyContext to the list of context of multiprocessing so loky can be
|
||||
# used with multiprocessing.set_start_method
|
||||
# * Implement a CFS-aware amd physical-core aware cpu_count function.
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
import math
|
||||
import subprocess
|
||||
import traceback
|
||||
import warnings
|
||||
import multiprocessing as mp
|
||||
from multiprocessing import get_context as mp_get_context
|
||||
from multiprocessing.context import BaseContext
|
||||
from concurrent.futures.process import _MAX_WINDOWS_WORKERS
|
||||
|
||||
|
||||
from .process import LokyProcess, LokyInitMainProcess
|
||||
|
||||
# Apparently, on older Python versions, loky cannot work 61 workers on Windows
|
||||
# but instead 60: ¯\_(ツ)_/¯
|
||||
if sys.version_info < (3, 10):
|
||||
_MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1
|
||||
|
||||
START_METHODS = ["loky", "loky_init_main", "spawn"]
|
||||
if sys.platform != "win32":
|
||||
START_METHODS += ["fork", "forkserver"]
|
||||
|
||||
_DEFAULT_START_METHOD = None
|
||||
|
||||
# Cache for the number of physical cores to avoid repeating subprocess calls.
|
||||
# It should not change during the lifetime of the program.
|
||||
physical_cores_cache = None
|
||||
|
||||
|
||||
def get_context(method=None):
|
||||
# Try to overload the default context
|
||||
method = method or _DEFAULT_START_METHOD or "loky"
|
||||
if method == "fork":
|
||||
# If 'fork' is explicitly requested, warn user about potential issues.
|
||||
warnings.warn(
|
||||
"`fork` start method should not be used with "
|
||||
"`loky` as it does not respect POSIX. Try using "
|
||||
"`spawn` or `loky` instead.",
|
||||
UserWarning,
|
||||
)
|
||||
try:
|
||||
return mp_get_context(method)
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
f"Unknown context '{method}'. Value should be in "
|
||||
f"{START_METHODS}."
|
||||
)
|
||||
|
||||
|
||||
def set_start_method(method, force=False):
|
||||
global _DEFAULT_START_METHOD
|
||||
if _DEFAULT_START_METHOD is not None and not force:
|
||||
raise RuntimeError("context has already been set")
|
||||
assert method is None or method in START_METHODS, (
|
||||
f"'{method}' is not a valid start_method. It should be in "
|
||||
f"{START_METHODS}"
|
||||
)
|
||||
|
||||
_DEFAULT_START_METHOD = method
|
||||
|
||||
|
||||
def get_start_method():
|
||||
return _DEFAULT_START_METHOD
|
||||
|
||||
|
||||
def cpu_count(only_physical_cores=False):
|
||||
"""Return the number of CPUs the current process can use.
|
||||
|
||||
The returned number of CPUs accounts for:
|
||||
* the number of CPUs in the system, as given by
|
||||
``multiprocessing.cpu_count``;
|
||||
* the CPU affinity settings of the current process
|
||||
(available on some Unix systems);
|
||||
* Cgroup CPU bandwidth limit (available on Linux only, typically
|
||||
set by docker and similar container orchestration systems);
|
||||
* the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
|
||||
and is given as the minimum of these constraints.
|
||||
|
||||
If ``only_physical_cores`` is True, return the number of physical cores
|
||||
instead of the number of logical cores (hyperthreading / SMT). Note that
|
||||
this option is not enforced if the number of usable cores is controlled in
|
||||
any other way such as: process affinity, Cgroup restricted CPU bandwidth
|
||||
or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
|
||||
cores is not found, return the number of logical cores.
|
||||
|
||||
Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for
|
||||
Python < 3.10), see:
|
||||
https://bugs.python.org/issue26903.
|
||||
|
||||
It is also always larger or equal to 1.
|
||||
"""
|
||||
# Note: os.cpu_count() is allowed to return None in its docstring
|
||||
os_cpu_count = os.cpu_count() or 1
|
||||
if sys.platform == "win32":
|
||||
# On Windows, attempting to use more than 61 CPUs would result in a
|
||||
# OS-level error. See https://bugs.python.org/issue26903. According to
|
||||
# https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups
|
||||
# it might be possible to go beyond with a lot of extra work but this
|
||||
# does not look easy.
|
||||
os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)
|
||||
|
||||
cpu_count_user = _cpu_count_user(os_cpu_count)
|
||||
aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)
|
||||
|
||||
if not only_physical_cores:
|
||||
return aggregate_cpu_count
|
||||
|
||||
if cpu_count_user < os_cpu_count:
|
||||
# Respect user setting
|
||||
return max(cpu_count_user, 1)
|
||||
|
||||
cpu_count_physical, exception = _count_physical_cores()
|
||||
if cpu_count_physical != "not found":
|
||||
return cpu_count_physical
|
||||
|
||||
# Fallback to default behavior
|
||||
if exception is not None:
|
||||
# warns only the first time
|
||||
warnings.warn(
|
||||
"Could not find the number of physical cores for the "
|
||||
f"following reason:\n{exception}\n"
|
||||
"Returning the number of logical cores instead. You can "
|
||||
"silence this warning by setting LOKY_MAX_CPU_COUNT to "
|
||||
"the number of cores you want to use."
|
||||
)
|
||||
traceback.print_tb(exception.__traceback__)
|
||||
|
||||
return aggregate_cpu_count
|
||||
|
||||
|
||||
def _cpu_count_cgroup(os_cpu_count):
|
||||
# Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
|
||||
cpu_max_fname = "/sys/fs/cgroup/cpu.max"
|
||||
cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
|
||||
cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
|
||||
if os.path.exists(cpu_max_fname):
|
||||
# cgroup v2
|
||||
# https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
|
||||
with open(cpu_max_fname) as fh:
|
||||
cpu_quota_us, cpu_period_us = fh.read().strip().split()
|
||||
elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
|
||||
# cgroup v1
|
||||
# https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
|
||||
with open(cfs_quota_fname) as fh:
|
||||
cpu_quota_us = fh.read().strip()
|
||||
with open(cfs_period_fname) as fh:
|
||||
cpu_period_us = fh.read().strip()
|
||||
else:
|
||||
# No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
|
||||
cpu_quota_us = "max"
|
||||
cpu_period_us = 100_000 # unused, for consistency with default values
|
||||
|
||||
if cpu_quota_us == "max":
|
||||
# No active Cgroup quota on a Cgroup-capable platform
|
||||
return os_cpu_count
|
||||
else:
|
||||
cpu_quota_us = int(cpu_quota_us)
|
||||
cpu_period_us = int(cpu_period_us)
|
||||
if cpu_quota_us > 0 and cpu_period_us > 0:
|
||||
return math.ceil(cpu_quota_us / cpu_period_us)
|
||||
else: # pragma: no cover
|
||||
# Setting a negative cpu_quota_us value is a valid way to disable
|
||||
# cgroup CPU bandwith limits
|
||||
return os_cpu_count
|
||||
|
||||
|
||||
def _cpu_count_affinity(os_cpu_count):
|
||||
# Number of available CPUs given affinity settings
|
||||
if hasattr(os, "sched_getaffinity"):
|
||||
try:
|
||||
return len(os.sched_getaffinity(0))
|
||||
except NotImplementedError:
|
||||
pass
|
||||
|
||||
# On some platforms, os.sched_getaffinity does not exist or raises
|
||||
# NotImplementedError, let's try with the psutil if installed.
|
||||
try:
|
||||
import psutil
|
||||
|
||||
p = psutil.Process()
|
||||
if hasattr(p, "cpu_affinity"):
|
||||
return len(p.cpu_affinity())
|
||||
|
||||
except ImportError: # pragma: no cover
|
||||
if (
|
||||
sys.platform == "linux"
|
||||
and os.environ.get("LOKY_MAX_CPU_COUNT") is None
|
||||
):
|
||||
# Some platforms don't implement os.sched_getaffinity on Linux which
|
||||
# can cause severe oversubscription problems. Better warn the
|
||||
# user in this particularly pathological case which can wreck
|
||||
# havoc, typically on CI workers.
|
||||
warnings.warn(
|
||||
"Failed to inspect CPU affinity constraints on this system. "
|
||||
"Please install psutil or explictly set LOKY_MAX_CPU_COUNT."
|
||||
)
|
||||
|
||||
# This can happen for platforms that do not implement any kind of CPU
|
||||
# infinity such as macOS-based platforms.
|
||||
return os_cpu_count
|
||||
|
||||
|
||||
def _cpu_count_user(os_cpu_count):
|
||||
"""Number of user defined available CPUs"""
|
||||
cpu_count_affinity = _cpu_count_affinity(os_cpu_count)
|
||||
|
||||
cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
|
||||
|
||||
# User defined soft-limit passed as a loky specific environment variable.
|
||||
cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
|
||||
|
||||
return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
|
||||
|
||||
|
||||
def _count_physical_cores():
|
||||
"""Return a tuple (number of physical cores, exception)
|
||||
|
||||
If the number of physical cores is found, exception is set to None.
|
||||
If it has not been found, return ("not found", exception).
|
||||
|
||||
The number of physical cores is cached to avoid repeating subprocess calls.
|
||||
"""
|
||||
exception = None
|
||||
|
||||
# First check if the value is cached
|
||||
global physical_cores_cache
|
||||
if physical_cores_cache is not None:
|
||||
return physical_cores_cache, exception
|
||||
|
||||
# Not cached yet, find it
|
||||
try:
|
||||
if sys.platform == "linux":
|
||||
cpu_count_physical = _count_physical_cores_linux()
|
||||
elif sys.platform == "win32":
|
||||
cpu_count_physical = _count_physical_cores_win32()
|
||||
elif sys.platform == "darwin":
|
||||
cpu_count_physical = _count_physical_cores_darwin()
|
||||
else:
|
||||
raise NotImplementedError(f"unsupported platform: {sys.platform}")
|
||||
|
||||
# if cpu_count_physical < 1, we did not find a valid value
|
||||
if cpu_count_physical < 1:
|
||||
raise ValueError(f"found {cpu_count_physical} physical cores < 1")
|
||||
|
||||
except Exception as e:
|
||||
exception = e
|
||||
cpu_count_physical = "not found"
|
||||
|
||||
# Put the result in cache
|
||||
physical_cores_cache = cpu_count_physical
|
||||
|
||||
return cpu_count_physical, exception
|
||||
|
||||
|
||||
def _count_physical_cores_linux():
|
||||
try:
|
||||
cpu_info = subprocess.run(
|
||||
"lscpu --parse=core".split(), capture_output=True, text=True
|
||||
)
|
||||
cpu_info = cpu_info.stdout.splitlines()
|
||||
cpu_info = {line for line in cpu_info if not line.startswith("#")}
|
||||
return len(cpu_info)
|
||||
except:
|
||||
pass # fallback to /proc/cpuinfo
|
||||
|
||||
cpu_info = subprocess.run(
|
||||
"cat /proc/cpuinfo".split(), capture_output=True, text=True
|
||||
)
|
||||
cpu_info = cpu_info.stdout.splitlines()
|
||||
cpu_info = {line for line in cpu_info if line.startswith("core id")}
|
||||
return len(cpu_info)
|
||||
|
||||
|
||||
def _count_physical_cores_win32():
|
||||
try:
|
||||
cmd = "-Command (Get-CimInstance -ClassName Win32_Processor).NumberOfCores"
|
||||
cpu_info = subprocess.run(
|
||||
f"powershell.exe {cmd}".split(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
cpu_info = cpu_info.stdout.splitlines()
|
||||
return int(cpu_info[0])
|
||||
except:
|
||||
pass # fallback to wmic (older Windows versions; deprecated now)
|
||||
|
||||
cpu_info = subprocess.run(
|
||||
"wmic CPU Get NumberOfCores /Format:csv".split(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
cpu_info = cpu_info.stdout.splitlines()
|
||||
cpu_info = [
|
||||
l.split(",")[1] for l in cpu_info if (l and l != "Node,NumberOfCores")
|
||||
]
|
||||
return sum(map(int, cpu_info))
|
||||
|
||||
|
||||
def _count_physical_cores_darwin():
|
||||
cpu_info = subprocess.run(
|
||||
"sysctl -n hw.physicalcpu".split(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
cpu_info = cpu_info.stdout
|
||||
return int(cpu_info)
|
||||
|
||||
|
||||
class LokyContext(BaseContext):
|
||||
"""Context relying on the LokyProcess."""
|
||||
|
||||
_name = "loky"
|
||||
Process = LokyProcess
|
||||
cpu_count = staticmethod(cpu_count)
|
||||
|
||||
def Queue(self, maxsize=0, reducers=None):
|
||||
"""Returns a queue object"""
|
||||
from .queues import Queue
|
||||
|
||||
return Queue(maxsize, reducers=reducers, ctx=self.get_context())
|
||||
|
||||
def SimpleQueue(self, reducers=None):
|
||||
"""Returns a queue object"""
|
||||
from .queues import SimpleQueue
|
||||
|
||||
return SimpleQueue(reducers=reducers, ctx=self.get_context())
|
||||
|
||||
if sys.platform != "win32":
|
||||
"""For Unix platform, use our custom implementation of synchronize
|
||||
ensuring that we use the loky.backend.resource_tracker to clean-up
|
||||
the semaphores in case of a worker crash.
|
||||
"""
|
||||
|
||||
def Semaphore(self, value=1):
|
||||
"""Returns a semaphore object"""
|
||||
from .synchronize import Semaphore
|
||||
|
||||
return Semaphore(value=value)
|
||||
|
||||
def BoundedSemaphore(self, value):
|
||||
"""Returns a bounded semaphore object"""
|
||||
from .synchronize import BoundedSemaphore
|
||||
|
||||
return BoundedSemaphore(value)
|
||||
|
||||
def Lock(self):
|
||||
"""Returns a lock object"""
|
||||
from .synchronize import Lock
|
||||
|
||||
return Lock()
|
||||
|
||||
def RLock(self):
|
||||
"""Returns a recurrent lock object"""
|
||||
from .synchronize import RLock
|
||||
|
||||
return RLock()
|
||||
|
||||
def Condition(self, lock=None):
|
||||
"""Returns a condition object"""
|
||||
from .synchronize import Condition
|
||||
|
||||
return Condition(lock)
|
||||
|
||||
def Event(self):
|
||||
"""Returns an event object"""
|
||||
from .synchronize import Event
|
||||
|
||||
return Event()
|
||||
|
||||
|
||||
class LokyInitMainContext(LokyContext):
|
||||
"""Extra context with LokyProcess, which does load the main module
|
||||
|
||||
This context is used for compatibility in the case ``cloudpickle`` is not
|
||||
present on the running system. This permits to load functions defined in
|
||||
the ``main`` module, using proper safeguards. The declaration of the
|
||||
``executor`` should be protected by ``if __name__ == "__main__":`` and the
|
||||
functions and variable used from main should be out of this block.
|
||||
|
||||
This mimics the default behavior of multiprocessing under Windows and the
|
||||
behavior of the ``spawn`` start method on a posix system.
|
||||
For more details, see the end of the following section of python doc
|
||||
https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
|
||||
"""
|
||||
|
||||
_name = "loky_init_main"
|
||||
Process = LokyInitMainProcess
|
||||
|
||||
|
||||
# Register loky context so it works with multiprocessing.get_context
|
||||
ctx_loky = LokyContext()
|
||||
mp.context._concrete_contexts["loky"] = ctx_loky
|
||||
mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext()
|
||||
73
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/fork_exec.py
vendored
Normal file
73
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/fork_exec.py
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
###############################################################################
|
||||
# Launch a subprocess using forkexec and make sure only the needed fd are
|
||||
# shared in the two process.
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
|
||||
def fork_exec(cmd, keep_fds, env=None):
    """Launch *cmd* in a child process via fork+exec, sharing only *keep_fds*.

    Parameters
    ----------
    cmd : list of str
        Command line (program followed by its arguments) to execute.
    keep_fds : iterable of int
        File descriptors to keep open in the child, in addition to
        stdin/stdout/stderr.
    env : dict or None
        Extra environment variables for the child, merged over os.environ.

    Returns
    -------
    int
        Pid of the created child process.
    """
    # Private C helper used by the subprocess module; its fork_exec signature
    # varies across Python versions (handled by the version checks below).
    import _posixsubprocess

    # Encoded command args as bytes:
    cmd = [os.fsencode(arg) for arg in cmd]

    # Copy the environment variables to set in the child process (also encoded
    # as bytes).
    env = env or {}
    env = {**os.environ, **env}
    encoded_env = []
    for key, value in env.items():
        encoded_env.append(os.fsencode(f"{key}={value}"))

    # Fds with fileno larger than 3 (stdin=0, stdout=1, stderr=2) are closed
    # in the child process, except for those passed in keep_fds.
    keep_fds = tuple(sorted(map(int, keep_fds)))
    # _posixsubprocess.fork_exec requires an error pipe; we do not read from
    # it here, and both ends are closed in the finally clause below.
    errpipe_read, errpipe_write = os.pipe()

    if sys.version_info >= (3, 14):
        # Python >= 3.14 removed allow_vfork from _posixsubprocess.fork_exec,
        # see https://github.com/python/cpython/pull/121383
        pgid_to_set = [-1]
        allow_vfork = []
    elif sys.version_info >= (3, 11):
        # Python 3.11 - 3.13 has allow_vfork in _posixsubprocess.fork_exec
        pgid_to_set = [-1]
        allow_vfork = [subprocess._USE_VFORK]
    else:
        # Python < 3.11
        pgid_to_set = []
        allow_vfork = []

    try:
        # The long positional list mirrors the private CPython API; the
        # inline comments name each parameter.
        return _posixsubprocess.fork_exec(
            cmd,  # args
            cmd[0:1],  # executable_list
            True,  # close_fds
            keep_fds,  # pass_fds
            None,  # cwd
            encoded_env,  # env
            -1,  # p2cread
            -1,  # p2cwrite
            -1,  # c2pread
            -1,  # c2pwrite
            -1,  # errread
            -1,  # errwrite
            errpipe_read,  # errpipe_read
            errpipe_write,  # errpipe_write
            False,  # restore_signal
            False,  # call_setsid
            *pgid_to_set,  # pgid_to_set
            None,  # gid
            None,  # extra_groups
            None,  # uid
            -1,  # child_umask
            None,  # preexec_fn
            *allow_vfork,  # extra flag if vfork is available
        )
    finally:
        os.close(errpipe_read)
        os.close(errpipe_write)
|
||||
193
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/popen_loky_posix.py
vendored
Normal file
193
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/popen_loky_posix.py
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
###############################################################################
|
||||
# Popen for LokyProcess.
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import pickle
|
||||
from io import BytesIO
|
||||
from multiprocessing import util, process
|
||||
from multiprocessing.connection import wait
|
||||
from multiprocessing.context import set_spawning_popen
|
||||
|
||||
from . import reduction, resource_tracker, spawn
|
||||
|
||||
|
||||
__all__ = ["Popen"]
|
||||
|
||||
|
||||
#
|
||||
# Wrapper for an fd used while launching a process
|
||||
#
|
||||
|
||||
|
||||
class _DupFd:
    """Wrapper for a file descriptor handed to a child being launched."""

    def __init__(self, fd):
        # Mark the fd inheritable so the spawned child can receive it.
        self.fd = reduction._mk_inheritable(fd)

    def detach(self):
        """Return the wrapped (now inheritable) file descriptor."""
        return self.fd
|
||||
|
||||
|
||||
#
|
||||
# Start child process using subprocess.Popen
|
||||
#
|
||||
|
||||
|
||||
class Popen:
    """Start and monitor a LokyProcess child using fork+exec (posix)."""

    method = "loky"
    DupFd = _DupFd

    def __init__(self, process_obj):
        # Flush the standard streams so pending buffered output is not
        # duplicated in the child after the fork.
        sys.stdout.flush()
        sys.stderr.flush()
        self.returncode = None
        # File descriptors that must stay open in the child process.
        self._fds = []
        self._launch(process_obj)

    def duplicate_for_child(self, fd):
        """Record *fd* for inheritance by the child and make it inheritable."""
        self._fds.append(fd)
        return reduction._mk_inheritable(fd)

    def poll(self, flag=os.WNOHANG):
        """Check the child's status; return its exit code, or None if alive."""
        if self.returncode is None:
            while True:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except OSError:
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                else:
                    break
            if pid == self.pid:
                if os.WIFSIGNALED(sts):
                    # Child killed by a signal: report as -signum.
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode

    def wait(self, timeout=None):
        """Wait for the child to exit; return its code, or None on timeout."""
        if self.returncode is None:
            if timeout is not None:
                if not wait([self.sentinel], timeout):
                    return None
            # This shouldn't block if wait() returned successfully.
            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
        return self.returncode

    def terminate(self):
        """Send SIGTERM to the child if it is still running."""
        if self.returncode is None:
            try:
                os.kill(self.pid, signal.SIGTERM)
            except ProcessLookupError:
                # Child already gone: nothing to do.
                pass
            except OSError:
                # Give the child a short grace period; re-raise only if it
                # is still alive (the kill failure is then a real error).
                if self.wait(timeout=0.1) is None:
                    raise

    def _launch(self, process_obj):
        """Serialize *process_obj* and spawn the child interpreter."""

        tracker_fd = resource_tracker._resource_tracker.getfd()

        # Pickle the preparation data and the process object up-front so
        # serialization errors surface before the child is spawned.
        fp = BytesIO()
        set_spawning_popen(self)
        try:
            prep_data = spawn.get_preparation_data(
                process_obj._name,
                getattr(process_obj, "init_main_module", True),
            )
            reduction.dump(prep_data, fp)
            reduction.dump(process_obj, fp)

        finally:
            set_spawning_popen(None)

        try:
            # parent_r/child_w: sentinel pipe (EOF when the child exits).
            # child_r/parent_w: carries the pickled state to the child.
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()

            cmd_python = [sys.executable]
            cmd_python += ["-m", self.__module__]
            cmd_python += ["--process-name", str(process_obj.name)]
            cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))]
            reduction._mk_inheritable(child_w)
            reduction._mk_inheritable(tracker_fd)
            self._fds += [child_r, child_w, tracker_fd]
            if os.name == "posix":
                # Also share multiprocessing's own resource tracker fd.
                mp_tracker_fd = prep_data["mp_tracker_fd"]
                self.duplicate_for_child(mp_tracker_fd)

            from .fork_exec import fork_exec

            pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
            util.debug(
                f"launched python with pid {pid} and cmd:\n{cmd_python}"
            )
            self.sentinel = parent_r

            # Prefer the zero-copy getbuffer() when the buffer provides it.
            method = "getbuffer"
            if not hasattr(fp, method):
                method = "getvalue"
            with os.fdopen(parent_w, "wb") as f:
                f.write(getattr(fp, method)())
            self.pid = pid
        finally:
            if parent_r is not None:
                # Close the sentinel fd when this Popen is garbage collected.
                util.Finalize(self, os.close, (parent_r,))
            # The child holds its own copies; close the parent's duplicates.
            for fd in (child_r, child_w):
                if fd is not None:
                    os.close(fd)

    @staticmethod
    def thread_is_spawning():
        return True
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Child-side entry point: executed when the parent launches
    # ``python -m ...popen_loky_posix --pipe FD --process-name NAME``.
    import argparse

    parser = argparse.ArgumentParser("Command line parser")
    parser.add_argument(
        "--pipe", type=int, required=True, help="File handle for the pipe"
    )
    parser.add_argument(
        "--process-name",
        type=str,
        default=None,
        help="Identifier for debugging purpose",
    )

    args = parser.parse_args()

    info = {}
    # Default to a failure exit code; overwritten once bootstrap completes.
    exitcode = 1
    try:
        with os.fdopen(args.pipe, "rb") as from_parent:
            # Flag consulted by multiprocessing while unpickling the process.
            process.current_process()._inheriting = True
            try:
                prep_data = pickle.load(from_parent)
                spawn.prepare(prep_data)
                process_obj = pickle.load(from_parent)
            finally:
                del process.current_process()._inheriting

        exitcode = process_obj._bootstrap()
    except Exception:
        print("\n\n" + "-" * 80)
        print(f"{args.process_name} failed with traceback: ")
        print("-" * 80)
        import traceback

        print(traceback.format_exc())
        print("\n" + "-" * 80)
    finally:
        if from_parent is not None:
            from_parent.close()

        sys.exit(exitcode)
|
||||
173
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/popen_loky_win32.py
vendored
Normal file
173
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/popen_loky_win32.py
vendored
Normal file
@@ -0,0 +1,173 @@
|
||||
import os
|
||||
import sys
|
||||
import msvcrt
|
||||
import _winapi
|
||||
from pickle import load
|
||||
from multiprocessing import process, util
|
||||
from multiprocessing.context import set_spawning_popen
|
||||
from multiprocessing.popen_spawn_win32 import Popen as _Popen
|
||||
|
||||
from . import reduction, spawn
|
||||
|
||||
|
||||
__all__ = ["Popen"]
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
def _path_eq(p1, p2):
|
||||
return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
|
||||
|
||||
|
||||
# True when the running interpreter differs from the base executable, i.e.
# we were launched through a venv redirector (see the bpo-35797 handling in
# Popen.__init__ below).
WINENV = hasattr(sys, "_base_executable") and not _path_eq(
    sys.executable, sys._base_executable
)
|
||||
|
||||
|
||||
def _close_handles(*handles):
    """Close each of the given Windows handles."""
    for handle_ in handles:
        _winapi.CloseHandle(handle_)
|
||||
|
||||
|
||||
#
|
||||
# We define a Popen class similar to the one from subprocess, but
|
||||
# whose constructor takes a process object as its argument.
|
||||
#
|
||||
|
||||
|
||||
class Popen(_Popen):
    """
    Start a subprocess to run the code of a process object.

    We differ from cpython implementation with the way we handle environment
    variables, in order to be able to modify them in the child processes before
    importing any library, in order to control the number of threads in C-level
    threadpools.

    We also use the loky preparation data, in particular to handle main_module
    inits and the loky resource tracker.
    """

    method = "loky"

    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(
            process_obj._name, getattr(process_obj, "init_main_module", True)
        )

        # read end of pipe will be duplicated by the child process
        # -- see spawn_main() in spawn.py.
        #
        # bpo-33929: Previously, the read end of pipe was "stolen" by the child
        # process, but it leaked a handle if the child process had been
        # terminated before it could steal the handle from the parent process.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        wfd = msvcrt.open_osfhandle(whandle, 0)
        cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)

        python_exe = spawn.get_executable()

        # copy the environment variables to set in the child process
        child_env = {**os.environ, **process_obj.env}

        # bpo-35797: When running in a venv, we bypass the redirect
        # executor and launch our base Python.
        if WINENV and _path_eq(python_exe, sys.executable):
            cmd[0] = python_exe = sys._base_executable
            child_env["__PYVENV_LAUNCHER__"] = sys.executable

        # Quote each argument for the Windows command line.
        cmd = " ".join(f'"{x}"' for x in cmd)

        with open(wfd, "wb") as to_child:
            # start process
            try:
                hp, ht, pid, _ = _winapi.CreateProcess(
                    python_exe,
                    cmd,
                    None,
                    None,
                    False,
                    0,
                    child_env,
                    None,
                    None,
                )
                # The thread handle is not needed; keep only the process one.
                _winapi.CloseHandle(ht)
            except BaseException:
                _winapi.CloseHandle(rhandle)
                raise

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)
            # Release both handles when this object is garbage collected.
            self.finalizer = util.Finalize(
                self, _close_handles, (self.sentinel, int(rhandle))
            )

            # send information to child
            set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                set_spawning_popen(None)
|
||||
|
||||
|
||||
def get_command_line(pipe_handle, parent_pid, **kwds):
    """Return the prefix of the command line used to spawn a child process."""
    # Frozen executables re-exec themselves with the fork marker flag.
    if getattr(sys, "frozen", False):
        return [sys.executable, "--multiprocessing-fork", pipe_handle]

    # Inline program handing control to main() in the child interpreter.
    bootstrap_code = (
        "from joblib.externals.loky.backend.popen_loky_win32 import main; "
        f"main(pipe_handle={pipe_handle}, parent_pid={parent_pid})"
    )
    interpreter_flags = util._args_from_interpreter_flags()
    return [
        spawn.get_executable(),
        *interpreter_flags,
        "-c",
        bootstrap_code,
        "--multiprocessing-fork",
    ]
|
||||
|
||||
|
||||
def is_forking(argv):
    """Return whether the command line indicates we are forking.

    Parameters
    ----------
    argv : list of str
        Command-line arguments, typically ``sys.argv``.

    Returns
    -------
    bool
        True when the second argument is the ``--multiprocessing-fork``
        marker inserted by the spawning command line.
    """
    # Direct boolean expression instead of an if/else returning literals.
    return len(argv) >= 2 and argv[1] == "--multiprocessing-fork"
|
||||
|
||||
|
||||
def main(pipe_handle, parent_pid=None):
    """Run code specified by data received over pipe."""
    assert is_forking(sys.argv), "Not forking"

    if parent_pid is not None:
        # Open the parent process so the pipe handle can be duplicated from
        # it; the handle also serves as a parent-liveness sentinel.
        source_process = _winapi.OpenProcess(
            _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid
        )
    else:
        source_process = None
    new_handle = reduction.duplicate(
        pipe_handle, source_process=source_process
    )
    fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
    parent_sentinel = source_process

    with os.fdopen(fd, "rb", closefd=True) as from_parent:
        # Flag consulted by multiprocessing while unpickling the process.
        process.current_process()._inheriting = True
        try:
            preparation_data = load(from_parent)
            spawn.prepare(preparation_data, parent_sentinel)
            self = load(from_parent)
        finally:
            del process.current_process()._inheriting

    exitcode = self._bootstrap(parent_sentinel)
    sys.exit(exitcode)
|
||||
85
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/process.py
vendored
Normal file
85
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/process.py
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
###############################################################################
|
||||
# LokyProcess implementation
|
||||
#
|
||||
# authors: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# based on multiprocessing/process.py (17/02/2017)
|
||||
#
|
||||
import sys
|
||||
from multiprocessing.context import assert_spawning
|
||||
from multiprocessing.process import BaseProcess
|
||||
|
||||
|
||||
class LokyProcess(BaseProcess):
    """Process class used by loky, started with the "loky" start method.

    Compared to ``BaseProcess``, it lets the caller pass extra environment
    variables (``env``) to set in the child, and controls whether the
    ``__main__`` module is re-imported there (``init_main_module``).
    """

    _start_method = "loky"

    def __init__(
        self,
        group=None,
        target=None,
        name=None,
        args=(),
        kwargs=None,
        daemon=None,
        init_main_module=False,
        env=None,
    ):
        # ``kwargs`` defaults to None instead of ``{}`` to avoid the
        # mutable-default-argument pitfall (one dict shared by all calls).
        super().__init__(
            group=group,
            target=target,
            name=name,
            args=args,
            kwargs={} if kwargs is None else kwargs,
            daemon=daemon,
        )
        # Extra environment variables to set in the child process.
        self.env = {} if env is None else env
        # Round-trip authkey through the BaseProcess property setter;
        # presumably this normalizes the stored key type — TODO confirm.
        self.authkey = self.authkey
        # Whether the child should re-import the __main__ module.
        self.init_main_module = init_main_module

    @staticmethod
    def _Popen(process_obj):
        # Select the platform-specific Popen implementation lazily, so the
        # platform-only modules are imported only when actually spawning.
        if sys.platform == "win32":
            from .popen_loky_win32 import Popen
        else:
            from .popen_loky_posix import Popen
        return Popen(process_obj)
|
||||
|
||||
|
||||
class LokyInitMainProcess(LokyProcess):
    """LokyProcess variant that always re-imports __main__ in the child.

    This is the process class behind the "loky_init_main" start method.
    """

    _start_method = "loky_init_main"

    def __init__(
        self,
        group=None,
        target=None,
        name=None,
        args=(),
        kwargs=None,
        daemon=None,
    ):
        # ``kwargs`` defaults to None instead of ``{}`` to avoid the
        # mutable-default-argument pitfall; delegate with main-module
        # initialization forced on.
        super().__init__(
            group=group,
            target=target,
            name=name,
            args=args,
            kwargs={} if kwargs is None else kwargs,
            daemon=daemon,
            init_main_module=True,
        )
|
||||
|
||||
|
||||
#
|
||||
# We subclass bytes to avoid accidental transmission of auth keys over network
|
||||
#
|
||||
|
||||
|
||||
class AuthenticationKey(bytes):
    """Process authentication key that refuses accidental pickling.

    Subclassing ``bytes`` lets us intercept ``__reduce__`` so the key can
    only be serialized while a child process is actually being spawned.
    """

    def __reduce__(self):
        try:
            assert_spawning(self)
        except RuntimeError:
            msg = (
                "Pickling an AuthenticationKey object is "
                "disallowed for security reasons"
            )
            raise TypeError(msg)
        return AuthenticationKey, (bytes(self),)
|
||||
236
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/queues.py
vendored
Normal file
236
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/queues.py
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
###############################################################################
|
||||
# Queue and SimpleQueue implementation for loky
|
||||
#
|
||||
# authors: Thomas Moreau, Olivier Grisel
|
||||
#
|
||||
# based on multiprocessing/queues.py (16/02/2017)
|
||||
# * Add some custom reducers for the Queues/SimpleQueue to tweak the
|
||||
# pickling process. (overload Queue._feed/SimpleQueue.put)
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
import errno
|
||||
import weakref
|
||||
import threading
|
||||
from multiprocessing import util
|
||||
from multiprocessing.queues import (
|
||||
Full,
|
||||
Queue as mp_Queue,
|
||||
SimpleQueue as mp_SimpleQueue,
|
||||
_sentinel,
|
||||
)
|
||||
from multiprocessing.context import assert_spawning
|
||||
|
||||
from .reduction import dumps
|
||||
|
||||
|
||||
__all__ = ["Queue", "SimpleQueue", "Full"]
|
||||
|
||||
|
||||
class Queue(mp_Queue):
    """Multiprocessing Queue supporting custom pickling reducers."""

    def __init__(self, maxsize=0, reducers=None, ctx=None):
        super().__init__(maxsize=maxsize, ctx=ctx)
        # Optional mapping {type: reduce_function} used when pickling items.
        self._reducers = reducers

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (
            self._ignore_epipe,
            self._maxsize,
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
            self._sem,
            self._opid,
        )

    def __setstate__(self, state):
        (
            self._ignore_epipe,
            self._maxsize,
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
            self._sem,
            self._opid,
        ) = state
        # The private re-initialization helper was renamed in CPython 3.9.
        if sys.version_info >= (3, 9):
            self._reset()
        else:
            self._after_fork()

    # Overload _start_thread to correctly call our custom _feed
    def _start_thread(self):
        util.debug("Queue._start_thread()")

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(
                self._buffer,
                self._notempty,
                self._send_bytes,
                self._wlock,
                self._writer.close,
                self._reducers,
                self._ignore_epipe,
                self._on_queue_feeder_error,
                self._sem,
            ),
            name="QueueFeederThread",
        )
        self._thread.daemon = True

        util.debug("doing self._thread.start()")
        self._thread.start()
        util.debug("... done self._thread.start()")

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process. Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = self._opid == os.getpid()
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = util.Finalize(
                self._thread,
                Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5,
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = util.Finalize(
            self,
            Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10,
        )

    # Overload the _feed methods to use our custom pickling strategy.
    @staticmethod
    def _feed(
        buffer,
        notempty,
        send_bytes,
        writelock,
        close,
        reducers,
        ignore_epipe,
        onerror,
        queue_sem,
    ):
        util.debug("starting thread to feed data to pipe")
        # Bind bound methods to locals once: this loop is the hot path.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != "win32":
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            # No write lock needed on win32: writes to a message-oriented
            # pipe are atomic (see SimpleQueue.put below).
            wacquire = None

        while True:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while True:
                        obj = bpopleft()
                        if obj is sentinel:
                            util.debug("feeder thread got sentinel -- exiting")
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj_ = dumps(obj, reducers=reducers)
                        if wacquire is None:
                            send_bytes(obj_)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj_)
                            finally:
                                wrelease()
                        # Remove references early to avoid leaking memory
                        del obj, obj_
                except IndexError:
                    # Buffer drained: go back to waiting on notempty.
                    pass
            except BaseException as e:
                if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to cleanup.
                if util.is_exiting():
                    util.info(f"error in queue thread: {e}")
                    return
                else:
                    # Release the semaphore so the failed item does not keep
                    # a put() slot occupied, then report the error.
                    queue_sem.release()
                    onerror(e, obj)

    def _on_queue_feeder_error(self, e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception. For overriding by concurrent.futures.
        """
        import traceback

        traceback.print_exc()
|
||||
|
||||
|
||||
class SimpleQueue(mp_SimpleQueue):
    """Multiprocessing SimpleQueue supporting custom pickling reducers."""

    def __init__(self, reducers=None, ctx=None):
        super().__init__(ctx=ctx)

        # Add possibility to use custom reducers
        self._reducers = reducers

    def close(self):
        # Close both ends of the underlying pipe.
        self._reader.close()
        self._writer.close()

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
        )

    def __setstate__(self, state):
        (
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
        ) = state

    # Overload put to use our customizable reducer
    def put(self, obj):
        # serialize the data before acquiring the lock
        obj = dumps(obj, reducers=self._reducers)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)
|
||||
223
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/reduction.py
vendored
Normal file
223
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/reduction.py
vendored
Normal file
@@ -0,0 +1,223 @@
|
||||
###############################################################################
|
||||
# Customizable Pickler with some basic reducers
|
||||
#
|
||||
# author: Thomas Moreau
|
||||
#
|
||||
# adapted from multiprocessing/reduction.py (17/02/2017)
|
||||
# * Replace the ForkingPickler with a similar _LokyPickler,
|
||||
# * Add CustomizableLokyPickler to allow customizing pickling process
|
||||
# on the fly.
|
||||
#
|
||||
import copyreg
|
||||
import io
|
||||
import functools
|
||||
import types
|
||||
import sys
|
||||
import os
|
||||
|
||||
from multiprocessing import util
|
||||
from pickle import loads, HIGHEST_PROTOCOL
|
||||
|
||||
###############################################################################
|
||||
# Enable custom pickling in Loky.
|
||||
|
||||
_dispatch_table = {}
|
||||
|
||||
|
||||
def register(type_, reduce_function):
|
||||
_dispatch_table[type_] = reduce_function
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Registers extra pickling routines to improve picklization for loky
|
||||
|
||||
|
||||
# make methods picklable
|
||||
def _reduce_method(m):
|
||||
if m.__self__ is None:
|
||||
return getattr, (m.__class__, m.__func__.__name__)
|
||||
else:
|
||||
return getattr, (m.__self__, m.__func__.__name__)
|
||||
|
||||
|
||||
class _C:
    # Throwaway class used only to obtain the runtime types of an instance
    # method and a classmethod for the registrations below.
    def f(self):
        pass

    @classmethod
    def h(cls):
        pass


# Register the method reducer for instance-method and classmethod types.
register(type(_C().f), _reduce_method)
register(type(_C.h), _reduce_method)
|
||||
|
||||
|
||||
def _reduce_method_descriptor(m):
|
||||
return getattr, (m.__objclass__, m.__name__)
|
||||
|
||||
|
||||
# Register the descriptor reducer for built-in method descriptors and
# slot wrappers (the runtime types of list.append and int.__add__).
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
|
||||
|
||||
|
||||
# Make functools.partial objects picklable.
def _reduce_partial(p):
    """Reducer decomposing a partial into (func, args, keywords)."""
    keywords = p.keywords or {}
    return _rebuild_partial, (p.func, p.args, keywords)
|
||||
|
||||
|
||||
def _rebuild_partial(func, args, keywords):
|
||||
return functools.partial(func, *args, **keywords)
|
||||
|
||||
|
||||
# Make functools.partial objects picklable through _reduce_partial.
register(functools.partial, _reduce_partial)
|
||||
|
||||
if sys.platform != "win32":
|
||||
from ._posix_reduction import _mk_inheritable # noqa: F401
|
||||
else:
|
||||
from . import _win_reduction # noqa: F401
|
||||
|
||||
# global variable to change the pickler behavior
try:
    from joblib.externals import cloudpickle  # noqa: F401

    DEFAULT_ENV = "cloudpickle"
except ImportError:
    # If cloudpickle is not present, fallback to pickle
    DEFAULT_ENV = "pickle"

# Serializer name used by default, overridable with the LOKY_PICKLER
# environment variable.
ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
# Filled in by set_loky_pickler(): the pickler class currently in use and
# the name it was selected under.
_LokyPickler = None
_loky_pickler_name = None
|
||||
|
||||
|
||||
def set_loky_pickler(loky_pickler=None):
    """Select the (de)serialization backend used by loky.

    Parameters
    ----------
    loky_pickler : str or None
        Name of the backend: "cloudpickle", the name of an importable
        module exposing a ``Pickler`` attribute, or None/"" to use the
        LOKY_PICKLER environment variable (defaulting to cloudpickle when
        available).
    """
    global _LokyPickler, _loky_pickler_name

    if loky_pickler is None:
        loky_pickler = ENV_LOKY_PICKLER

    loky_pickler_cls = None

    # The default loky_pickler is cloudpickle
    if loky_pickler in ["", None]:
        loky_pickler = "cloudpickle"

    # Avoid rebuilding the pickler class if the requested one is active.
    if loky_pickler == _loky_pickler_name:
        return

    if loky_pickler == "cloudpickle":
        from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
    else:
        try:
            from importlib import import_module

            module_pickle = import_module(loky_pickler)
            loky_pickler_cls = module_pickle.Pickler
        except (ImportError, AttributeError) as e:
            # Enrich the error message with the origin of the bad value.
            extra_info = (
                "\nThis error occurred while setting loky_pickler to"
                f" '{loky_pickler}', as required by the env variable "
                "LOKY_PICKLER or the function set_loky_pickler."
            )
            e.args = (e.args[0] + extra_info,) + e.args[1:]
            e.msg = e.args[0]
            raise e

    util.debug(
        f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for "
        "serialization."
    )

    class CustomizablePickler(loky_pickler_cls):
        # Snapshot of the base pickler class this customizable pickler wraps.
        _loky_pickler_cls = loky_pickler_cls

        def _set_dispatch_table(self, dispatch_table):
            for ancestor_class in self._loky_pickler_cls.mro():
                dt_attribute = getattr(ancestor_class, "dispatch_table", None)
                if isinstance(dt_attribute, types.MemberDescriptorType):
                    # Ancestor class (typically _pickle.Pickler) has a
                    # member_descriptor for its "dispatch_table" attribute. Use
                    # it to set the dispatch_table as a member instead of a
                    # dynamic attribute in the __dict__ of the instance,
                    # otherwise it will not be taken into account by the C
                    # implementation of the dump method if a subclass defines a
                    # class-level dispatch_table attribute as was done in
                    # cloudpickle 1.6.0:
                    # https://github.com/joblib/loky/pull/260
                    dt_attribute.__set__(self, dispatch_table)
                    break

            # On top of member descriptor set, also use setattr such that code
            # that directly access self.dispatch_table gets a consistent view
            # of the same table.
            self.dispatch_table = dispatch_table

        def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
            loky_pickler_cls.__init__(self, writer, protocol=protocol)
            if reducers is None:
                reducers = {}

            if hasattr(self, "dispatch_table"):
                # Force a copy that we will update without mutating the
                # any class level defined dispatch_table.
                loky_dt = dict(self.dispatch_table)
            else:
                # Use standard reducers as bases
                loky_dt = copyreg.dispatch_table.copy()

            # Register loky specific reducers
            loky_dt.update(_dispatch_table)

            # Set the new dispatch table, taking care of the fact that we
            # need to use the member_descriptor when we inherit from a
            # subclass of the C implementation of the Pickler base class
            # with an class level dispatch_table attribute.
            self._set_dispatch_table(loky_dt)

            # Register the reducers
            for type, reduce_func in reducers.items():
                self.register(type, reduce_func)

        def register(self, type, reduce_func):
            """Attach a reducer function to a given type in the dispatch table."""
            self.dispatch_table[type] = reduce_func

    _LokyPickler = CustomizablePickler
    _loky_pickler_name = loky_pickler
|
||||
|
||||
|
||||
def get_loky_pickler_name():
    """Return the name under which the current loky pickler was selected."""
    # A read-only access to a module-level variable needs no ``global``.
    return _loky_pickler_name
|
||||
|
||||
|
||||
def get_loky_pickler():
    """Return the pickler class currently used by loky."""
    # A read-only access to a module-level variable needs no ``global``.
    return _LokyPickler
|
||||
|
||||
|
||||
# Configure the module-level pickler to its default value at import time.
set_loky_pickler()
|
||||
|
||||
|
||||
def dump(obj, file, reducers=None, protocol=None):
    """Replacement for pickle.dump() using _LokyPickler.

    Parameters
    ----------
    obj : object
        Object to serialize.
    file : file-like
        Writable binary stream receiving the pickled bytes.
    reducers : dict or None
        Optional {type: reduce_function} overrides for this dump only.
    protocol : int or None
        Pickle protocol forwarded to the pickler.
    """
    # ``global`` is unnecessary for a read-only access to _LokyPickler.
    _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
|
||||
|
||||
|
||||
def dumps(obj, reducers=None, protocol=None):
    """Serialize *obj* with the active loky pickler.

    Returns a memoryview over the pickled payload (zero-copy view of the
    underlying BytesIO buffer).
    """
    # The previous ``global _LokyPickler`` statement was dead code: this
    # function never referenced that name directly.
    buf = io.BytesIO()
    dump(obj, buf, reducers=reducers, protocol=protocol)
    return buf.getbuffer()
|
||||
|
||||
|
||||
__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
|
||||
|
||||
if sys.platform == "win32":
|
||||
from multiprocessing.reduction import duplicate
|
||||
|
||||
__all__ += ["duplicate"]
|
||||
411
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/resource_tracker.py
vendored
Normal file
411
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/resource_tracker.py
vendored
Normal file
@@ -0,0 +1,411 @@
|
||||
###############################################################################
|
||||
# Server process to keep track of unlinked resources, like folders and
|
||||
# semaphores and clean them.
|
||||
#
|
||||
# author: Thomas Moreau
|
||||
#
|
||||
# Adapted from multiprocessing/resource_tracker.py
|
||||
# * add some VERBOSE logging,
|
||||
# * add support to track folders,
|
||||
# * add Windows support,
|
||||
# * refcounting scheme to avoid unlinking resources still in use.
|
||||
#
|
||||
# On Unix we run a server process which keeps track of unlinked
|
||||
# resources. The server ignores SIGINT and SIGTERM and reads from a
|
||||
# pipe. The resource_tracker implements a reference counting scheme: each time
|
||||
# a Python process anticipates the shared usage of a resource by another
|
||||
# process, it signals the resource_tracker of this shared usage, and in return,
|
||||
# the resource_tracker increments the resource's reference count by 1.
|
||||
# Similarly, when access to a resource is closed by a Python process, the
|
||||
# process notifies the resource_tracker by asking it to decrement the
|
||||
# resource's reference count by 1. When the reference count drops to 0, the
|
||||
# resource_tracker attempts to clean up the underlying resource.
|
||||
|
||||
# Finally, every other process connected to the resource tracker has a copy of
|
||||
# the writable end of the pipe used to communicate with it, so the resource
|
||||
# tracker gets EOF when all other processes have exited. Then the
|
||||
# resource_tracker process unlinks any remaining leaked resources (with
|
||||
# reference count above 0)
|
||||
|
||||
# For semaphores, this is important because the system only supports a limited
|
||||
# number of named semaphores, and they will not be automatically removed till
|
||||
# the next reboot. Without this resource tracker process, "killall python"
|
||||
# would probably leave unlinked semaphores.
|
||||
|
||||
# Note that this behavior differs from CPython's resource_tracker, which only
|
||||
# implements list of shared resources, and not a proper refcounting scheme.
|
||||
# Also, CPython's resource tracker will only attempt to cleanup those shared
|
||||
# resources once all processes connected to the resource tracker have exited.
|
||||
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import signal
|
||||
import warnings
|
||||
from multiprocessing import util
|
||||
from multiprocessing.resource_tracker import (
|
||||
ResourceTracker as _ResourceTracker,
|
||||
)
|
||||
|
||||
from . import spawn
|
||||
|
||||
if sys.platform == "win32":
|
||||
import _winapi
|
||||
import msvcrt
|
||||
from multiprocessing.reduction import duplicate
|
||||
|
||||
|
||||
__all__ = ["ensure_running", "register", "unregister"]
|
||||
|
||||
_HAVE_SIGMASK = hasattr(signal, "pthread_sigmask")
|
||||
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
|
||||
|
||||
|
||||
def cleanup_noop(name):
    """Placeholder cleanup for the "noop" resource type used by PROBE messages.

    "noop" only exists so probe messages pass the resource-type validation in
    ``main``; it must never be registered nor cleaned up, so any call is a bug.
    """
    raise RuntimeError("noop should never be registered or cleaned up")
|
||||
|
||||
|
||||
# Map resource type -> callable used to delete/unlink that resource.
# "noop" exists only so PROBE messages pass validation and is never invoked.
_CLEANUP_FUNCS = {
    "noop": cleanup_noop,
    "folder": shutil.rmtree,
    "file": os.unlink,
}

if os.name == "posix":
    import _multiprocessing

    # Use sem_unlink() to clean up named semaphores.
    #
    # sem_unlink() may be missing if the Python build process detected the
    # absence of POSIX named semaphores. In that case, no named semaphores were
    # ever opened, so no cleanup would be necessary.
    if hasattr(_multiprocessing, "sem_unlink"):
        _CLEANUP_FUNCS.update(
            {
                "semlock": _multiprocessing.sem_unlink,
            }
        )


# When truthy, the spawned tracker process logs debug messages to stderr
# (forwarded as the second argument of main() in _launch).
VERBOSE = False
|
||||
|
||||
|
||||
class ResourceTracker(_ResourceTracker):
    """Resource tracker with refcounting scheme.

    This class is an extension of the multiprocessing ResourceTracker class
    which implements a reference counting scheme to avoid unlinking shared
    resources still in use in other processes.

    This feature is notably used by `joblib.Parallel` to share temporary
    folders and memory mapped files between the main process and the worker
    processes.

    The actual implementation of the refcounting scheme is in the main
    function, which is run in a dedicated process.
    """

    def maybe_unlink(self, name, rtype):
        """Decrement the refcount of a resource, and delete it if it hits 0"""
        self._send("MAYBE_UNLINK", name, rtype)

    def ensure_running(self):
        """Make sure that resource tracker process is running.

        This can be run from any process. Usually a child process will use
        the resource created by its parent.

        This function is necessary for backward compatibility with python
        versions before 3.13.7.
        """
        return self._ensure_running_and_write()

    def _teardown_dead_process(self):
        # Override this function for compatibility with windows and
        # for python version before 3.13.7

        # At this point, the resource_tracker process has been killed
        # or crashed. Close our (stale) write end of the pipe.
        os.close(self._fd)

        # Let's remove the process entry from the process table on POSIX
        # system to avoid zombie processes.
        if os.name == "posix":
            try:
                # _pid can be None if this process is a child from another
                # python process, which has started the resource_tracker.
                if self._pid is not None:
                    os.waitpid(self._pid, 0)
            except OSError:
                # The resource_tracker has already been terminated.
                pass
        self._fd = None
        self._pid = None

        warnings.warn(
            "resource_tracker: process died unexpectedly, relaunching. "
            "Some folders/semaphores might leak."
        )

    def _launch(self):
        # This is the overridden part of the resource tracker, which launches
        # loky's version, which is compatible with windows and allow to track
        # folders with external ref counting.

        fds_to_pass = []
        try:
            fds_to_pass.append(sys.stderr.fileno())
        except Exception:
            # stderr may be closed or not backed by a real fd; best effort.
            pass

        # Create a pipe for posix and windows: the child reads commands from
        # `r`, every registering process writes to (a dup of) `w`.
        r, w = os.pipe()
        if sys.platform == "win32":
            _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
            os.close(r)
            r = _r

        cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
        try:
            fds_to_pass.append(r)
            # process will out live us, so no need to wait on pid
            exe = spawn.get_executable()
            args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
            util.debug(f"launching resource tracker: {args}")
            # bpo-33613: Register a signal mask that will block the
            # signals. This signal mask will be inherited by the child
            # that is going to be spawned and will protect the child from a
            # race condition that can make the child die before it
            # registers signal handlers for SIGINT and SIGTERM. The mask is
            # unregistered after spawning the child.
            try:
                if _HAVE_SIGMASK:
                    signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
                pid = spawnv_passfds(exe, args, fds_to_pass)
            finally:
                if _HAVE_SIGMASK:
                    signal.pthread_sigmask(
                        signal.SIG_UNBLOCK, _IGNORED_SIGNALS
                    )
        except BaseException:
            os.close(w)
            raise
        else:
            self._fd = w
            self._pid = pid
        finally:
            # The read end belongs to the child only; close our copy.
            if sys.platform == "win32":
                _winapi.CloseHandle(r)
            else:
                os.close(r)

    def _ensure_running_and_write(self, msg=None):
        """Make sure the tracker process is running, then deliver ``msg``.

        This can be run from any process. Usually a child process will use
        the resource created by its parent.

        This function is added for compatibility with python version before
        3.13.7.
        """
        with self._lock:
            if (
                self._fd is not None
            ):  # resource tracker was launched before, is it still running?
                if msg is None:
                    to_send = b"PROBE:0:noop\n"
                else:
                    to_send = msg
                try:
                    self._write(to_send)
                except OSError:
                    # The tracker died: reap it and spawn a fresh one. The
                    # message was NOT delivered, so `msg` is left set and is
                    # re-sent to the new tracker below.
                    self._teardown_dead_process()
                    self._launch()
                else:
                    # Bug fix: only mark the message as delivered when the
                    # write succeeded. Previously `msg` was cleared
                    # unconditionally, silently dropping the message whenever
                    # the tracker had to be relaunched.
                    msg = None  # message was sent in probe
            else:
                self._launch()

            if msg is not None:
                self._write(msg)

    def _write(self, msg):
        # Pipe writes below PIPE_BUF are atomic, so a single os.write must
        # deliver the whole message.
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg), f"{nbytes=} != {len(msg)=}"

    def __del__(self):
        # ignore error due to trying to clean up child process which has
        # already been shutdown on windows.
        # See https://github.com/joblib/loky/pull/450
        # This is only required if __del__ is defined
        if not hasattr(_ResourceTracker, "__del__"):
            return
        try:
            super().__del__()
        except ChildProcessError:
            pass
|
||||
|
||||
|
||||
# Process-wide singleton tracker; the module-level helpers below are bound
# methods of this instance (register/unregister/getfd come from the
# multiprocessing base class).
_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
maybe_unlink = _resource_tracker.maybe_unlink
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd
|
||||
|
||||
|
||||
def main(fd, verbose=0):
    """Run resource tracker.

    Entry point executed inside the dedicated tracker process. Reads
    ``CMD:name:rtype`` lines from ``fd`` until EOF (i.e. until every writer
    process has exited), maintaining a refcount per resource, then unlinks
    whatever is still registered.

    Parameters
    ----------
    fd : int
        Read end of the command pipe (a file descriptor on POSIX, an
        inheritable handle converted via msvcrt on Windows).
    verbose : int, optional
        When truthy, emit debug logs to stderr.
    """
    if verbose:
        util.log_to_stderr(level=util.DEBUG)

    # protect the process from ^C and "killall python" etc
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    if _HAVE_SIGMASK:
        # Undo the mask inherited from the parent (set around spawning).
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)

    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    if verbose:
        util.debug("Main resource tracker is running")

    # registry[rtype][name] -> current reference count of that resource.
    registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
    try:
        if sys.platform == "win32":
            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
        # keep track of registered/unregistered resources
        with open(fd, "rb") as f:
            for line in f:
                try:
                    splitted = line.strip().decode("ascii").split(":")
                    # name can potentially contain separator symbols (for
                    # instance folders on Windows)
                    cmd, name, rtype = (
                        splitted[0],
                        ":".join(splitted[1:-1]),
                        splitted[-1],
                    )

                    if rtype not in _CLEANUP_FUNCS:
                        raise ValueError(
                            f"Cannot register {name} for automatic cleanup: "
                            f"unknown resource type ({rtype}). Resource type "
                            "should be one of the following: "
                            f"{list(_CLEANUP_FUNCS.keys())}"
                        )

                    if cmd == "PROBE":
                        # liveness check only; no state change
                        pass
                    elif cmd == "REGISTER":
                        if name not in registry[rtype]:
                            registry[rtype][name] = 1
                        else:
                            registry[rtype][name] += 1

                        if verbose:
                            util.debug(
                                "[ResourceTracker] incremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )
                    elif cmd == "UNREGISTER":
                        # Drop tracking entirely without unlinking.
                        del registry[rtype][name]
                        if verbose:
                            util.debug(
                                f"[ResourceTracker] unregister {name} {rtype}: "
                                f"registry({len(registry)})"
                            )
                    elif cmd == "MAYBE_UNLINK":
                        registry[rtype][name] -= 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] decremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )

                        if registry[rtype][name] == 0:
                            # Last reference gone: unlink the resource now.
                            del registry[rtype][name]
                            try:
                                if verbose:
                                    util.debug(
                                        f"[ResourceTracker] unlink {name}"
                                    )
                                _CLEANUP_FUNCS[rtype](name)
                            except Exception as e:
                                warnings.warn(
                                    f"resource_tracker: {name}: {e!r}"
                                )

                    else:
                        raise RuntimeError(f"unrecognized command {cmd!r}")
                except BaseException:
                    # Never let a malformed line kill the tracker: report via
                    # excepthook and keep serving the remaining processes.
                    try:
                        sys.excepthook(*sys.exc_info())
                    except BaseException:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining resources
        def _unlink_resources(rtype_registry, rtype):
            # Unlink every leaked resource of one type, warning first.
            if rtype_registry:
                try:
                    warnings.warn(
                        "resource_tracker: There appear to be "
                        f"{len(rtype_registry)} leaked {rtype} objects to "
                        "clean up at shutdown"
                    )
                except Exception:
                    pass
            for name in rtype_registry:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died. We therefore clean it up.
                try:
                    _CLEANUP_FUNCS[rtype](name)
                    if verbose:
                        util.debug(f"[ResourceTracker] unlink {name}")
                except Exception as e:
                    warnings.warn(f"resource_tracker: {name}: {e!r}")

        for rtype, rtype_registry in registry.items():
            if rtype == "folder":
                continue
            else:
                _unlink_resources(rtype_registry, rtype)

        # The default cleanup routine for folders deletes everything inside
        # those folders recursively, which can include other resources tracked
        # by the resource tracker). To limit the risk of the resource tracker
        # attempting to delete twice a resource (once as part of a tracked
        # folder, and once as a resource), we delete the folders after all
        # other resource types.
        if "folder" in registry:
            _unlink_resources(registry["folder"], "folder")

    if verbose:
        util.debug("resource tracker shut down")
|
||||
|
||||
|
||||
def spawnv_passfds(path, args, passfds):
    """Spawn a child process that inherits the file descriptors in ``passfds``.

    Parameters
    ----------
    path : str
        Path of the executable to run.
    args : list[str]
        Full argv of the child (including argv[0]).
    passfds : list[int]
        Descriptors (POSIX) the child must inherit.

    Returns
    -------
    int
        The pid of the spawned process.
    """
    if sys.platform != "win32":
        # util.spawnv_passfds expects bytes on POSIX.
        args = [arg.encode("utf-8") for arg in args]
        path = path.encode("utf-8")
        return util.spawnv_passfds(path, args, passfds)
    else:
        passfds = sorted(passfds)
        cmd = " ".join(f'"{x}"' for x in args)
        # Bug fix: the previous `try/except BaseException: pass` swallowed
        # CreateProcess failures and then raised UnboundLocalError on
        # `return pid`. Let the real error propagate so callers (e.g.
        # ResourceTracker._launch) can clean up and report it.
        _, ht, pid, _ = _winapi.CreateProcess(
            path, cmd, None, None, True, 0, None, None, None
        )
        _winapi.CloseHandle(ht)
        return pid
|
||||
244
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/spawn.py
vendored
Normal file
244
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/spawn.py
vendored
Normal file
@@ -0,0 +1,244 @@
|
||||
###############################################################################
|
||||
# Prepares and processes the data to setup the new process environment
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# adapted from multiprocessing/spawn.py (17/02/2017)
|
||||
# * Improve logging data
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
import runpy
|
||||
import textwrap
|
||||
import types
|
||||
from multiprocessing import process, util
|
||||
|
||||
|
||||
if sys.platform != "win32":
    WINEXE = False
    WINSERVICE = False
else:
    import msvcrt
    from multiprocessing.reduction import duplicate

    # True when running from a frozen Windows executable (sys.frozen is set
    # by freezers such as py2exe/PyInstaller).
    WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False)
    # True when hosted by pywin32's pythonservice.exe.
    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

if WINSERVICE:
    # The service host is not a regular interpreter: spawn children with the
    # python.exe from the installation prefix instead.
    _python_exe = os.path.join(sys.exec_prefix, "python.exe")
else:
    _python_exe = sys.executable
|
||||
|
||||
|
||||
def get_executable():
    """Return the Python interpreter path used to spawn child processes."""
    return _python_exe
|
||||
|
||||
|
||||
def _check_not_importing_main():
|
||||
if getattr(process.current_process(), "_inheriting", False):
|
||||
raise RuntimeError(
|
||||
textwrap.dedent(
|
||||
"""\
|
||||
An attempt has been made to start a new process before the
|
||||
current process has finished its bootstrapping phase.
|
||||
|
||||
This probably means that you are not using fork to start your
|
||||
child processes and you have forgotten to use the proper idiom
|
||||
in the main module:
|
||||
|
||||
if __name__ == '__main__':
|
||||
freeze_support()
|
||||
...
|
||||
|
||||
The "freeze_support()" line can be omitted if the program
|
||||
is not going to be frozen to produce an executable."""
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def get_preparation_data(name, init_main_module=True):
    """Return info about parent needed by child to unpickle process object.

    Parameters
    ----------
    name : str
        Name to give the child process.
    init_main_module : bool, optional
        When True, include instructions for the child to re-initialise its
        ``__main__`` module (by module name or by file path).

    Returns
    -------
    dict
        Serializable state consumed by :func:`prepare` in the child.
    """
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=bytes(process.current_process().authkey),
        name=name,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd(),
    )

    # Send sys_path and make sure the current directory will not be changed
    d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path]

    # Make sure to pass the information if the multiprocessing logger is active
    if util._logger is not None:
        d["log_level"] = util._logger.getEffectiveLevel()
        if util._logger.handlers:
            h = util._logger.handlers[0]
            d["log_fmt"] = h.formatter._fmt

    # Tell the child how to communicate with the resource_tracker
    from .resource_tracker import _resource_tracker

    _resource_tracker.ensure_running()
    if sys.platform == "win32":
        d["tracker_fd"] = msvcrt.get_osfhandle(_resource_tracker._fd)
    else:
        d["tracker_fd"] = _resource_tracker._fd

    if os.name == "posix":
        # joblib/loky#242: allow loky processes to retrieve the resource
        # tracker of their parent in case the child processes depickles
        # shared_memory objects, that are still tracked by multiprocessing's
        # resource_tracker by default.
        # XXX: this is a workaround that may be error prone: in the future, it
        # would be better to have loky subclass multiprocessing's shared_memory
        # to force registration of shared_memory segments via loky's
        # resource_tracker.
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker,
        )

        # multiprocessing's resource_tracker must be running before loky
        # process is created (otherwise the child won't be able to use it if it
        # is created later on)
        mp_resource_tracker.ensure_running()
        d["mp_tracker_fd"] = mp_resource_tracker._fd

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    if init_main_module:
        main_module = sys.modules["__main__"]
        try:
            main_mod_name = getattr(main_module.__spec__, "name", None)
        except BaseException:
            main_mod_name = None
        if main_mod_name is not None:
            d["init_main_from_name"] = main_mod_name
        elif sys.platform != "win32" or (not WINEXE and not WINSERVICE):
            main_path = getattr(main_module, "__file__", None)
            if main_path is not None:
                if (
                    not os.path.isabs(main_path)
                    and process.ORIGINAL_DIR is not None
                ):
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d["init_main_from_path"] = os.path.normpath(main_path)

    return d
|
||||
|
||||
|
||||
#
# Prepare current process
#
# Modules replaced by the _fixup_main_from_* helpers below are appended here
# so a reference to the previous __main__ is retained.
old_main_modules = []
|
||||
|
||||
|
||||
def prepare(data, parent_sentinel=None):
    """Try to get current process ready to unpickle process object.

    Applies, in order, the state produced by :func:`get_preparation_data`:
    process identity, logging, ``sys.path``/``sys.argv``, working directory,
    resource-tracker connections, and finally ``__main__`` re-initialisation.
    Every key is optional; absent keys are skipped.
    """
    if "name" in data:
        process.current_process().name = data["name"]

    if "authkey" in data:
        process.current_process().authkey = data["authkey"]

    if "log_to_stderr" in data and data["log_to_stderr"]:
        util.log_to_stderr()

    if "log_level" in data:
        util.get_logger().setLevel(data["log_level"])

    if "log_fmt" in data:
        import logging

        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data["log_fmt"])
        )

    if "sys_path" in data:
        sys.path = data["sys_path"]

    if "sys_argv" in data:
        sys.argv = data["sys_argv"]

    if "dir" in data:
        os.chdir(data["dir"])

    if "orig_dir" in data:
        process.ORIGINAL_DIR = data["orig_dir"]

    if "mp_tracker_fd" in data:
        # Reuse the parent's multiprocessing resource tracker (see
        # get_preparation_data).
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker,
        )

        mp_resource_tracker._fd = data["mp_tracker_fd"]
    if "tracker_fd" in data:
        from .resource_tracker import _resource_tracker

        if sys.platform == "win32":
            # On Windows the fd travels as a handle and must be duplicated
            # from the parent before being converted back to a C fd.
            handle = data["tracker_fd"]
            handle = duplicate(handle, source_process=parent_sentinel)
            _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
        else:
            _resource_tracker._fd = data["tracker_fd"]

    if "init_main_from_name" in data:
        _fixup_main_from_name(data["init_main_from_name"])
    elif "init_main_from_path" in data:
        _fixup_main_from_path(data["init_main_from_path"])
|
||||
|
||||
|
||||
# Multiprocessing module helpers to fix up the main module in
|
||||
# spawned subprocesses
|
||||
def _fixup_main_from_name(mod_name):
    """Re-run the parent's main module (by module name) as ``__mp_main__``.

    Both ``__main__`` and ``__mp_main__`` are then aliased to the re-executed
    module so objects pickled from the parent's main module can be unpickled.
    """
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules["__main__"]
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return

    # If this process was forked, __main__ may already be populated
    if getattr(current_main.__spec__, "name", None) == mod_name:
        return

    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly. We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(
        mod_name, run_name="__mp_main__", alter_sys=True
    )
    main_module.__dict__.update(main_content)
    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
|
||||
|
||||
|
||||
def _fixup_main_from_path(main_path):
    """Re-run the parent's main script (by file path) as ``__mp_main__``.

    Mirrors :func:`_fixup_main_from_name` for the case where the parent's
    ``__main__`` was an executable script rather than an importable module.
    """
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules["__main__"]

    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == "ipython":
        return

    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, "__file__", None) == main_path:
        return

    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path, run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
|
||||
409
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/synchronize.py
vendored
Normal file
409
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/synchronize.py
vendored
Normal file
@@ -0,0 +1,409 @@
|
||||
###############################################################################
|
||||
# Synchronization primitives based on our SemLock implementation
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# adapted from multiprocessing/synchronize.py (17/02/2017)
|
||||
# * Remove ctx argument for compatibility reason
|
||||
# * Registers a cleanup function with the loky resource_tracker to remove the
|
||||
# semaphore when the process dies instead.
|
||||
#
|
||||
# TODO: investigate which Python version is required to be able to use
|
||||
# multiprocessing.resource_tracker and therefore multiprocessing.synchronize
|
||||
# instead of a loky-specific fork.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import _multiprocessing
|
||||
from time import time as _time
|
||||
from multiprocessing import process, util
|
||||
from multiprocessing.context import assert_spawning
|
||||
|
||||
from . import resource_tracker
|
||||
|
||||
__all__ = [
|
||||
"Lock",
|
||||
"RLock",
|
||||
"Semaphore",
|
||||
"BoundedSemaphore",
|
||||
"Condition",
|
||||
"Event",
|
||||
]
|
||||
# Try to import the mp.synchronize module cleanly, if it fails
|
||||
# raise ImportError for platforms lacking a working sem_open implementation.
|
||||
# See issue 3770
|
||||
try:
|
||||
from _multiprocessing import SemLock as _SemLock
|
||||
from _multiprocessing import sem_unlink
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"This platform lacks a functioning sem_open"
|
||||
" implementation, therefore, the required"
|
||||
" synchronization primitives needed will not"
|
||||
" function, see issue 3770."
|
||||
)
|
||||
|
||||
#
|
||||
# Constants
|
||||
#
|
||||
|
||||
# Kind codes passed to _SemLock: 0 = recursive mutex, 1 = counting semaphore.
RECURSIVE_MUTEX, SEMAPHORE = range(2)
# Largest value a semaphore can hold on this platform.
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
|
||||
|
||||
|
||||
#
|
||||
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
|
||||
#
|
||||
|
||||
|
||||
class SemLock:
    """Base wrapper around ``_multiprocessing.SemLock``.

    Handles unique naming, re-binding after fork, pickling across processes
    (``__getstate__``/``__setstate__``), and registration with loky's
    resource_tracker so the named semaphore is eventually unlinked.
    """

    # Process-wide generator of unique name suffixes for anonymous semlocks.
    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, name=None):
        # unlink_now is only used on win32 or when we are using fork.
        unlink_now = False
        if name is None:
            # Try to find an unused name for the SemLock instance.
            for _ in range(100):
                try:
                    self._semlock = _SemLock(
                        kind, value, maxvalue, SemLock._make_name(), unlink_now
                    )
                except FileExistsError:  # pragma: no cover
                    pass
                else:
                    break
            else:  # pragma: no cover
                raise FileExistsError("cannot find name for semaphore")
        else:
            self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now)
        self.name = name
        util.debug(
            f"created semlock with handle {self._semlock.handle} and name "
            f'"{self.name}"'
        )

        self._make_methods()

        def _after_fork(obj):
            # Reset the underlying semlock state in the forked child.
            obj._semlock._after_fork()

        util.register_after_fork(self, _after_fork)

        # When the object is garbage collected or the
        # process shuts down we unlink the semaphore name
        resource_tracker.register(self._semlock.name, "semlock")
        util.Finalize(
            self, SemLock._cleanup, (self._semlock.name,), exitpriority=0
        )

    @staticmethod
    def _cleanup(name):
        # Unlink the named semaphore, then always unregister it from the
        # resource tracker.
        try:
            sem_unlink(name)
        except FileNotFoundError:
            # Already unlinked, possibly by user code: ignore and make sure to
            # unregister the semaphore from the resource tracker.
            pass
        finally:
            resource_tracker.unregister(name, "semlock")

    def _make_methods(self):
        # Re-bind acquire/release to the (possibly rebuilt) underlying semlock.
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.acquire()

    def __exit__(self, *args):
        return self._semlock.release()

    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        sl = self._semlock
        h = sl.handle
        return (h, sl.kind, sl.maxvalue, sl.name)

    def __setstate__(self, state):
        self._semlock = _SemLock._rebuild(*state)
        util.debug(
            f'recreated blocker with handle {state[0]!r} and name "{state[3]}"'
        )
        self._make_methods()

    @staticmethod
    def _make_name():
        # OSX does not support long names for semaphores
        return f"/loky-{os.getpid()}-{next(SemLock._rand)}"
|
||||
|
||||
|
||||
#
|
||||
# Semaphore
|
||||
#
|
||||
|
||||
|
||||
class Semaphore(SemLock):
    """A counting semaphore shared across processes."""

    def __init__(self, value=1):
        # A plain semaphore is bounded only by the platform maximum.
        super().__init__(SEMAPHORE, value, SEM_VALUE_MAX)

    def get_value(self):
        """Return the current semaphore value (not available on OSX)."""
        if sys.platform == "darwin":
            raise NotImplementedError("OSX does not implement sem_getvalue")
        return self._semlock._get_value()

    def __repr__(self):
        try:
            current = self._semlock._get_value()
        except Exception:
            current = "unknown"
        return f"<{self.__class__.__name__}(value={current})>"
|
||||
|
||||
|
||||
#
|
||||
# Bounded semaphore
|
||||
#
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
    """A semaphore whose value may never exceed its initial value."""

    def __init__(self, value=1):
        # maxvalue == value: call SemLock.__init__ directly to bypass
        # Semaphore.__init__ (which would use SEM_VALUE_MAX).
        SemLock.__init__(self, SEMAPHORE, value, value)

    def __repr__(self):
        try:
            current = self._semlock._get_value()
        except Exception:
            current = "unknown"
        return (
            f"<{self.__class__.__name__}(value={current}, "
            f"maxvalue={self._semlock.maxvalue})>"
        )
|
||||
|
||||
|
||||
#
|
||||
# Non-recursive lock
|
||||
#
|
||||
|
||||
|
||||
class Lock(SemLock):
    """A non-recursive lock: a bounded semaphore with initial value 1."""

    def __init__(self):
        super().__init__(SEMAPHORE, 1, 1)

    def __repr__(self):
        try:
            sl = self._semlock
            if sl._is_mine():
                owner = process.current_process().name
                thread_name = threading.current_thread().name
                if thread_name != "MainThread":
                    owner = f"{owner}|{thread_name}"
            elif sl._get_value() == 1:
                owner = "None"
            elif sl._count() > 0:
                owner = "SomeOtherThread"
            else:
                owner = "SomeOtherProcess"
        except Exception:
            owner = "unknown"
        return f"<{self.__class__.__name__}(owner={owner})>"
|
||||
|
||||
|
||||
#
|
||||
# Recursive lock
|
||||
#
|
||||
|
||||
|
||||
class RLock(SemLock):
    """A recursive lock: may be re-acquired by the process/thread holding it."""

    def __init__(self):
        super().__init__(RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        try:
            sl = self._semlock
            if sl._is_mine():
                owner = process.current_process().name
                thread_name = threading.current_thread().name
                if thread_name != "MainThread":
                    owner = f"{owner}|{thread_name}"
                count = sl._count()
            elif sl._get_value() == 1:
                owner, count = "None", 0
            elif sl._count() > 0:
                owner, count = "SomeOtherThread", "nonzero"
            else:
                owner, count = "SomeOtherProcess", "nonzero"
        except Exception:
            owner, count = "unknown", "unknown"
        return f"<{self.__class__.__name__}({owner}, {count})>"
|
||||
|
||||
|
||||
#
|
||||
# Condition variable
|
||||
#
|
||||
|
||||
|
||||
class Condition:
|
||||
def __init__(self, lock=None):
|
||||
self._lock = lock or RLock()
|
||||
self._sleeping_count = Semaphore(0)
|
||||
self._woken_count = Semaphore(0)
|
||||
self._wait_semaphore = Semaphore(0)
|
||||
self._make_methods()
|
||||
|
||||
def __getstate__(self):
|
||||
assert_spawning(self)
|
||||
return (
|
||||
self._lock,
|
||||
self._sleeping_count,
|
||||
self._woken_count,
|
||||
self._wait_semaphore,
|
||||
)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(
|
||||
self._lock,
|
||||
self._sleeping_count,
|
||||
self._woken_count,
|
||||
self._wait_semaphore,
|
||||
) = state
|
||||
self._make_methods()
|
||||
|
||||
def __enter__(self):
|
||||
return self._lock.__enter__()
|
||||
|
||||
def __exit__(self, *args):
|
||||
return self._lock.__exit__(*args)
|
||||
|
||||
def _make_methods(self):
|
||||
self.acquire = self._lock.acquire
|
||||
self.release = self._lock.release
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
num_waiters = (
|
||||
self._sleeping_count._semlock._get_value()
|
||||
- self._woken_count._semlock._get_value()
|
||||
)
|
||||
except Exception:
|
||||
num_waiters = "unknown"
|
||||
return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>"
|
||||
|
||||
def wait(self, timeout=None):
|
||||
assert (
|
||||
self._lock._semlock._is_mine()
|
||||
), "must acquire() condition before using wait()"
|
||||
|
||||
# indicate that this thread is going to sleep
|
||||
self._sleeping_count.release()
|
||||
|
||||
# release lock
|
||||
count = self._lock._semlock._count()
|
||||
for _ in range(count):
|
||||
self._lock.release()
|
||||
|
||||
try:
|
||||
# wait for notification or timeout
|
||||
return self._wait_semaphore.acquire(True, timeout)
|
||||
finally:
|
||||
# indicate that this thread has woken
|
||||
self._woken_count.release()
|
||||
|
||||
# reacquire lock
|
||||
for _ in range(count):
|
||||
self._lock.acquire()
|
||||
|
||||
def notify(self):
|
||||
assert self._lock._semlock._is_mine(), "lock is not owned"
|
||||
assert not self._wait_semaphore.acquire(False)
|
||||
|
||||
# to take account of timeouts since last notify() we subtract
|
||||
# woken_count from sleeping_count and rezero woken_count
|
||||
while self._woken_count.acquire(False):
|
||||
res = self._sleeping_count.acquire(False)
|
||||
assert res
|
||||
|
||||
if self._sleeping_count.acquire(False): # try grabbing a sleeper
|
||||
self._wait_semaphore.release() # wake up one sleeper
|
||||
self._woken_count.acquire() # wait for the sleeper to wake
|
||||
|
||||
# rezero _wait_semaphore in case a timeout just happened
|
||||
self._wait_semaphore.acquire(False)
|
||||
|
||||
def notify_all(self):
|
||||
assert self._lock._semlock._is_mine(), "lock is not owned"
|
||||
assert not self._wait_semaphore.acquire(False)
|
||||
|
||||
# to take account of timeouts since last notify*() we subtract
|
||||
# woken_count from sleeping_count and rezero woken_count
|
||||
while self._woken_count.acquire(False):
|
||||
res = self._sleeping_count.acquire(False)
|
||||
assert res
|
||||
|
||||
sleepers = 0
|
||||
while self._sleeping_count.acquire(False):
|
||||
self._wait_semaphore.release() # wake up one sleeper
|
||||
sleepers += 1
|
||||
|
||||
if sleepers:
|
||||
for _ in range(sleepers):
|
||||
self._woken_count.acquire() # wait for a sleeper to wake
|
||||
|
||||
# rezero wait_semaphore in case some timeouts just happened
|
||||
while self._wait_semaphore.acquire(False):
|
||||
pass
|
||||
|
||||
def wait_for(self, predicate, timeout=None):
    """Block until ``predicate()`` is truthy or *timeout* elapses.

    Returns the last value of ``predicate()``; a falsy result means the
    optional *timeout* (in seconds) expired first.
    """
    outcome = predicate()
    if outcome:
        return outcome

    deadline = None if timeout is None else _time() + timeout
    remaining = None
    while not outcome:
        if deadline is not None:
            remaining = deadline - _time()
            if remaining <= 0:
                # Timed out: report the (falsy) predicate value.
                break
        self.wait(remaining)
        outcome = predicate()
    return outcome
|
||||
|
||||
|
||||
#
|
||||
# Event
|
||||
#
|
||||
|
||||
|
||||
class Event:
    """Inter-process event built on a Condition and a binary Semaphore.

    The flag is "set" when ``_flag`` holds exactly one token and "clear"
    when it holds none; all token manipulation happens under ``_cond``.
    """

    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)

    def _peek(self):
        # Non-blocking probe of the flag that restores the token if it
        # was present. Caller must hold self._cond.
        if not self._flag.acquire(False):
            return False
        self._flag.release()
        return True

    def is_set(self):
        with self._cond:
            return self._peek()

    def set(self):
        with self._cond:
            # Drain any existing token first so the semaphore never
            # accumulates more than one.
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        with self._cond:
            self._flag.acquire(False)

    def wait(self, timeout=None):
        with self._cond:
            if not self._peek():
                self._cond.wait(timeout)
            # Report whether the flag is set now (it may have been
            # cleared again, or the wait may have timed out).
            return self._peek()
|
||||
181
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/utils.py
vendored
Normal file
181
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/backend/utils.py
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import errno
|
||||
import signal
|
||||
import warnings
|
||||
import subprocess
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import psutil
|
||||
except ImportError:
|
||||
psutil = None
|
||||
|
||||
|
||||
def kill_process_tree(process, use_psutil=True):
    """Terminate *process* and all of its descendants with SIGKILL.

    Prefers psutil-based introspection when psutil is importable and
    *use_psutil* is true; otherwise falls back on platform tooling
    (``taskkill`` on Windows, ``pgrep``/``os.kill`` on POSIX).
    """
    psutil_available = use_psutil and psutil is not None
    if psutil_available:
        _kill_process_tree_with_psutil(process)
    else:
        _kill_process_tree_without_psutil(process)
|
||||
|
||||
|
||||
def recursive_terminate(process, use_psutil=True):
    """Deprecated alias of :func:`kill_process_tree`.

    Emits a ``DeprecationWarning`` and forwards to ``kill_process_tree``.
    """
    # BUGFIX: the two implicitly-concatenated string literals previously
    # rendered as "...use kill_process_treeinstead" (missing space).
    warnings.warn(
        "recursive_terminate is deprecated in loky 3.2, use kill_process_tree"
        " instead",
        DeprecationWarning,
    )
    kill_process_tree(process, use_psutil=use_psutil)
|
||||
|
||||
|
||||
def _kill_process_tree_with_psutil(process):
    """Kill *process* and its whole subtree using psutil introspection."""
    try:
        descendants = psutil.Process(process.pid).children(recursive=True)
    except psutil.NoSuchProcess:
        # Already gone: nothing to clean up.
        return

    # Kill the deepest descendants first so a parent is never removed
    # before its own children in deeply nested process trees.
    for child in reversed(descendants):
        try:
            child.kill()
        except psutil.NoSuchProcess:
            pass

    try:
        psutil.Process(process.pid).kill()
    except psutil.NoSuchProcess:
        pass
    # Reap the process to avoid leaving a zombie behind.
    process.join()
|
||||
|
||||
|
||||
def _kill_process_tree_without_psutil(process):
    """Terminate a process and its descendants without psutil.

    Delegates to ``taskkill /T`` on Windows and to a recursive
    ``pgrep``/``kill`` walk on POSIX; on any failure it warns and kills
    only the root process.
    """
    try:
        if sys.platform == "win32":
            _windows_taskkill_process_tree(process.pid)
        else:
            _posix_recursive_kill(process.pid)
    except Exception:  # pragma: no cover
        details = traceback.format_exc()
        # BUGFIX: the concatenated literals previously rendered as
        # "Please installpsutil" (missing space).
        warnings.warn(
            "Failed to kill subprocesses on this platform. Please install "
            "psutil: https://github.com/giampaolo/psutil\n"
            f"Details:\n{details}"
        )
    # In case we cannot introspect or kill the descendants, we fall back
    # to only killing the main process.
    #
    # Note: on Windows, process.kill() is an alias for process.terminate()
    # which in turn calls the Win32 API function TerminateProcess().
    process.kill()
    process.join()
|
||||
|
||||
|
||||
def _windows_taskkill_process_tree(pid):
    """Kill process *pid* and its children with Windows ``taskkill``.

    The ``/T`` flag makes taskkill terminate the whole process tree.
    """
    command = ["taskkill", "/F", "/T", "/PID", str(pid)]
    try:
        subprocess.check_output(command, stderr=None)
    except subprocess.CalledProcessError as e:
        # taskkill exits with 128 or 255 when no such process exists;
        # any other code is a genuine failure that the caller should
        # log before falling back to killing only the root process.
        if e.returncode not in (128, 255):
            raise  # pragma: no cover
|
||||
|
||||
|
||||
def _kill(pid):
    """Send the strongest available kill signal to *pid*.

    Not every platform defines SIGKILL (the C standard only mandates
    SIGTERM), so SIGTERM is used as a fallback on hypothetical
    SIGKILL-less POSIX platforms (Windows is handled separately).
    """
    kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
    try:
        os.kill(pid, kill_signal)
    except OSError as e:
        # ESRCH ("no such process") means the target already exited,
        # which is fine; any other error propagates so the caller can
        # warn and retry killing the process.
        if e.errno != errno.ESRCH:
            raise  # pragma: no cover
|
||||
|
||||
|
||||
def _posix_recursive_kill(pid):
    """Recursively kill the descendants of a process before killing it."""
    try:
        child_listing = subprocess.check_output(
            ["pgrep", "-P", str(pid)], stderr=None, text=True
        )
    except subprocess.CalledProcessError as e:
        # pgrep exits with status 1 when the process has no children.
        if e.returncode == 1:
            child_listing = ""
        else:
            raise  # pragma: no cover

    # pgrep prints one child pid per line; kill each subtree bottom-up.
    for line in child_listing.splitlines():
        _posix_recursive_kill(int(line))

    _kill(pid)
|
||||
|
||||
|
||||
def get_exitcodes_terminated_worker(processes):
    """Return a formatted string with the exitcodes of terminated workers.

    If necessary, wait (up to .25s) for the system to correctly set the
    exitcode of one terminated worker.
    """

    def _collect():
        # Snapshot the values to tolerate concurrent dict mutation.
        return [
            p.exitcode
            for p in list(processes.values())
            if p.exitcode is not None
        ]

    # There should be at least one terminated worker; poll up to
    # 5 x 50ms in case the OS has not recorded an exitcode yet.
    exitcodes = _collect()
    attempts_left = 5
    while not exitcodes and attempts_left > 0:
        attempts_left -= 1
        exitcodes = _collect()
        time.sleep(0.05)

    return _format_exitcodes(exitcodes)
|
||||
|
||||
|
||||
def _format_exitcodes(exitcodes):
    """Format a list of exit codes with names of the signals if possible"""
    formatted = [
        f"{_get_exitcode_name(code)}({code})"
        for code in exitcodes
        if code is not None
    ]
    return "{%s}" % ", ".join(formatted)
|
||||
|
||||
|
||||
def _get_exitcode_name(exitcode):
|
||||
if sys.platform == "win32":
|
||||
# The exitcode are unreliable on windows (see bpo-31863).
|
||||
# For this case, return UNKNOWN
|
||||
return "UNKNOWN"
|
||||
|
||||
if exitcode < 0:
|
||||
try:
|
||||
import signal
|
||||
|
||||
return signal.Signals(-exitcode).name
|
||||
except ValueError:
|
||||
return "UNKNOWN"
|
||||
elif exitcode != 255:
|
||||
# The exitcode are unreliable on forkserver were 255 is always returned
|
||||
# (see bpo-30589). For this case, return UNKNOWN
|
||||
return "EXIT"
|
||||
|
||||
return "UNKNOWN"
|
||||
102
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
vendored
Normal file
102
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
import inspect
|
||||
from functools import partial
|
||||
from joblib.externals.cloudpickle import dumps, loads
|
||||
|
||||
|
||||
WRAP_CACHE = {}
|
||||
|
||||
|
||||
class CloudpickledObjectWrapper:
    """Proxy that serializes its wrapped object through cloudpickle.

    Attribute access is forwarded to the wrapped object so the proxy can
    be used as a drop-in replacement for it.
    """

    def __init__(self, obj, keep_wrapper=False):
        self._obj = obj
        self._keep_wrapper = keep_wrapper

    def __reduce__(self):
        payload = dumps(self._obj)
        if self._keep_wrapper:
            # Re-wrap the object on the other side of the pickle boundary.
            return _reconstruct_wrapper, (payload, self._keep_wrapper)
        # One-shot wrapper: unpickle straight to the original object.
        return loads, (payload,)

    def __getattr__(self, attr):
        # Forward everything except our own private slots to the wrapped
        # object, so the proxy behaves seamlessly like it.
        if attr in ("_obj", "_keep_wrapper"):
            return getattr(self, attr)
        return getattr(self._obj, attr)
|
||||
|
||||
|
||||
# Make sure the wrapped object conserves the callable property
|
||||
class CallableObjectWrapper(CloudpickledObjectWrapper):
    """Wrapper variant that preserves the callable protocol."""

    def __call__(self, *args, **kwargs):
        # Delegate the call straight to the wrapped callable.
        return self._obj(*args, **kwargs)
|
||||
|
||||
|
||||
def _wrap_non_picklable_objects(obj, keep_wrapper):
    """Wrap *obj*, preserving callability for callable objects."""
    wrapper_cls = (
        CallableObjectWrapper if callable(obj) else CloudpickledObjectWrapper
    )
    return wrapper_cls(obj, keep_wrapper=keep_wrapper)
|
||||
|
||||
|
||||
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
    """Unpickling helper: rebuild the wrapper around the restored object."""
    restored = loads(_pickled_object)
    return _wrap_non_picklable_objects(restored, keep_wrapper)
|
||||
|
||||
|
||||
def _wrap_objects_when_needed(obj):
|
||||
# Function to introspect an object and decide if it should be wrapped or
|
||||
# not.
|
||||
need_wrap = "__main__" in getattr(obj, "__module__", "")
|
||||
if isinstance(obj, partial):
|
||||
return partial(
|
||||
_wrap_objects_when_needed(obj.func),
|
||||
*[_wrap_objects_when_needed(a) for a in obj.args],
|
||||
**{
|
||||
k: _wrap_objects_when_needed(v)
|
||||
for k, v in obj.keywords.items()
|
||||
},
|
||||
)
|
||||
if callable(obj):
|
||||
# Need wrap if the object is a function defined in a local scope of
|
||||
# another function.
|
||||
func_code = getattr(obj, "__code__", "")
|
||||
need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
|
||||
|
||||
# Need wrap if the obj is a lambda expression
|
||||
func_name = getattr(obj, "__name__", "")
|
||||
need_wrap |= "<lambda>" in func_name
|
||||
|
||||
if not need_wrap:
|
||||
return obj
|
||||
|
||||
wrapped_obj = WRAP_CACHE.get(obj)
|
||||
if wrapped_obj is None:
|
||||
wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
|
||||
WRAP_CACHE[obj] = wrapped_obj
|
||||
return wrapped_obj
|
||||
|
||||
|
||||
def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable object to use cloudpickle to serialize them.

    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    if not inspect.isclass(obj):
        # Instances (and plain callables) get a regular object wrapper.
        return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)

    # For classes, build a subclass whose constructor instantiates the
    # wrapped class itself and stores the instance in the wrapper slot,
    # so created objects are transparently proxied.
    class CloudpickledClassWrapper(CloudpickledObjectWrapper):
        def __init__(self, *args, **kwargs):
            self._obj = obj(*args, **kwargs)
            self._keep_wrapper = keep_wrapper

    CloudpickledClassWrapper.__name__ = obj.__name__
    return CloudpickledClassWrapper
|
||||
80
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/initializers.py
vendored
Normal file
80
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/initializers.py
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
import warnings
|
||||
|
||||
|
||||
def _viztracer_init(init_kwargs):
    """Initialize viztracer's profiler in worker processes"""
    from viztracer import VizTracer

    profiler = VizTracer(**init_kwargs)
    # Flush the trace automatically when the worker exits.
    profiler.register_exit()
    profiler.start()
|
||||
|
||||
|
||||
def _make_viztracer_initializer_and_initargs():
|
||||
try:
|
||||
import viztracer
|
||||
|
||||
tracer = viztracer.get_tracer()
|
||||
if tracer is not None and getattr(tracer, "enable", False):
|
||||
# Profiler is active: introspect its configuration to
|
||||
# initialize the workers with the same configuration.
|
||||
return _viztracer_init, (tracer.init_kwargs,)
|
||||
except ImportError:
|
||||
# viztracer is not installed: nothing to do
|
||||
pass
|
||||
except Exception as e:
|
||||
# In case viztracer's API evolve, we do not want to crash loky but
|
||||
# we want to know about it to be able to update loky.
|
||||
warnings.warn(f"Unable to introspect viztracer state: {e}")
|
||||
return None, ()
|
||||
|
||||
|
||||
class _ChainedInitializer:
|
||||
"""Compound worker initializer
|
||||
|
||||
This is meant to be used in conjunction with _chain_initializers to
|
||||
produce the necessary chained_args list to be passed to __call__.
|
||||
"""
|
||||
|
||||
def __init__(self, initializers):
|
||||
self._initializers = initializers
|
||||
|
||||
def __call__(self, *chained_args):
|
||||
for initializer, args in zip(self._initializers, chained_args):
|
||||
initializer(*args)
|
||||
|
||||
|
||||
def _chain_initializers(initializer_and_args):
|
||||
"""Convenience helper to combine a sequence of initializers.
|
||||
|
||||
If some initializers are None, they are filtered out.
|
||||
"""
|
||||
filtered_initializers = []
|
||||
filtered_initargs = []
|
||||
for initializer, initargs in initializer_and_args:
|
||||
if initializer is not None:
|
||||
filtered_initializers.append(initializer)
|
||||
filtered_initargs.append(initargs)
|
||||
|
||||
if not filtered_initializers:
|
||||
return None, ()
|
||||
elif len(filtered_initializers) == 1:
|
||||
return filtered_initializers[0], filtered_initargs[0]
|
||||
else:
|
||||
return _ChainedInitializer(filtered_initializers), filtered_initargs
|
||||
|
||||
|
||||
def _prepare_initializer(initializer, initargs):
|
||||
if initializer is not None and not callable(initializer):
|
||||
raise TypeError(
|
||||
f"initializer must be a callable, got: {initializer!r}"
|
||||
)
|
||||
|
||||
# Introspect runtime to determine if we need to propagate the viztracer
|
||||
# profiler information to the workers:
|
||||
return _chain_initializers(
|
||||
[
|
||||
(initializer, initargs),
|
||||
_make_viztracer_initializer_and_initargs(),
|
||||
]
|
||||
)
|
||||
1344
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/process_executor.py
vendored
Normal file
1344
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/process_executor.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
294
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/reusable_executor.py
vendored
Normal file
294
Backend/venv/lib/python3.12/site-packages/joblib/externals/loky/reusable_executor.py
vendored
Normal file
@@ -0,0 +1,294 @@
|
||||
###############################################################################
|
||||
# Reusable ProcessPoolExecutor
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
import time
|
||||
import warnings
|
||||
import threading
|
||||
import multiprocessing as mp
|
||||
|
||||
from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
|
||||
from .backend.context import cpu_count
|
||||
from .backend import get_context
|
||||
|
||||
__all__ = ["get_reusable_executor"]
|
||||
|
||||
# Singleton executor and id management
|
||||
_executor_lock = threading.RLock()
|
||||
_next_executor_id = 0
|
||||
_executor = None
|
||||
_executor_kwargs = None
|
||||
|
||||
|
||||
def _get_next_executor_id():
    """Ensure that each successive executor instance has a unique, monotonic id.

    The purpose of this monotonic id is to help debug and test automated
    instance creation.
    """
    global _next_executor_id
    with _executor_lock:
        new_id = _next_executor_id
        _next_executor_id = new_id + 1
        return new_id
|
||||
|
||||
|
||||
def get_reusable_executor(
    max_workers=None,
    context=None,
    timeout=10,
    kill_workers=False,
    reuse="auto",
    job_reducers=None,
    result_reducers=None,
    initializer=None,
    initargs=(),
    env=None,
):
    """Return the current ReusableExectutor instance.

    A new instance is started when none exists yet or when the previous
    one was left in a broken state. If the existing instance does not
    have the requested number of workers, it is resized in place before
    being returned. Reusing the singleton spares the cost of spawning
    worker processes and re-importing common packages on every call.

    ``max_workers`` bounds the number of tasks running concurrently in
    worker processes; it defaults to the number of host CPUs.

    ``timeout`` (seconds) makes idle workers shut down automatically to
    release system resources; workers are respawned on new submissions
    so ``max_workers`` slots stay available. A value around 100x the
    worker spawn/import time (~100ms) keeps the respawn overhead
    negligible.

    ``kill_workers=True`` forcibly interrupts previously spawned jobs so
    a fresh executor with new constructor arguments can be obtained.

    ``job_reducers`` and ``result_reducers`` customize the pickling of
    tasks sent to, and results received from, the executor.

    When given, ``initializer`` runs first in each newly spawned worker
    with arguments ``initargs``.

    Child processes inherit a copy of the parent environment; ``env``
    may be a ``{ENV: VAL}`` dict of string literals that overrides
    variables in the children before any module is loaded (``loky``
    context only).
    """
    executor, _ = _ReusablePoolExecutor.get_reusable_executor(
        max_workers=max_workers,
        context=context,
        timeout=timeout,
        kill_workers=kill_workers,
        reuse=reuse,
        job_reducers=job_reducers,
        result_reducers=result_reducers,
        initializer=initializer,
        initargs=initargs,
        env=env,
    )
    return executor
|
||||
|
||||
|
||||
class _ReusablePoolExecutor(ProcessPoolExecutor):
    # ProcessPoolExecutor variant backing the module-level singleton:
    # it can be resized in place and serializes submit() against
    # _resize() through a shared lock (`submit_resize_lock`).

    def __init__(
        self,
        submit_resize_lock,
        max_workers=None,
        context=None,
        timeout=None,
        executor_id=0,
        job_reducers=None,
        result_reducers=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        super().__init__(
            max_workers=max_workers,
            context=context,
            timeout=timeout,
            job_reducers=job_reducers,
            result_reducers=result_reducers,
            initializer=initializer,
            initargs=initargs,
            env=env,
        )
        # Monotonic id assigned by _get_next_executor_id(), for debugging.
        self.executor_id = executor_id
        # Lock shared with the module so submissions cannot race a resize.
        self._submit_resize_lock = submit_resize_lock

    @classmethod
    def get_reusable_executor(
        cls,
        max_workers=None,
        context=None,
        timeout=10,
        kill_workers=False,
        reuse="auto",
        job_reducers=None,
        result_reducers=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        # Return (executor, is_reused): the module-level singleton, newly
        # created, resized, or reused as-is depending on the arguments and
        # the state of the previous instance.
        with _executor_lock:
            global _executor, _executor_kwargs
            executor = _executor

            if max_workers is None:
                if reuse is True and executor is not None:
                    # Explicit reuse: keep the current worker count.
                    max_workers = executor._max_workers
                else:
                    max_workers = cpu_count()
            elif max_workers <= 0:
                raise ValueError(
                    f"max_workers must be greater than 0, got {max_workers}."
                )

            if isinstance(context, str):
                context = get_context(context)
            if context is not None and context.get_start_method() == "fork":
                # fork is unsafe here: the executor outlives the forking
                # call and would inherit inconsistent state.
                raise ValueError(
                    "Cannot use reusable executor with the 'fork' context"
                )

            # Constructor arguments used both to build a new instance and
            # to decide (reuse="auto") whether the old one still matches.
            kwargs = dict(
                context=context,
                timeout=timeout,
                job_reducers=job_reducers,
                result_reducers=result_reducers,
                initializer=initializer,
                initargs=initargs,
                env=env,
            )
            if executor is None:
                is_reused = False
                mp.util.debug(
                    f"Create a executor with max_workers={max_workers}."
                )
                executor_id = _get_next_executor_id()
                _executor_kwargs = kwargs
                _executor = executor = cls(
                    _executor_lock,
                    max_workers=max_workers,
                    executor_id=executor_id,
                    **kwargs,
                )
            else:
                if reuse == "auto":
                    # Reuse only when the requested configuration matches
                    # the one the singleton was built with.
                    reuse = kwargs == _executor_kwargs
                if (
                    executor._flags.broken
                    or executor._flags.shutdown
                    or not reuse
                    or executor.queue_size < max_workers
                ):
                    if executor._flags.broken:
                        reason = "broken"
                    elif executor._flags.shutdown:
                        reason = "shutdown"
                    elif executor.queue_size < max_workers:
                        # Do not reuse the executor if the queue size is too
                        # small as this would lead to limited parallelism.
                        reason = "queue size is too small"
                    else:
                        reason = "arguments have changed"
                    mp.util.debug(
                        "Creating a new executor with max_workers="
                        f"{max_workers} as the previous instance cannot be "
                        f"reused ({reason})."
                    )
                    executor.shutdown(wait=True, kill_workers=kill_workers)
                    _executor = executor = _executor_kwargs = None
                    # Recursive call to build a new instance
                    return cls.get_reusable_executor(
                        max_workers=max_workers, **kwargs
                    )
                else:
                    mp.util.debug(
                        "Reusing existing executor with "
                        f"max_workers={executor._max_workers}."
                    )
                    is_reused = True
                    # Adjust the worker count of the reused instance.
                    executor._resize(max_workers)

        return executor, is_reused

    def submit(self, fn, *args, **kwargs):
        # Serialize submissions against concurrent _resize() calls.
        with self._submit_resize_lock:
            return super().submit(fn, *args, **kwargs)

    def _resize(self, max_workers):
        # Change the number of workers in place, blocking submissions
        # while the pool is reshaped.
        with self._submit_resize_lock:
            if max_workers is None:
                raise ValueError("Trying to resize with max_workers=None")
            elif max_workers == self._max_workers:
                # Nothing to do.
                return

            if self._executor_manager_thread is None:
                # If the executor_manager_thread has not been started
                # then no processes have been spawned and we can just
                # update _max_workers and return
                self._max_workers = max_workers
                return

            self._wait_job_completion()

            # Some process might have returned due to timeout so check how
            # many children are still alive. Use the
            # _processes_management_lock to ensure that no process are
            # spawned or timeout during the resize.
            with self._processes_management_lock:
                processes = list(self._processes.values())
                nb_children_alive = sum(p.is_alive() for p in processes)
                self._max_workers = max_workers
                # Ask the surplus workers to exit by sending them a
                # sentinel (None) through the call queue.
                for _ in range(max_workers, nb_children_alive):
                    self._call_queue.put(None)
            # Wait for the surplus workers to actually terminate.
            while (
                len(self._processes) > max_workers and not self._flags.broken
            ):
                time.sleep(1e-3)

            # Spawn workers if the pool grew, then wait for them to be up.
            self._adjust_process_count()
            processes = list(self._processes.values())
            while not all(p.is_alive() for p in processes):
                time.sleep(1e-3)

    def _wait_job_completion(self):
        """Wait for the cache to be empty before resizing the pool."""
        # Issue a warning to the user about the bad effect of this usage.
        if self._pending_work_items:
            warnings.warn(
                "Trying to resize an executor with running jobs: "
                "waiting for jobs completion before resizing.",
                UserWarning,
            )
            mp.util.debug(
                f"Executor {self.executor_id} waiting for jobs completion "
                "before resizing"
            )
        # Wait for the completion of the jobs
        while self._pending_work_items:
            time.sleep(1e-3)

    def _setup_queues(self, job_reducers, result_reducers):
        # As this executor can be resized, use a large queue size to avoid
        # underestimating capacity and introducing overhead
        # Also handle the case where the user set max_workers to a value
        # larger than cpu_count(), to avoid limiting the number of
        # parallel jobs.

        min_queue_size = max(cpu_count(), self._max_workers)
        self.queue_size = 2 * min_queue_size + EXTRA_QUEUED_CALLS
        super()._setup_queues(
            job_reducers, result_reducers, queue_size=self.queue_size
        )
|
||||
379
Backend/venv/lib/python3.12/site-packages/joblib/func_inspect.py
Normal file
379
Backend/venv/lib/python3.12/site-packages/joblib/func_inspect.py
Normal file
@@ -0,0 +1,379 @@
|
||||
"""
|
||||
My own variation on function-specific inspect-like features.
|
||||
"""
|
||||
|
||||
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
|
||||
# Copyright (c) 2009 Gael Varoquaux
|
||||
# License: BSD Style, 3 clauses.
|
||||
|
||||
import collections
|
||||
import inspect
|
||||
import os
|
||||
import re
|
||||
import warnings
|
||||
from itertools import islice
|
||||
from tokenize import open as open_py_source
|
||||
|
||||
from .logger import pformat
|
||||
|
||||
full_argspec_fields = (
|
||||
"args varargs varkw defaults kwonlyargs kwonlydefaults annotations"
|
||||
)
|
||||
full_argspec_type = collections.namedtuple("FullArgSpec", full_argspec_fields)
|
||||
|
||||
|
||||
def get_func_code(func):
    """Attempts to retrieve a reliable function code hash.

    The reason we don't use inspect.getsource is that it caches the
    source, whereas we want this to be modified on the fly when the
    function is modified.

    Returns
    -------
    func_code: string
        The function code
    source_file: string
        The path to the file in which the function is defined.
    first_line: int
        The first line of the code in the source file.

    Notes
    ------
    This function does a bit more magic than inspect, and is thus
    more robust.
    """
    source_file = None
    try:
        code = func.__code__
        source_file = code.co_filename
        if not os.path.exists(source_file):
            # Lambdas, interactive-shell functions and doctests have no
            # file on disk: fall back on inspect for their source.
            source_code = "".join(inspect.getsourcelines(func)[0])
            line_no = 1
            if source_file.startswith("<doctest "):
                source_file, line_no = re.match(
                    r"\<doctest (.*\.rst)\[(.*)\]\>", source_file
                ).groups()
                line_no = int(line_no)
                source_file = "<doctest %s>" % source_file
            return source_code, source_file, line_no
        # Read the file directly (not via inspect) so that on-the-fly
        # edits to the function are picked up immediately.
        first_line = code.co_firstlineno
        with open_py_source(source_file) as source_file_obj:
            # All the lines from the function definition onwards:
            source_lines = list(islice(source_file_obj, first_line - 1, None))
        return "".join(inspect.getblock(source_lines)), source_file, first_line
    except:  # noqa: E722
        # Retrieving the source failed: fall back on a hash. This is
        # fragile and may change from one session to another.
        if hasattr(func, "__code__"):
            return str(func.__code__.__hash__()), source_file, -1
        # Weird objects like numpy ufuncs don't have __code__; their
        # repr often embeds the object id, so while not persistent
        # across sessions it works for ufuncs within one session.
        return repr(func), source_file, -1
|
||||
|
||||
|
||||
def _clean_win_chars(string):
|
||||
"""Windows cannot encode some characters in filename."""
|
||||
import urllib
|
||||
|
||||
if hasattr(urllib, "quote"):
|
||||
quote = urllib.quote
|
||||
else:
|
||||
# In Python 3, quote is elsewhere
|
||||
import urllib.parse
|
||||
|
||||
quote = urllib.parse.quote
|
||||
for char in ("<", ">", "!", ":", "\\"):
|
||||
string = string.replace(char, quote(char))
|
||||
return string
|
||||
|
||||
|
||||
def get_func_name(func, resolv_alias=True, win_characters=True):
    """Return the function import path (as a list of module names), and
    a name for the function.

    Parameters
    ----------
    func: callable
        The func to inspect
    resolv_alias: boolean, optional
        If true, possible local aliases are indicated.
    win_characters: boolean, optional
        If true, substitute special characters using urllib.quote
        This is useful in Windows, as it cannot encode some filenames
    """
    # Determine the module the callable lives in, degrading gracefully
    # for objects without __module__ (e.g. some builtins/extension types).
    if hasattr(func, "__module__"):
        module = func.__module__
    else:
        try:
            module = inspect.getmodule(func)
        except TypeError:
            if hasattr(func, "__class__"):
                module = func.__class__.__module__
            else:
                module = "unknown"
    if module is None:
        # Happens in doctests, eg
        module = ""
    if module == "__main__":
        # Functions defined in the main script/notebook: derive a stable
        # pseudo-module name from the source file path so caching keys
        # stay consistent across runs.
        try:
            filename = os.path.abspath(inspect.getsourcefile(func))
        except:  # noqa: E722
            filename = None
        if filename is not None:
            # mangling of full path to filename
            parts = filename.split(os.sep)
            if parts[-1].startswith("<ipython-input"):
                # We're in a IPython (or notebook) session. parts[-1] comes
                # from func.__code__.co_filename and is of the form
                # <ipython-input-N-XYZ>, where:
                # - N is the cell number where the function was defined
                # - XYZ is a hash representing the function's code (and name).
                # It will be consistent across sessions and kernel restarts,
                # and will change if the function's code/name changes
                # We remove N so that cache is properly hit if the cell where
                # the func is defined is re-exectuted.
                # The XYZ hash should avoid collisions between functions with
                # the same name, both within the same notebook but also across
                # notebooks
                split = parts[-1].split("-")
                parts[-1] = "-".join(split[:2] + split[3:])
            elif len(parts) > 2 and parts[-2].startswith("ipykernel_"):
                # In a notebook session (ipykernel). Filename seems to be 'xyz'
                # of above. parts[-2] has the structure ipykernel_XXXXXX where
                # XXXXXX is a six-digit number identifying the current run (?).
                # If we split it off, the function again has the same
                # identifier across runs.
                parts[-2] = "ipykernel"
            filename = "-".join(parts)
            if filename.endswith(".py"):
                filename = filename[:-3]
            module = module + "-" + filename
    module = module.split(".")
    # Pick a name: legacy func_name (Python 2 objects), then __name__.
    if hasattr(func, "func_name"):
        name = func.func_name
    elif hasattr(func, "__name__"):
        name = func.__name__
    else:
        name = "unknown"
    # Hack to detect functions not defined at the module-level
    if resolv_alias:
        # TODO: Maybe add a warning here?
        if hasattr(func, "func_globals") and name in func.func_globals:
            if func.func_globals[name] is not func:
                name = "%s-alias" % name
    if hasattr(func, "__qualname__") and func.__qualname__ != name:
        # Extend the module name in case of nested functions to avoid
        # (module, name) collisions
        module.extend(func.__qualname__.split(".")[:-1])
    if inspect.ismethod(func):
        # We need to add the name of the class
        if hasattr(func, "im_class"):
            klass = func.im_class
            module.append(klass.__name__)
    if os.name == "nt" and win_characters:
        # Windows can't encode certain characters in filenames
        name = _clean_win_chars(name)
        module = [_clean_win_chars(s) for s in module]
    return module, name
|
||||
|
||||
|
||||
def _signature_str(function_name, arg_sig):
|
||||
"""Helper function to output a function signature"""
|
||||
return "{}{}".format(function_name, arg_sig)
|
||||
|
||||
|
||||
def _function_called_str(function_name, args, kwargs):
|
||||
"""Helper function to output a function call"""
|
||||
template_str = "{0}({1}, {2})"
|
||||
|
||||
args_str = repr(args)[1:-1]
|
||||
kwargs_str = ", ".join("%s=%s" % (k, v) for k, v in kwargs.items())
|
||||
return template_str.format(function_name, args_str, kwargs_str)
|
||||
|
||||
|
||||
def filter_args(func, ignore_lst, args=(), kwargs=dict()):
    """Filters the given args and kwargs using a list of arguments to
    ignore, and a function specification.

    Parameters
    ----------
    func: callable
        Function giving the argument specification
    ignore_lst: list of strings
        List of arguments to ignore (either a name of an argument
        in the function spec, or '*', or '**')
    *args: list
        Positional arguments passed to the function.
    **kwargs: dict
        Keyword arguments passed to the function

    Returns
    -------
    filtered_args: list
        List of filtered positional and keyword arguments.
    """
    # Work on a mutable copy: 'self' may be prepended for bound methods below.
    args = list(args)
    if isinstance(ignore_lst, str):
        # Catch a common mistake
        raise ValueError(
            "ignore_lst must be a list of parameters to ignore "
            "%s (type %s) was given" % (ignore_lst, type(ignore_lst))
        )
    # Special case for functools.partial objects
    # (and any other non-introspectable callable): fall back to a raw
    # {'*': args, '**': kwargs} mapping since no signature is available.
    if not inspect.ismethod(func) and not inspect.isfunction(func):
        if ignore_lst:
            warnings.warn(
                "Cannot inspect object %s, ignore list will not work." % func,
                stacklevel=2,
            )
        return {"*": args, "**": kwargs}
    arg_sig = inspect.signature(func)
    # Classify each parameter of the signature: named parameters
    # (positional-or-keyword plus keyword-only), the *args name, the
    # **kwargs name, and the list of declared default values.
    arg_names = []
    arg_defaults = []
    arg_kwonlyargs = []
    arg_varargs = None
    arg_varkw = None
    for param in arg_sig.parameters.values():
        if param.kind is param.POSITIONAL_OR_KEYWORD:
            arg_names.append(param.name)
        elif param.kind is param.KEYWORD_ONLY:
            arg_names.append(param.name)
            arg_kwonlyargs.append(param.name)
        elif param.kind is param.VAR_POSITIONAL:
            arg_varargs = param.name
        elif param.kind is param.VAR_KEYWORD:
            arg_varkw = param.name
        if param.default is not param.empty:
            arg_defaults.append(param.default)
    if inspect.ismethod(func):
        # First argument is 'self', it has been removed by Python
        # we need to add it back:
        args = [
            func.__self__,
        ] + args
        # func is an instance method, inspect.signature(func) does not
        # include self, we need to fetch it from the class method, i.e
        # func.__func__
        class_method_sig = inspect.signature(func.__func__)
        self_name = next(iter(class_method_sig.parameters))
        arg_names = [self_name] + arg_names
    # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
    # as on ndarrays.

    _, name = get_func_name(func, resolv_alias=False)
    # Bind every named parameter to its value: first from positional args,
    # then from kwargs, finally from declared defaults.
    arg_dict = dict()
    arg_position = -1
    for arg_position, arg_name in enumerate(arg_names):
        if arg_position < len(args):
            # Positional argument or keyword argument given as positional
            if arg_name not in arg_kwonlyargs:
                arg_dict[arg_name] = args[arg_position]
            else:
                raise ValueError(
                    "Keyword-only parameter '%s' was passed as "
                    "positional parameter for %s:\n"
                    " %s was called."
                    % (
                        arg_name,
                        _signature_str(name, arg_sig),
                        _function_called_str(name, args, kwargs),
                    )
                )

        else:
            # Negative index into arg_defaults: defaults align with the
            # trailing parameters of the signature.
            position = arg_position - len(arg_names)
            if arg_name in kwargs:
                arg_dict[arg_name] = kwargs[arg_name]
            else:
                try:
                    arg_dict[arg_name] = arg_defaults[position]
                except (IndexError, KeyError) as e:
                    # Missing argument
                    raise ValueError(
                        "Wrong number of arguments for %s:\n"
                        " %s was called."
                        % (
                            _signature_str(name, arg_sig),
                            _function_called_str(name, args, kwargs),
                        )
                    ) from e

    # Sorted iteration keeps the result independent of kwargs order, which
    # matters for stable cache keys.
    varkwargs = dict()
    for arg_name, arg_value in sorted(kwargs.items()):
        if arg_name in arg_dict:
            arg_dict[arg_name] = arg_value
        elif arg_varkw is not None:
            varkwargs[arg_name] = arg_value
        else:
            raise TypeError(
                "Ignore list for %s() contains an unexpected "
                "keyword argument '%s'" % (name, arg_name)
            )

    if arg_varkw is not None:
        arg_dict["**"] = varkwargs
    if arg_varargs is not None:
        # Everything beyond the named parameters goes to '*'.
        varargs = args[arg_position + 1 :]
        arg_dict["*"] = varargs

    # Now remove the arguments to be ignored
    for item in ignore_lst:
        if item in arg_dict:
            arg_dict.pop(item)
        else:
            raise ValueError(
                "Ignore list: argument '%s' is not defined for "
                "function %s" % (item, _signature_str(name, arg_sig))
            )
    # XXX: Return a sorted list of pairs?
    return arg_dict
|
||||
|
||||
|
||||
def _format_arg(arg):
|
||||
formatted_arg = pformat(arg, indent=2)
|
||||
if len(formatted_arg) > 1500:
|
||||
formatted_arg = "%s..." % formatted_arg[:700]
|
||||
return formatted_arg
|
||||
|
||||
|
||||
def format_signature(func, *args, **kwargs):
|
||||
# XXX: Should this use inspect.formatargvalues/formatargspec?
|
||||
module, name = get_func_name(func)
|
||||
module = [m for m in module if m]
|
||||
if module:
|
||||
module.append(name)
|
||||
module_path = ".".join(module)
|
||||
else:
|
||||
module_path = name
|
||||
arg_str = list()
|
||||
previous_length = 0
|
||||
for arg in args:
|
||||
formatted_arg = _format_arg(arg)
|
||||
if previous_length > 80:
|
||||
formatted_arg = "\n%s" % formatted_arg
|
||||
previous_length = len(formatted_arg)
|
||||
arg_str.append(formatted_arg)
|
||||
arg_str.extend(["%s=%s" % (v, _format_arg(i)) for v, i in kwargs.items()])
|
||||
arg_str = ", ".join(arg_str)
|
||||
|
||||
signature = "%s(%s)" % (name, arg_str)
|
||||
return module_path, signature
|
||||
|
||||
|
||||
def format_call(func, args, kwargs, object_name="Memory"):
|
||||
"""Returns a nicely formatted statement displaying the function
|
||||
call with the given arguments.
|
||||
"""
|
||||
path, signature = format_signature(func, *args, **kwargs)
|
||||
msg = "%s\n[%s] Calling %s...\n%s" % (80 * "_", object_name, path, signature)
|
||||
return msg
|
||||
# XXX: Not using logging framework
|
||||
# self.debug(msg)
|
||||
270
Backend/venv/lib/python3.12/site-packages/joblib/hashing.py
Normal file
270
Backend/venv/lib/python3.12/site-packages/joblib/hashing.py
Normal file
@@ -0,0 +1,270 @@
|
||||
"""
|
||||
Fast cryptographic hash of Python objects, with a special case for fast
|
||||
hashing of numpy arrays.
|
||||
"""
|
||||
|
||||
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
|
||||
# Copyright (c) 2009 Gael Varoquaux
|
||||
# License: BSD Style, 3 clauses.
|
||||
|
||||
import decimal
|
||||
import hashlib
|
||||
import io
|
||||
import pickle
|
||||
import struct
|
||||
import sys
|
||||
import types
|
||||
|
||||
# Use the pure-Python pickler: unlike the C-accelerated pickle.Pickler, its
# save/memoize/dispatch machinery can be subclassed and overridden below.
Pickler = pickle._Pickler
|
||||
|
||||
|
||||
class _ConsistentSet(object):
|
||||
"""Class used to ensure the hash of Sets is preserved
|
||||
whatever the order of its items.
|
||||
"""
|
||||
|
||||
def __init__(self, set_sequence):
|
||||
# Forces order of elements in set to ensure consistent hash.
|
||||
try:
|
||||
# Trying first to order the set assuming the type of elements is
|
||||
# consistent and orderable.
|
||||
# This fails on python 3 when elements are unorderable
|
||||
# but we keep it in a try as it's faster.
|
||||
self._sequence = sorted(set_sequence)
|
||||
except (TypeError, decimal.InvalidOperation):
|
||||
# If elements are unorderable, sorting them using their hash.
|
||||
# This is slower but works in any case.
|
||||
self._sequence = sorted((hash(e) for e in set_sequence))
|
||||
|
||||
|
||||
class _MyHash(object):
|
||||
"""Class used to hash objects that won't normally pickle"""
|
||||
|
||||
def __init__(self, *args):
|
||||
self.args = args
|
||||
|
||||
|
||||
class Hasher(Pickler):
    """A subclass of pickler, to do cryptographic hashing, rather than
    pickling. This is used to produce a unique hash of the given
    Python object that is not necessarily cryptographically secure.
    """

    def __init__(self, hash_name="md5"):
        # All pickled bytes are accumulated here, then fed to the hash.
        self.stream = io.BytesIO()
        # By default we want a pickle protocol that only changes with
        # the major python version and not the minor one
        protocol = 3
        Pickler.__init__(self, self.stream, protocol=protocol)
        # Initialise the hash obj
        # usedforsecurity=False: md5 is used only as a fingerprint here, and
        # this keeps it usable on FIPS-restricted builds.
        self._hash = hashlib.new(hash_name, usedforsecurity=False)

    def hash(self, obj, return_digest=True):
        """Pickle ``obj`` into the internal stream and fold the bytes into
        the hash; return the hex digest unless ``return_digest`` is False."""
        try:
            self.dump(obj)
        except pickle.PicklingError as e:
            # Enrich the error with the object being hashed before re-raising.
            e.args += ("PicklingError while hashing %r: %r" % (obj, e),)
            raise
        dumps = self.stream.getvalue()
        self._hash.update(dumps)
        if return_digest:
            return self._hash.hexdigest()

    def save(self, obj):
        # Intercepts every object pickled; bound/builtin methods are replaced
        # by an identifying _MyHash surrogate before delegating to Pickler.
        if isinstance(obj, (types.MethodType, type({}.pop))):
            # the Pickler cannot pickle instance methods; here we decompose
            # them into components that make them uniquely identifiable
            if hasattr(obj, "__func__"):
                func_name = obj.__func__.__name__
            else:
                func_name = obj.__name__
            inst = obj.__self__
            if type(inst) is type(pickle):
                # Bound to a module: identify by the module's name.
                obj = _MyHash(func_name, inst.__name__)
            elif inst is None:
                # type(None) or type(module) do not pickle
                obj = _MyHash(func_name, inst)
            else:
                cls = obj.__self__.__class__
                obj = _MyHash(func_name, inst, cls)
        Pickler.save(self, obj)

    def memoize(self, obj):
        # We want hashing to be sensitive to value instead of reference.
        # For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]]
        # to hash to the same value and that's why we disable memoization
        # for strings
        if isinstance(obj, (bytes, str)):
            return
        Pickler.memoize(self, obj)

    # The dispatch table of the pickler is not accessible in Python
    # 3, as these lines are only bugware for IPython, we skip them.
    def save_global(self, obj, name=None, pack=struct.pack):
        # We have to override this method in order to deal with objects
        # defined interactively in IPython that are not injected in
        # __main__
        # 'pack' is accepted for signature compatibility but not forwarded.
        kwargs = dict(name=name, pack=pack)
        del kwargs["pack"]
        try:
            Pickler.save_global(self, obj, **kwargs)
        except pickle.PicklingError:
            Pickler.save_global(self, obj, **kwargs)
            module = getattr(obj, "__module__", None)
            if module == "__main__":
                my_name = name
                if my_name is None:
                    my_name = obj.__name__
                mod = sys.modules[module]
                if not hasattr(mod, my_name):
                    # IPython doesn't inject the variables define
                    # interactively in __main__
                    setattr(mod, my_name, obj)

    # Route builtins, types, classes and functions through save_global so
    # the IPython workaround above applies to all of them.
    dispatch = Pickler.dispatch.copy()
    # builtin
    dispatch[type(len)] = save_global
    # type
    dispatch[type(object)] = save_global
    # classobj
    dispatch[type(Pickler)] = save_global
    # function
    dispatch[type(pickle.dump)] = save_global

    # We use *args in _batch_setitems signature because _batch_setitems has an
    # additional 'obj' argument in Python 3.14
    def _batch_setitems(self, items, *args):
        # forces order of keys in dict to ensure consistent hash.
        try:
            # Trying first to compare dict assuming the type of keys is
            # consistent and orderable.
            # This fails on python 3 when keys are unorderable
            # but we keep it in a try as it's faster.
            Pickler._batch_setitems(self, iter(sorted(items)), *args)
        except TypeError:
            # If keys are unorderable, sorting them using their hash. This is
            # slower but works in any case.
            Pickler._batch_setitems(
                self, iter(sorted((hash(k), v) for k, v in items)), *args
            )

    def save_set(self, set_items):
        # forces order of items in Set to ensure consistent hash
        Pickler.save(self, _ConsistentSet(set_items))

    dispatch[type(set())] = save_set
|
||||
|
||||
|
||||
class NumpyHasher(Hasher):
    """Special case the hasher for when numpy is loaded."""

    def __init__(self, hash_name="md5", coerce_mmap=False):
        """
        Parameters
        ----------
        hash_name: string
            The hash algorithm to be used
        coerce_mmap: boolean
            Make no difference between np.memmap and np.ndarray
            objects.
        """
        self.coerce_mmap = coerce_mmap
        Hasher.__init__(self, hash_name=hash_name)
        # delayed import of numpy, to avoid tight coupling
        import numpy as np

        self.np = np
        # np.getbuffer existed on old numpy/Python 2; memoryview is the
        # modern zero-copy equivalent.
        if hasattr(np, "getbuffer"):
            self._getbuffer = np.getbuffer
        else:
            self._getbuffer = memoryview

    def save(self, obj):
        """Subclass the save method, to hash ndarray subclass, rather
        than pickling them. Off course, this is a total abuse of
        the Pickler class.
        """
        if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
            # Compute a hash of the object
            # The update function of the hash requires a c_contiguous buffer.
            if obj.shape == ():
                # 0d arrays need to be flattened because viewing them as bytes
                # raises a ValueError exception.
                obj_c_contiguous = obj.flatten()
            elif obj.flags.c_contiguous:
                obj_c_contiguous = obj
            elif obj.flags.f_contiguous:
                # Transposing a Fortran-contiguous array yields a
                # C-contiguous view without copying.
                obj_c_contiguous = obj.T
            else:
                # Cater for non-single-segment arrays: this creates a
                # copy, and thus alleviates this issue.
                # XXX: There might be a more efficient way of doing this
                obj_c_contiguous = obj.flatten()

            # memoryview is not supported for some dtypes, e.g. datetime64, see
            # https://github.com/numpy/numpy/issues/4983. The
            # workaround is to view the array as bytes before
            # taking the memoryview.
            self._hash.update(self._getbuffer(obj_c_contiguous.view(self.np.uint8)))

            # We store the class, to be able to distinguish between
            # Objects with the same binary content, but different
            # classes.
            if self.coerce_mmap and isinstance(obj, self.np.memmap):
                # We don't make the difference between memmap and
                # normal ndarrays, to be able to reload previously
                # computed results with memmap.
                klass = self.np.ndarray
            else:
                klass = obj.__class__
            # We also return the dtype and the shape, to distinguish
            # different views on the same data with different dtypes.

            # The object will be pickled by the pickler hashed at the end.
            obj = (klass, ("HASHED", obj.dtype, obj.shape, obj.strides))
        elif isinstance(obj, self.np.dtype):
            # numpy.dtype consistent hashing is tricky to get right. This comes
            # from the fact that atomic np.dtype objects are interned:
            # ``np.dtype('f4') is np.dtype('f4')``. The situation is
            # complicated by the fact that this interning does not resist a
            # simple pickle.load/dump roundtrip:
            # ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not
            # np.dtype('f4') Because pickle relies on memoization during
            # pickling, it is easy to
            # produce different hashes for seemingly identical objects, such as
            # ``[np.dtype('f4'), np.dtype('f4')]``
            # and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``.
            # To prevent memoization from interfering with hashing, we isolate
            # the serialization (and thus the pickle memoization) of each dtype
            # using each time a different ``pickle.dumps`` call unrelated to
            # the current Hasher instance.
            self._hash.update("_HASHED_DTYPE".encode("utf-8"))
            self._hash.update(pickle.dumps(obj))
            return
        Hasher.save(self, obj)
|
||||
|
||||
|
||||
def hash(obj, hash_name="md5", coerce_mmap=False):
    """Quick calculation of a hash to identify uniquely Python objects
    containing numpy arrays.

    Parameters
    ----------
    hash_name: 'md5' or 'sha1'
        Hashing algorithm used. sha1 is supposedly safer, but md5 is
        faster.
    coerce_mmap: boolean
        Make no difference between np.memmap and np.ndarray
    """
    valid_hash_names = ("md5", "sha1")
    if hash_name not in valid_hash_names:
        raise ValueError(
            "Valid options for 'hash_name' are {}. Got hash_name={!r} instead.".format(
                valid_hash_names, hash_name
            )
        )
    # Only pay for the numpy-aware hasher when numpy is already loaded;
    # otherwise the plain pickling hasher suffices.
    hasher = (
        NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
        if "numpy" in sys.modules
        else Hasher(hash_name=hash_name)
    )
    return hasher.hash(obj)
|
||||
159
Backend/venv/lib/python3.12/site-packages/joblib/logger.py
Normal file
159
Backend/venv/lib/python3.12/site-packages/joblib/logger.py
Normal file
@@ -0,0 +1,159 @@
|
||||
"""
|
||||
Helpers for logging.
|
||||
|
||||
This module needs much love to become useful.
|
||||
"""
|
||||
|
||||
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
|
||||
# Copyright (c) 2008 Gael Varoquaux
|
||||
# License: BSD Style, 3 clauses.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import logging
|
||||
import os
|
||||
import pprint
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
|
||||
from .disk import mkdirp
|
||||
|
||||
|
||||
def _squeeze_time(t):
|
||||
"""Remove .1s to the time under Windows: this is the time it take to
|
||||
stat files. This is needed to make results similar to timings under
|
||||
Unix, for tests
|
||||
"""
|
||||
if sys.platform.startswith("win"):
|
||||
return max(0, t - 0.1)
|
||||
else:
|
||||
return t
|
||||
|
||||
|
||||
def format_time(t):
    """Render *t* seconds as '<s>s, <m>min' after platform adjustment."""
    seconds = _squeeze_time(t)
    return "%.1fs, %.1fmin" % (seconds, seconds / 60.0)
|
||||
|
||||
|
||||
def short_format_time(t):
    """Compact rendering of *t* seconds: switch to minutes past one minute."""
    seconds = _squeeze_time(t)
    if seconds > 60:
        return "%4.1fmin" % (seconds / 60.0)
    return " %5.1fs" % (seconds,)
|
||||
|
||||
|
||||
def pformat(obj, indent=0, depth=3):
    """Pretty-print ``obj``, temporarily compacting numpy's print options."""
    saved_options = None
    if "numpy" in sys.modules:
        import numpy as np

        # Shrink numpy's repr so large arrays don't flood the output.
        saved_options = np.get_printoptions()
        np.set_printoptions(precision=6, threshold=64, edgeitems=1)
    formatted = pprint.pformat(obj, depth=depth, indent=indent)
    if saved_options:
        # Restore the caller's numpy print options.
        np.set_printoptions(**saved_options)
    return formatted
|
||||
|
||||
|
||||
###############################################################################
|
||||
# class `Logger`
|
||||
###############################################################################
|
||||
class Logger(object):
    """Base class for objects that emit log messages.

    Messages are routed to the standard :mod:`logging` logger named
    ``self._name`` (``"joblib"`` by default), so applications can configure
    joblib's output through the usual logging machinery.
    """

    def __init__(self, depth=3, name=None):
        """
        Parameters
        ----------
        depth: int, optional
            The depth of objects printed.
        name: str, optional
            The namespace to log to. If None, defaults to joblib.
        """
        # Maximum nesting depth handed to pformat when rendering objects.
        self.depth = depth
        self._name = name if name else "joblib"

    def warn(self, msg):
        """Log *msg* at WARNING level on this object's named logger."""
        logging.getLogger(self._name).warning("[%s]: %s" % (self, msg))

    def info(self, msg):
        """Log *msg* at INFO level on this object's named logger."""
        # Fix: previously routed through the root logger (``logging.info``),
        # inconsistent with warn()/debug() and ignoring the configured
        # ``self._name`` namespace. Use the named logger like the others.
        logging.getLogger(self._name).info("[%s]: %s" % (self, msg))

    def debug(self, msg):
        """Log *msg* at DEBUG level on this object's named logger."""
        # XXX: This conflicts with the debug flag used in children class
        logging.getLogger(self._name).debug("[%s]: %s" % (self, msg))

    def format(self, obj, indent=0):
        """Return the formatted representation of the object."""
        return pformat(obj, indent=indent, depth=self.depth)
|
||||
|
||||
|
||||
###############################################################################
|
||||
# class `PrintTime`
|
||||
###############################################################################
|
||||
class PrintTime(object):
    """Print and log messages while keeping track of time."""

    def __init__(self, logfile=None, logdir=None):
        # logfile: explicit path of the log file to append to.
        # logdir: directory in which a 'joblib.log' file is created.
        # The two options are mutually exclusive.
        if logfile is not None and logdir is not None:
            raise ValueError("Cannot specify both logfile and logdir")
        # XXX: Need argument docstring
        self.last_time = time.time()
        self.start_time = self.last_time
        if logdir is not None:
            logfile = os.path.join(logdir, "joblib.log")
        self.logfile = logfile
        if logfile is not None:
            mkdirp(os.path.dirname(logfile))
            if os.path.exists(logfile):
                # Rotate the logs: shift joblib.log.1 ... joblib.log.8 up by
                # one, silently skipping files that do not exist.
                for i in range(1, 9):
                    try:
                        shutil.move(logfile + ".%i" % i, logfile + ".%i" % (i + 1))
                    except:  # noqa: E722
                        "No reason failing here"
                # Use a copy rather than a move, so that a process
                # monitoring this file does not get lost.
                try:
                    shutil.copy(logfile, logfile + ".1")
                except:  # noqa: E722
                    "No reason failing here"
            try:
                # Truncate (mode "w") and write a fresh session header.
                with open(logfile, "w") as logfile:
                    logfile.write("\nLogging joblib python script\n")
                    logfile.write("\n---%s---\n" % time.ctime(self.last_time))
            except:  # noqa: E722
                """ Multiprocessing writing to files can create race
                conditions. Rather fail silently than crash the
                computation.
                """
                # XXX: We actually need a debug flag to disable this
                # silent failure.

    def __call__(self, msg="", total=False):
        """Print the time elapsed between the last call and the current
        call, with an optional message.
        """
        if not total:
            # Delta since the previous __call__ (or construction).
            time_lapse = time.time() - self.last_time
            full_msg = "%s: %s" % (msg, format_time(time_lapse))
        else:
            # FIXME: Too much logic duplicated
            # total=True reports elapsed time since construction instead.
            time_lapse = time.time() - self.start_time
            full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse, time_lapse / 60)
        print(full_msg, file=sys.stderr)
        if self.logfile is not None:
            try:
                with open(self.logfile, "a") as f:
                    print(full_msg, file=f)
            except:  # noqa: E722
                """ Multiprocessing writing to files can create race
                conditions. Rather fail silently than crash the
                calculation.
                """
                # XXX: We actually need a debug flag to disable this
                # silent failure.
        self.last_time = time.time()
|
||||
1242
Backend/venv/lib/python3.12/site-packages/joblib/memory.py
Normal file
1242
Backend/venv/lib/python3.12/site-packages/joblib/memory.py
Normal file
File diff suppressed because it is too large
Load Diff
756
Backend/venv/lib/python3.12/site-packages/joblib/numpy_pickle.py
Normal file
756
Backend/venv/lib/python3.12/site-packages/joblib/numpy_pickle.py
Normal file
@@ -0,0 +1,756 @@
|
||||
"""Utilities for fast persistence of big data, with optional compression."""
|
||||
|
||||
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
|
||||
# Copyright (c) 2009 Gael Varoquaux
|
||||
# License: BSD Style, 3 clauses.
|
||||
|
||||
import io
|
||||
import os
|
||||
import pickle
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
|
||||
from .backports import make_memmap
|
||||
from .compressor import (
|
||||
_COMPRESSORS,
|
||||
LZ4_NOT_INSTALLED_ERROR,
|
||||
BinaryZlibFile,
|
||||
BZ2CompressorWrapper,
|
||||
GzipCompressorWrapper,
|
||||
LZ4CompressorWrapper,
|
||||
LZMACompressorWrapper,
|
||||
XZCompressorWrapper,
|
||||
ZlibCompressorWrapper,
|
||||
lz4,
|
||||
register_compressor,
|
||||
)
|
||||
|
||||
# For compatibility with old versions of joblib, we need ZNDArrayWrapper
|
||||
# to be visible in the current namespace.
|
||||
from .numpy_pickle_compat import (
|
||||
NDArrayWrapper,
|
||||
ZNDArrayWrapper, # noqa: F401
|
||||
load_compatibility,
|
||||
)
|
||||
from .numpy_pickle_utils import (
|
||||
BUFFER_SIZE,
|
||||
Pickler,
|
||||
Unpickler,
|
||||
_ensure_native_byte_order,
|
||||
_read_bytes,
|
||||
_reconstruct,
|
||||
_validate_fileobject_and_memmap,
|
||||
_write_fileobject,
|
||||
)
|
||||
|
||||
# Register supported compressors
# Note: the lz4 wrapper degrades gracefully when the lz4 package is not
# installed (see LZ4_NOT_INSTALLED_ERROR imported above).
register_compressor("zlib", ZlibCompressorWrapper())
register_compressor("gzip", GzipCompressorWrapper())
register_compressor("bz2", BZ2CompressorWrapper())
register_compressor("lzma", LZMACompressorWrapper())
register_compressor("xz", XZCompressorWrapper())
register_compressor("lz4", LZ4CompressorWrapper())


###############################################################################
# Utility objects for persistence.

# For convenience, 16 bytes are used to be sure to cover all the possible
# dtypes' alignments. For reference, see:
# https://numpy.org/devdocs/dev/alignment.html
NUMPY_ARRAY_ALIGNMENT_BYTES = 16
|
||||
|
||||
|
||||
class NumpyArrayWrapper(object):
|
||||
"""An object to be persisted instead of numpy arrays.
|
||||
|
||||
This object is used to hack into the pickle machinery and read numpy
|
||||
array data from our custom persistence format.
|
||||
More precisely, this object is used for:
|
||||
* carrying the information of the persisted array: subclass, shape, order,
|
||||
dtype. Those ndarray metadata are used to correctly reconstruct the array
|
||||
with low level numpy functions.
|
||||
* determining if memmap is allowed on the array.
|
||||
* reading the array bytes from a file.
|
||||
* reading the array using memorymap from a file.
|
||||
* writing the array bytes to a file.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
subclass: numpy.ndarray subclass
|
||||
Determine the subclass of the wrapped array.
|
||||
shape: numpy.ndarray shape
|
||||
Determine the shape of the wrapped array.
|
||||
order: {'C', 'F'}
|
||||
Determine the order of wrapped array data. 'C' is for C order, 'F' is
|
||||
for fortran order.
|
||||
dtype: numpy.ndarray dtype
|
||||
Determine the data type of the wrapped array.
|
||||
allow_mmap: bool
|
||||
Determine if memory mapping is allowed on the wrapped array.
|
||||
Default: False.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
subclass,
|
||||
shape,
|
||||
order,
|
||||
dtype,
|
||||
allow_mmap=False,
|
||||
numpy_array_alignment_bytes=NUMPY_ARRAY_ALIGNMENT_BYTES,
|
||||
):
|
||||
"""Constructor. Store the useful information for later."""
|
||||
self.subclass = subclass
|
||||
self.shape = shape
|
||||
self.order = order
|
||||
self.dtype = dtype
|
||||
self.allow_mmap = allow_mmap
|
||||
# We make numpy_array_alignment_bytes an instance attribute to allow us
|
||||
# to change our mind about the default alignment and still load the old
|
||||
# pickles (with the previous alignment) correctly
|
||||
self.numpy_array_alignment_bytes = numpy_array_alignment_bytes
|
||||
|
||||
def safe_get_numpy_array_alignment_bytes(self):
|
||||
# NumpyArrayWrapper instances loaded from joblib <= 1.1 pickles don't
|
||||
# have an numpy_array_alignment_bytes attribute
|
||||
return getattr(self, "numpy_array_alignment_bytes", None)
|
||||
|
||||
    def write_array(self, array, pickler):
        """Write array bytes to pickler file handle.

        This function is an adaptation of the numpy write_array function
        available in version 1.10.1 in numpy/lib/format.py.
        """
        # Set buffer size to 16 MiB to hide the Python loop overhead.
        buffersize = max(16 * 1024**2 // array.itemsize, 1)
        if array.dtype.hasobject:
            # We contain Python objects so we cannot write out the data
            # directly. Instead, we will pickle it out with version 5 of the
            # pickle protocol.
            pickle.dump(array, pickler.file_handle, protocol=5)
        else:
            numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes()
            if numpy_array_alignment_bytes is not None:
                # Pad so that the raw array data starts on an aligned offset;
                # read_array/read_mmap skip the same padding on load.
                current_pos = pickler.file_handle.tell()
                pos_after_padding_byte = current_pos + 1
                padding_length = numpy_array_alignment_bytes - (
                    pos_after_padding_byte % numpy_array_alignment_bytes
                )
                # A single byte is written that contains the padding length in
                # bytes
                padding_length_byte = int.to_bytes(
                    padding_length, length=1, byteorder="little"
                )
                pickler.file_handle.write(padding_length_byte)

                if padding_length != 0:
                    padding = b"\xff" * padding_length
                    pickler.file_handle.write(padding)

            # Stream the array contents buffer by buffer; nditer handles
            # non-contiguous layouts and zero-size arrays for us.
            for chunk in pickler.np.nditer(
                array,
                flags=["external_loop", "buffered", "zerosize_ok"],
                buffersize=buffersize,
                order=self.order,
            ):
                pickler.file_handle.write(chunk.tobytes("C"))
|
||||
|
||||
    def read_array(self, unpickler, ensure_native_byte_order):
        """Read array from unpickler file handle.

        This function is an adaptation of the numpy read_array function
        available in version 1.10.1 in numpy/lib/format.py.
        """
        if len(self.shape) == 0:
            # 0d array: exactly one element.
            count = 1
        else:
            # joblib issue #859: we cast the elements of self.shape to int64 to
            # prevent a potential overflow when computing their product.
            shape_int64 = [unpickler.np.int64(x) for x in self.shape]
            count = unpickler.np.multiply.reduce(shape_int64)
        # Now read the actual data.
        if self.dtype.hasobject:
            # The array contained Python objects. We need to unpickle the data.
            array = pickle.load(unpickler.file_handle)
        else:
            numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes()
            if numpy_array_alignment_bytes is not None:
                # Skip the padding written by write_array for alignment.
                padding_byte = unpickler.file_handle.read(1)
                padding_length = int.from_bytes(padding_byte, byteorder="little")
                if padding_length != 0:
                    unpickler.file_handle.read(padding_length)

            # This is not a real file. We have to read it the
            # memory-intensive way.
            # crc32 module fails on reads greater than 2 ** 32 bytes,
            # breaking large reads from gzip streams. Chunk reads to
            # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
            # of the read. In non-chunked case count < max_read_count, so
            # only one read is performed.
            max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, self.dtype.itemsize)

            array = unpickler.np.empty(count, dtype=self.dtype)
            for i in range(0, count, max_read_count):
                read_count = min(max_read_count, count - i)
                read_size = int(read_count * self.dtype.itemsize)
                data = _read_bytes(unpickler.file_handle, read_size, "array data")
                array[i : i + read_count] = unpickler.np.frombuffer(
                    data, dtype=self.dtype, count=read_count
                )
                # Free each chunk eagerly to keep peak memory down.
                del data

            if self.order == "F":
                # Data was written in Fortran order: rebuild by reversing the
                # shape and transposing, which restores F-ordered strides.
                array.shape = self.shape[::-1]
                array = array.transpose()
            else:
                array.shape = self.shape

        if ensure_native_byte_order:
            # Detect byte order mismatch and swap as needed.
            array = _ensure_native_byte_order(array)

        return array
|
||||
|
||||
def read_mmap(self, unpickler):
    """Read an array using numpy memmap.

    The memmap is opened at the current position of ``unpickler.file_handle``
    (plus any alignment padding written by the pickler), and the file handle
    is advanced past the array bytes afterwards so unpickling can continue.

    Parameters
    ----------
    unpickler: NumpyUnpickler
        Provides the open ``file_handle``, the source ``filename`` and the
        requested ``mmap_mode``.

    Returns
    -------
    numpy.memmap
        A memory-mapped view of the array stored in the file.
    """
    current_pos = unpickler.file_handle.tell()
    offset = current_pos
    numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes()

    if numpy_array_alignment_bytes is not None:
        # Files written with joblib >= 1.2.0 store a single byte holding the
        # padding length, followed by that many padding bytes, so that the
        # array data itself starts on an aligned offset.
        padding_byte = unpickler.file_handle.read(1)
        padding_length = int.from_bytes(padding_byte, byteorder="little")
        # + 1 is for the padding byte
        offset += padding_length + 1

    if unpickler.mmap_mode == "w+":
        # "w+" would truncate the existing file; downgrade to "r+" so the
        # already-written data is mapped read-write instead of destroyed.
        unpickler.mmap_mode = "r+"

    marray = make_memmap(
        unpickler.filename,
        dtype=self.dtype,
        shape=self.shape,
        order=self.order,
        mode=unpickler.mmap_mode,
        offset=offset,
    )
    # update the offset so that it corresponds to the end of the read array
    unpickler.file_handle.seek(offset + marray.nbytes)

    if (
        numpy_array_alignment_bytes is None
        and current_pos % NUMPY_ARRAY_ALIGNMENT_BYTES != 0
    ):
        # Legacy pickles (no alignment info) may start the array at an
        # unaligned offset; warn because some native libraries require
        # aligned buffers.
        message = (
            f"The memmapped array {marray} loaded from the file "
            f"{unpickler.file_handle.name} is not byte aligned. "
            "This may cause segmentation faults if this memmapped array "
            "is used in some libraries like BLAS or PyTorch. "
            "To get rid of this warning, regenerate your pickle file "
            "with joblib >= 1.2.0. "
            "See https://github.com/joblib/joblib/issues/563 "
            "for more details"
        )
        warnings.warn(message)

    return marray
|
||||
|
||||
def read(self, unpickler, ensure_native_byte_order):
    """Read the array corresponding to this wrapper.

    Dispatches to memmap-based or in-memory reading depending on the
    unpickler configuration, then restores the original array subclass
    when one was recorded at dump time.

    Parameters
    ----------
    unpickler: NumpyUnpickler
    ensure_native_byte_order: bool
        If true, coerce the array to use the native endianness of the
        host system.

    Returns
    -------
    array: numpy.ndarray
    """
    # When requested, only use memmap mode if allowed.
    use_mmap = unpickler.mmap_mode is not None and self.allow_mmap
    if use_mmap:
        assert not ensure_native_byte_order, (
            "Memmaps cannot be coerced to a given byte order, "
            "this code path is impossible."
        )
        result = self.read_mmap(unpickler)
    else:
        result = self.read_array(unpickler, ensure_native_byte_order)

    # Manage array subclass case: anything other than a plain ndarray or
    # memmap must be rebuilt through numpy's reconstruction machinery.
    plain_types = (unpickler.np.ndarray, unpickler.np.memmap)
    if self.subclass in plain_types or not hasattr(result, "__array_prepare__"):
        return result

    # We need to reconstruct another subclass
    reconstructed = _reconstruct(self.subclass, (0,), "b")
    return reconstructed.__array_prepare__(result)
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Pickler classes
|
||||
|
||||
|
||||
class NumpyPickler(Pickler):
    """A pickler to persist big data efficiently.

    The main features of this object are:
    * persistence of numpy arrays in a single file.
    * optional compression with a special care on avoiding memory copies.

    Attributes
    ----------
    fp: file
        File object handle used for serializing the input object.
    protocol: int, optional
        Pickle protocol used. Default is pickle.DEFAULT_PROTOCOL.
    """

    dispatch = Pickler.dispatch.copy()

    def __init__(self, fp, protocol=None):
        self.file_handle = fp
        # Compressed streams cannot be memmapped back on load; remember
        # whether the target is a compressed (zlib) file object.
        self.buffered = isinstance(self.file_handle, BinaryZlibFile)

        # By default we want a pickle protocol that only changes with
        # the major python version and not the minor one
        if protocol is None:
            protocol = pickle.DEFAULT_PROTOCOL

        Pickler.__init__(self, self.file_handle, protocol=protocol)
        # delayed import of numpy, to avoid tight coupling: pickling
        # non-array objects must work even without numpy installed.
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def _create_array_wrapper(self, array):
        """Create and returns a numpy array wrapper from a numpy array."""
        # Record Fortran order only when the array is *exclusively*
        # F-contiguous (a 1-D array is both, and is treated as C).
        order = (
            "F" if (array.flags.f_contiguous and not array.flags.c_contiguous) else "C"
        )
        # Object arrays and compressed targets cannot be memmapped on load.
        allow_mmap = not self.buffered and not array.dtype.hasobject

        kwargs = {}
        try:
            self.file_handle.tell()
        except io.UnsupportedOperation:
            # Non-seekable targets cannot honor alignment padding.
            kwargs = {"numpy_array_alignment_bytes": None}

        wrapper = NumpyArrayWrapper(
            type(array),
            array.shape,
            order,
            array.dtype,
            allow_mmap=allow_mmap,
            **kwargs,
        )

        return wrapper

    def save(self, obj):
        """Subclass the Pickler `save` method.

        This is a total abuse of the Pickler class in order to use the numpy
        persistence function `save` instead of the default pickle
        implementation. The numpy array is replaced by a custom wrapper in the
        pickle persistence stack and the serialized array is written right
        after in the file. Warning: the file produced does not follow the
        pickle format. As such it can not be read with `pickle.load`.
        """
        # Only exact ndarray/matrix/memmap types take the fast path;
        # other subclasses fall through to regular pickling.
        if self.np is not None and type(obj) in (
            self.np.ndarray,
            self.np.matrix,
            self.np.memmap,
        ):
            if type(obj) is self.np.memmap:
                # Pickling doesn't work with memmapped arrays
                obj = self.np.asanyarray(obj)

            # The array wrapper is pickled instead of the real array.
            wrapper = self._create_array_wrapper(obj)
            Pickler.save(self, wrapper)

            # A framer was introduced with pickle protocol 4 and we want to
            # ensure the wrapper object is written before the numpy array
            # buffer in the pickle file.
            # See https://www.python.org/dev/peps/pep-3154/#framing to get
            # more information on the framer behavior.
            if self.proto >= 4:
                self.framer.commit_frame(force=True)

            # And then array bytes are written right after the wrapper.
            wrapper.write_array(obj, self)
            return

        return Pickler.save(self, obj)
|
||||
|
||||
|
||||
class NumpyUnpickler(Unpickler):
    """A subclass of the Unpickler to unpickle our numpy pickles.

    Attributes
    ----------
    mmap_mode: str
        The memorymap mode to use for reading numpy arrays.
    file_handle: file_like
        File object to unpickle from.
    ensure_native_byte_order: bool
        If True, coerce the array to use the native endianness of the
        host system.
    filename: str
        Name of the file to unpickle from. It should correspond to file_handle.
        This parameter is required when using mmap_mode.
    np: module
        Reference to numpy module if numpy is installed else None.

    """

    dispatch = Unpickler.dispatch.copy()

    def __init__(self, filename, file_handle, ensure_native_byte_order, mmap_mode=None):
        # The next line is for backward compatibility with pickle generated
        # with joblib versions less than 0.10.
        self._dirname = os.path.dirname(filename)

        self.mmap_mode = mmap_mode
        self.file_handle = file_handle
        # filename is required for numpy mmap mode.
        self.filename = filename
        # Flipped to True by load_build() when a legacy NDArrayWrapper is
        # seen; callers use it to emit a DeprecationWarning afterwards.
        self.compat_mode = False
        self.ensure_native_byte_order = ensure_native_byte_order
        Unpickler.__init__(self, self.file_handle)
        # Delayed numpy import: unpickling non-array objects must still
        # work when numpy is not installed.
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def load_build(self):
        """Called to set the state of a newly created object.

        We capture it to replace our place-holder objects, NDArrayWrapper or
        NumpyArrayWrapper, by the array we are interested in. We
        replace them directly in the stack of pickler.
        NDArrayWrapper is used for backward compatibility with joblib <= 0.9.
        """
        Unpickler.load_build(self)

        # For backward compatibility, we support NDArrayWrapper objects.
        if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)):
            if self.np is None:
                raise ImportError(
                    "Trying to unpickle an ndarray, but numpy didn't import correctly"
                )
            array_wrapper = self.stack.pop()
            # If any NDArrayWrapper is found, we switch to compatibility mode,
            # this will be used to raise a DeprecationWarning to the user at
            # the end of the unpickling.
            if isinstance(array_wrapper, NDArrayWrapper):
                self.compat_mode = True
                # Legacy wrappers predate the byte-order option and take
                # only the unpickler.
                _array_payload = array_wrapper.read(self)
            else:
                _array_payload = array_wrapper.read(self, self.ensure_native_byte_order)

            self.stack.append(_array_payload)

    # Be careful to register our new method.
    dispatch[pickle.BUILD[0]] = load_build
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Utility functions
|
||||
|
||||
|
||||
def dump(value, filename, compress=0, protocol=None):
    """Persist an arbitrary Python object into one file.

    Read more in the :ref:`User Guide <persistence>`.

    Parameters
    ----------
    value: any Python object
        The object to store to disk.
    filename: str, pathlib.Path, or file object.
        The file object or path of the file in which it is to be stored.
        The compression method corresponding to one of the supported filename
        extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used
        automatically.
    compress: int from 0 to 9 or bool or 2-tuple, optional
        Optional compression level for the data. 0 or False is no compression.
        Higher value means more compression, but also slower read and
        write times. Using a value of 3 is often a good compromise.
        See the notes for more details.
        If compress is True, the compression level used is 3.
        If compress is a 2-tuple, the first element must correspond to a string
        between supported compressors (e.g 'zlib', 'gzip', 'bz2', 'lzma'
        'xz'), the second element must be an integer from 0 to 9, corresponding
        to the compression level.
    protocol: int, optional
        Pickle protocol, see pickle.dump documentation for more details.

    Returns
    -------
    filenames: list of strings
        The list of file names in which the data is stored. If
        compress is false, each array is stored in a different file.

    See Also
    --------
    joblib.load : corresponding loader

    Notes
    -----
    Memmapping on load cannot be used for compressed files. Thus
    using compression can significantly slow down loading. In
    addition, compressed files take up extra memory during
    dump and load.

    """

    if Path is not None and isinstance(filename, Path):
        filename = str(filename)

    is_filename = isinstance(filename, str)
    is_fileobj = hasattr(filename, "write")

    # Normalize the `compress` argument into (compress_method, compress_level).
    compress_method = "zlib"  # zlib is the default compression method.
    if compress is True:
        # By default, if compress is enabled, we want the default compress
        # level of the compressor.
        compress_level = None
    elif isinstance(compress, tuple):
        # a 2-tuple was set in compress
        if len(compress) != 2:
            raise ValueError(
                "Compress argument tuple should contain exactly 2 elements: "
                "(compress method, compress level), you passed {}".format(compress)
            )
        compress_method, compress_level = compress
    elif isinstance(compress, str):
        compress_method = compress
        compress_level = None  # Use default compress level
        compress = (compress_method, compress_level)
    else:
        # An int (possibly 0): interpreted as a zlib compression level.
        compress_level = compress

    if compress_method == "lz4" and lz4 is None:
        raise ValueError(LZ4_NOT_INSTALLED_ERROR)

    if (
        compress_level is not None
        and compress_level is not False
        and compress_level not in range(10)
    ):
        # Raising an error if a non valid compress level is given.
        raise ValueError(
            'Non valid compress level given: "{}". Possible values are {}.'.format(
                compress_level, list(range(10))
            )
        )

    if compress_method not in _COMPRESSORS:
        # Raising an error if an unsupported compression method is given.
        raise ValueError(
            'Non valid compression method given: "{}". Possible values are {}.'.format(
                compress_method, _COMPRESSORS
            )
        )

    if not is_filename and not is_fileobj:
        # People keep inverting arguments, and the resulting error is
        # incomprehensible
        raise ValueError(
            "Second argument should be a filename or a file-like object, "
            "%s (type %s) was given." % (filename, type(filename))
        )

    if is_filename and not isinstance(compress, tuple):
        # In case no explicit compression was requested using both compression
        # method and level in a tuple and the filename has an explicit
        # extension, we select the corresponding compressor.

        # unset the variable to be sure no compression level is set afterwards.
        compress_method = None
        for name, compressor in _COMPRESSORS.items():
            if filename.endswith(compressor.extension):
                compress_method = name

        if compress_method in _COMPRESSORS and compress_level == 0:
            # we choose the default compress_level in case it was not given
            # as an argument (using compress).
            compress_level = None

    # Three write paths: compressed stream, plain file, or caller-supplied
    # file object.
    if compress_level != 0:
        with _write_fileobject(
            filename, compress=(compress_method, compress_level)
        ) as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    elif is_filename:
        with open(filename, "wb") as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    else:
        NumpyPickler(filename, protocol=protocol).dump(value)

    # If the target container is a file object, nothing is returned.
    if is_fileobj:
        return

    # For compatibility, the list of created filenames (e.g with one element
    # after 0.10.0) is returned by default.
    return [filename]
|
||||
|
||||
|
||||
def _unpickle(fobj, ensure_native_byte_order, filename="", mmap_mode=None):
    """Internal unpickling function."""
    # We are careful to open the file handle early and keep it open to
    # avoid race-conditions on renames.
    # That said, if data is stored in companion files, which can be
    # the case with the old persistence format, moving the directory
    # will create a race when joblib tries to access the companion
    # files.
    unpickler = NumpyUnpickler(
        filename, fobj, ensure_native_byte_order, mmap_mode=mmap_mode
    )
    obj = None
    try:
        obj = unpickler.load()
    except UnicodeDecodeError as exc:
        # More user-friendly error message
        new_exc = ValueError(
            "You may be trying to read with "
            "python 3 a joblib pickle generated with python 2. "
            "This feature is not supported by joblib."
        )
        new_exc.__cause__ = exc
        raise new_exc

    # Legacy NDArrayWrapper payloads flip compat_mode during load.
    if unpickler.compat_mode:
        warnings.warn(
            "The file '%s' has been generated with a "
            "joblib version less than 0.10. "
            "Please regenerate this pickle file." % filename,
            DeprecationWarning,
            stacklevel=3,
        )
    return obj
|
||||
|
||||
|
||||
def load_temporary_memmap(filename, mmap_mode, unlink_on_gc_collect):
    """Load a joblib pickle as a memmap and track it for cleanup.

    Used for worker-side loading of temporary memmapped arrays shared
    between processes: the loaded memmap's backing file is registered in
    ``JOBLIB_MMAPS`` and, optionally, unlinked when the object is
    garbage-collected.

    Parameters
    ----------
    filename: str
        Path of the joblib pickle backing the memmap.
    mmap_mode: str
        Memory-map mode passed through to the unpickler.
    unlink_on_gc_collect: bool
        If True, attach a finalizer that may delete the backing file once
        the returned object is collected.

    Returns
    -------
    The unpickled object; expected to expose a ``filename`` attribute
    (i.e. a numpy memmap).
    """
    from ._memmapping_reducer import JOBLIB_MMAPS, add_maybe_unlink_finalizer

    with open(filename, "rb") as f:
        with _validate_fileobject_and_memmap(f, filename, mmap_mode) as (
            fobj,
            validated_mmap_mode,
        ):
            # Memmap are used for interprocess communication, which should
            # keep the objects untouched. We pass `ensure_native_byte_order=False`
            # to remain consistent with the loading behavior of non-memmaped arrays
            # in workers, where the byte order is preserved.
            # Note that we do not implement endianness change for memmaps, as this
            # would result in inconsistent behavior.
            obj = _unpickle(
                fobj,
                ensure_native_byte_order=False,
                filename=filename,
                mmap_mode=validated_mmap_mode,
            )

    JOBLIB_MMAPS.add(obj.filename)
    if unlink_on_gc_collect:
        add_maybe_unlink_finalizer(obj)
    return obj
|
||||
|
||||
|
||||
def load(filename, mmap_mode=None, ensure_native_byte_order="auto"):
    """Reconstruct a Python object from a file persisted with joblib.dump.

    Read more in the :ref:`User Guide <persistence>`.

    WARNING: joblib.load relies on the pickle module and can therefore
    execute arbitrary Python code. It should therefore never be used
    to load files from untrusted sources.

    Parameters
    ----------
    filename: str, pathlib.Path, or file object.
        The file object or path of the file from which to load the object
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, the arrays are memory-mapped from the disk. This
        mode has no effect for compressed files. Note that in this
        case the reconstructed object might no longer match exactly
        the originally pickled object.
    ensure_native_byte_order: bool, or 'auto', default=='auto'
        If True, ensures that the byte order of the loaded arrays matches the
        native byte ordering (or _endianness_) of the host system. This is not
        compatible with memory-mapped arrays and using non-null `mmap_mode`
        parameter at the same time will raise an error. The default 'auto'
        parameter is equivalent to True if `mmap_mode` is None, else False.

    Returns
    -------
    result: any Python object
        The object stored in the file.

    See Also
    --------
    joblib.dump : function to save an object

    Notes
    -----

    This function can load numpy array files saved separately during the
    dump. If the mmap_mode argument is given, it is passed to np.load and
    arrays are loaded as memmaps. As a consequence, the reconstructed
    object might not match the original pickled object. Note that if the
    file was saved with compression, the arrays cannot be memmapped.
    """
    # Resolve 'auto': enforce native byte order only when not memmapping.
    if ensure_native_byte_order == "auto":
        ensure_native_byte_order = mmap_mode is None

    if ensure_native_byte_order and mmap_mode is not None:
        # Byteswapping a memmap would rewrite the backing file, so the two
        # options are mutually exclusive.
        raise ValueError(
            "Native byte ordering can only be enforced if 'mmap_mode' parameter "
            f"is set to None, but got 'mmap_mode={mmap_mode}' instead."
        )

    if Path is not None and isinstance(filename, Path):
        filename = str(filename)

    if hasattr(filename, "read"):
        # Caller passed an already-open file-like object.
        fobj = filename
        filename = getattr(fobj, "name", "")
        with _validate_fileobject_and_memmap(fobj, filename, mmap_mode) as (fobj, _):
            obj = _unpickle(fobj, ensure_native_byte_order=ensure_native_byte_order)
    else:
        with open(filename, "rb") as f:
            with _validate_fileobject_and_memmap(f, filename, mmap_mode) as (
                fobj,
                validated_mmap_mode,
            ):
                if isinstance(fobj, str):
                    # if the returned file object is a string, this means we
                    # try to load a pickle file generated with an version of
                    # Joblib so we load it with joblib compatibility function.
                    return load_compatibility(fobj)

                # A memory-mapped array has to be mapped with the endianness
                # it has been written with. Other arrays are coerced to the
                # native endianness of the host system.
                obj = _unpickle(
                    fobj,
                    ensure_native_byte_order=ensure_native_byte_order,
                    filename=filename,
                    mmap_mode=validated_mmap_mode,
                )

    return obj
|
||||
@@ -0,0 +1,250 @@
|
||||
"""Numpy pickle compatibility functions."""
|
||||
|
||||
import inspect
|
||||
import os
|
||||
import pickle
|
||||
import zlib
|
||||
from io import BytesIO
|
||||
|
||||
from .numpy_pickle_utils import (
|
||||
_ZFILE_PREFIX,
|
||||
Unpickler,
|
||||
_ensure_native_byte_order,
|
||||
_reconstruct,
|
||||
)
|
||||
|
||||
|
||||
def hex_str(an_int):
    """Convert an int to an hexadecimal string."""
    return f"{an_int:#x}"
|
||||
|
||||
|
||||
def asbytes(s):
    """Return *s* as bytes, encoding str input with latin1."""
    return s if isinstance(s, bytes) else s.encode("latin1")
|
||||
|
||||
|
||||
# Width of the hex length field in a z-file header: wide enough to hold
# the hex representation of 2**64.
_MAX_LEN = len(hex_str(2**64))
# NOTE(review): presumably a streaming chunk size for z-file I/O; its use
# is not visible in this chunk — confirm against the rest of the module.
_CHUNK_SIZE = 64 * 1024
|
||||
|
||||
|
||||
def read_zfile(file_handle):
    """Read the z-file and return the content as a string.

    Z-files are raw data compressed with zlib used internally by joblib
    for persistence. Backward compatibility is not guaranteed. Do not
    use for external purposes.
    """
    file_handle.seek(0)
    # Header layout: _ZFILE_PREFIX followed by the payload length in hex,
    # left-justified over _MAX_LEN bytes.
    header_length = len(_ZFILE_PREFIX) + _MAX_LEN
    raw_header = file_handle.read(header_length)
    length = int(raw_header[len(_ZFILE_PREFIX) :], 16)

    # With python2 and joblib version <= 0.8.4 compressed pickle header is one
    # character wider so we need to ignore an additional space if present.
    # Note: the first byte of the zlib data is guaranteed not to be a
    # space according to
    # https://tools.ietf.org/html/rfc6713#section-2.1
    if file_handle.read(1) != b" ":
        # The zlib compressed data has started and we need to go back
        # one byte
        file_handle.seek(header_length)

    # We use the known length of the data to tell Zlib the size of the
    # buffer to allocate.
    data = zlib.decompress(file_handle.read(), 15, length)
    assert len(data) == length, (
        "Incorrect data length while decompressing %s."
        "The file could be corrupted." % file_handle
    )
    return data
|
||||
|
||||
|
||||
def write_zfile(file_handle, data, compress=1):
    """Write the data in the given file as a Z-file.

    Z-files are raw data compressed with zlib used internally by joblib
    for persistence. Backward compatibility is not guaranteed. Do not
    use for external purposes.
    """
    # Header: magic prefix, then the payload length as hex padded to a
    # fixed width so readers can parse it without scanning.
    file_handle.write(_ZFILE_PREFIX)
    header = hex_str(len(data)).ljust(_MAX_LEN)
    file_handle.write(asbytes(header))
    file_handle.write(zlib.compress(asbytes(data), compress))
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Utility objects for persistence.
|
||||
|
||||
|
||||
class NDArrayWrapper(object):
    """An object to be persisted instead of numpy arrays.

    The only thing this object does, is to carry the filename in which
    the array has been persisted, and the array subclass.
    """

    def __init__(self, filename, subclass, allow_mmap=True):
        """Constructor. Store the useful information for later."""
        # Filename is relative to the directory of the main pickle file.
        self.filename = filename
        self.subclass = subclass
        self.allow_mmap = allow_mmap

    def read(self, unpickler):
        """Reconstruct the array.

        Parameters
        ----------
        unpickler: unpickler instance
            Provides ``_dirname`` (base directory for companion files),
            ``mmap_mode`` and the ``np`` module reference.

        Returns
        -------
        The loaded numpy array (possibly rebuilt as the stored subclass).
        """
        filename = os.path.join(unpickler._dirname, self.filename)
        # Load the array from the disk
        # use getattr instead of self.allow_mmap to ensure backward compat
        # with NDArrayWrapper instances pickled with joblib < 0.9.0
        allow_mmap = getattr(self, "allow_mmap", True)
        kwargs = {}
        if allow_mmap:
            kwargs["mmap_mode"] = unpickler.mmap_mode
        if "allow_pickle" in inspect.signature(unpickler.np.load).parameters:
            # Required in numpy 1.16.3 and later to acknowledge the security
            # risk.
            kwargs["allow_pickle"] = True
        array = unpickler.np.load(filename, **kwargs)

        # Detect byte order mismatch and swap as needed.
        array = _ensure_native_byte_order(array)

        # Reconstruct subclasses. This does not work with old
        # versions of numpy
        if hasattr(array, "__array_prepare__") and self.subclass not in (
            unpickler.np.ndarray,
            unpickler.np.memmap,
        ):
            # We need to reconstruct another subclass
            new_array = _reconstruct(self.subclass, (0,), "b")
            return new_array.__array_prepare__(array)
        else:
            return array
|
||||
|
||||
|
||||
class ZNDArrayWrapper(NDArrayWrapper):
    """An object to be persisted instead of numpy arrays.

    This object store the Zfile filename in which
    the data array has been persisted, and the meta information to
    retrieve it.
    The reason that we store the raw buffer data of the array and
    the meta information, rather than array representation routine
    (tobytes) is that it enables us to use completely the strided
    model to avoid memory copies (a and a.T store as fast). In
    addition saving the heavy information separately can avoid
    creating large temporary buffers when unpickling data with
    large arrays.
    """

    def __init__(self, filename, init_args, state):
        """Constructor. Store the useful information for later."""
        self.filename = filename
        # Arguments for numpy's _reconstruct, and the __setstate__ tuple
        # minus the raw data buffer (appended from the z-file on read).
        self.state = state
        self.init_args = init_args

    def read(self, unpickler):
        """Reconstruct the array from the meta-information and the z-file."""
        # Here we a simply reproducing the unpickling mechanism for numpy
        # arrays
        filename = os.path.join(unpickler._dirname, self.filename)
        array = _reconstruct(*self.init_args)
        with open(filename, "rb") as f:
            data = read_zfile(f)
        # The decompressed buffer is the last element of the ndarray
        # __setstate__ tuple.
        state = self.state + (data,)
        array.__setstate__(state)
        return array
|
||||
|
||||
|
||||
class ZipNumpyUnpickler(Unpickler):
    """A subclass of the Unpickler to unpickle our numpy pickles."""

    dispatch = Unpickler.dispatch.copy()

    def __init__(self, filename, file_handle, mmap_mode=None):
        """Constructor.

        Parameters
        ----------
        filename: str
            Path of the main pickle; its directory is kept to resolve
            companion array files.
        file_handle: file object
            Open z-file handle; decompressed fully into memory.
        mmap_mode: str, optional
            Memory-map mode forwarded to wrappers that load arrays.
        """
        self._filename = os.path.basename(filename)
        self._dirname = os.path.dirname(filename)
        self.mmap_mode = mmap_mode
        # Decompress the whole z-file up front into an in-memory buffer.
        self.file_handle = self._open_pickle(file_handle)
        Unpickler.__init__(self, self.file_handle)
        # Delayed numpy import so non-array pickles load without numpy.
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def _open_pickle(self, file_handle):
        """Return an in-memory stream with the decompressed pickle data."""
        return BytesIO(read_zfile(file_handle))

    def load_build(self):
        """Set the state of a newly created object.

        We capture it to replace our place-holder objects,
        NDArrayWrapper, by the array we are interested in. We
        replace them directly in the stack of pickler.
        """
        Unpickler.load_build(self)
        if isinstance(self.stack[-1], NDArrayWrapper):
            if self.np is None:
                raise ImportError(
                    "Trying to unpickle an ndarray, but numpy didn't import correctly"
                )
            nd_array_wrapper = self.stack.pop()
            array = nd_array_wrapper.read(self)
            self.stack.append(array)

    # Register our override for the BUILD opcode.
    dispatch[pickle.BUILD[0]] = load_build
|
||||
|
||||
|
||||
def load_compatibility(filename):
    """Reconstruct a Python object from a file persisted with joblib.dump.

    This function ensures the compatibility with joblib old persistence format
    (<= 0.9.3).

    Parameters
    ----------
    filename: string
        The name of the file from which to load the object

    Returns
    -------
    result: any Python object
        The object stored in the file.

    See Also
    --------
    joblib.dump : function to save an object

    Notes
    -----

    This function can load numpy array files saved separately during the
    dump.
    """
    with open(filename, "rb") as file_handle:
        # We are careful to open the file handle early and keep it open to
        # avoid race-conditions on renames. That said, if data is stored in
        # companion files, moving the directory will create a race when
        # joblib tries to access the companion files.
        unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
        try:
            obj = unpickler.load()
        except UnicodeDecodeError as exc:
            # More user-friendly error message
            new_exc = ValueError(
                "You may be trying to read with "
                "python 3 a joblib pickle generated with python 2. "
                "This feature is not supported by joblib."
            )
            new_exc.__cause__ = exc
            raise new_exc
        finally:
            # The unpickler works on an in-memory BytesIO copy; close it
            # regardless of success.
            if hasattr(unpickler, "file_handle"):
                unpickler.file_handle.close()
        return obj
|
||||
@@ -0,0 +1,291 @@
|
||||
"""Utilities for fast persistence of big data, with optional compression."""
|
||||
|
||||
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
|
||||
# Copyright (c) 2009 Gael Varoquaux
|
||||
# License: BSD Style, 3 clauses.
|
||||
|
||||
import contextlib
|
||||
import io
|
||||
import pickle
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from .compressor import _COMPRESSORS, _ZFILE_PREFIX
|
||||
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
np = None
|
||||
|
||||
Unpickler = pickle._Unpickler
|
||||
Pickler = pickle._Pickler
|
||||
xrange = range
|
||||
|
||||
|
||||
try:
|
||||
# The python standard library can be built without bz2 so we make bz2
|
||||
# usage optional.
|
||||
# see https://github.com/scikit-learn/scikit-learn/issues/7526 for more
|
||||
# details.
|
||||
import bz2
|
||||
except ImportError:
|
||||
bz2 = None
|
||||
|
||||
# Buffer size used in io.BufferedReader and io.BufferedWriter
|
||||
_IO_BUFFER_SIZE = 1024**2
|
||||
|
||||
|
||||
def _is_raw_file(fileobj):
    """Check if fileobj is a raw file object, e.g created with open."""
    # Buffered wrappers expose the underlying FileIO via .raw.
    underlying = getattr(fileobj, "raw", fileobj)
    return isinstance(underlying, io.FileIO)
|
||||
|
||||
|
||||
def _get_prefixes_max_len():
    """Return the longest magic-number prefix length among known formats."""
    lengths = [len(_ZFILE_PREFIX)]
    lengths.extend(len(compressor.prefix) for compressor in _COMPRESSORS.values())
    return max(lengths)
|
||||
|
||||
|
||||
def _is_numpy_array_byte_order_mismatch(array):
    """Check if numpy array is having byte order mismatch.

    True when the dtype declares the opposite endianness of the host, or
    when every field of a structured ('|') dtype does.
    """
    dtype = array.dtype
    # The endianness marker that would be *wrong* on this host.
    foreign = "<" if sys.byteorder == "big" else ">"
    return dtype.byteorder == foreign or (
        dtype.byteorder == "|"
        and dtype.fields
        and all(field[0].byteorder == foreign for field in dtype.fields.values())
    )
|
||||
|
||||
|
||||
def _ensure_native_byte_order(array):
    """Use the byte order of the host while preserving values

    Does nothing if array already uses the system byte order.
    """
    if not _is_numpy_array_byte_order_mismatch(array):
        return array
    # byteswap() flips the bytes; the view relabels the dtype as native.
    return array.byteswap().view(array.dtype.newbyteorder("="))
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Cache file utilities
|
||||
def _detect_compressor(fileobj):
    """Return the name of the compressor matching fileobj.

    Parameters
    ----------
    fileobj: file object

    Returns
    -------
    str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'}
    """
    # Grab just enough leading bytes to compare against every known
    # magic number.
    max_prefix_len = _get_prefixes_max_len()
    if hasattr(fileobj, "peek"):
        # peek() returns the bytes without advancing the file position.
        first_bytes = fileobj.peek(max_prefix_len)
    else:
        # Non-peekable object: read, then rewind to leave the cursor
        # where the caller expects it.
        first_bytes = fileobj.read(max_prefix_len)
        fileobj.seek(0)

    if first_bytes.startswith(_ZFILE_PREFIX):
        # Legacy joblib (< 0.10) zfile container.
        return "compat"
    for name, compressor in _COMPRESSORS.items():
        if first_bytes.startswith(compressor.prefix):
            return name
    return "not-compressed"
|
||||
|
||||
|
||||
def _buffered_read_file(fobj):
    """Wrap a read file object in a BufferedReader using joblib's buffer size."""
    buffered = io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE)
    return buffered
|
||||
|
||||
|
||||
def _buffered_write_file(fobj):
    """Wrap a write file object in a BufferedWriter using joblib's buffer size."""
    buffered = io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE)
    return buffered
|
||||
|
||||
|
||||
@contextlib.contextmanager
def _validate_fileobject_and_memmap(fileobj, filename, mmap_mode=None):
    """Utility function opening the right fileobject from a filename.

    The magic number is used to choose between the type of file object to open:
    * regular file object (default)
    * zlib file object
    * gzip file object
    * bz2 file object
    * lzma file object (for xz and lzma compressor)

    Parameters
    ----------
    fileobj: file object
    filename: str
        filename path corresponding to the fileobj parameter.
    mmap_mode: str
        memory map mode that should be used to open the pickle file. This
        parameter is useful to verify that the user is not trying to use
        mmap_mode with compression. Default: None.

    Yields
    ------
    a tuple with a file like object, and the validated mmap_mode.

    """
    # Detect if the fileobj contains compressed data.
    compressor = _detect_compressor(fileobj)
    validated_mmap_mode = mmap_mode

    if compressor == "compat":
        # Compatibility with old pickle mode: simply return the input
        # filename "as-is" and let the compatibility function be called by the
        # caller.
        warnings.warn(
            "The file '%s' has been generated with a joblib "
            "version less than 0.10. "
            "Please regenerate this pickle file." % filename,
            DeprecationWarning,
            stacklevel=2,
        )
        yield filename, validated_mmap_mode
    else:
        if compressor in _COMPRESSORS:
            # based on the compressor detected in the file, we open the
            # correct decompressor file object, wrapped in a buffer.
            compressor_wrapper = _COMPRESSORS[compressor]
            inst = compressor_wrapper.decompressor_file(fileobj)
            fileobj = _buffered_read_file(inst)

        # Checking if incompatible load parameters with the type of file:
        # mmap_mode cannot be used with compressed file or in memory buffers
        # such as io.BytesIO.
        # NOTE: the warning messages below are formatted with `% locals()`,
        # so the local variable names (mmap_mode, filename, fileobj) are part
        # of the message format strings — do not rename them.
        if mmap_mode is not None:
            validated_mmap_mode = None
            if isinstance(fileobj, io.BytesIO):
                warnings.warn(
                    "In memory persistence is not compatible with "
                    'mmap_mode "%(mmap_mode)s" flag passed. '
                    "mmap_mode option will be ignored." % locals(),
                    stacklevel=2,
                )
            elif compressor != "not-compressed":
                warnings.warn(
                    'mmap_mode "%(mmap_mode)s" is not compatible '
                    "with compressed file %(filename)s. "
                    '"%(mmap_mode)s" flag will be ignored.' % locals(),
                    stacklevel=2,
                )
            elif not _is_raw_file(fileobj):
                warnings.warn(
                    '"%(fileobj)r" is not a raw file, mmap_mode '
                    '"%(mmap_mode)s" flag will be ignored.' % locals(),
                    stacklevel=2,
                )
            else:
                # Raw uncompressed file: memmapping is safe, keep the
                # requested mode.
                validated_mmap_mode = mmap_mode

        yield fileobj, validated_mmap_mode
|
||||
|
||||
|
||||
def _write_fileobject(filename, compress=("zlib", 3)):
    """Return the right compressor file object in write mode.

    Parameters
    ----------
    filename: str
        Path of the file to open for compressed writing.
    compress: tuple
        ``(method_name, compress_level)`` pair.  An unregistered method
        name falls back to zlib (historical behavior).

    Returns
    -------
    An io.BufferedWriter wrapping the compressor file object.
    """
    compressmethod = compress[0]
    compresslevel = compress[1]

    # The original if/else branches were identical except for the
    # compressor key; collapse them into a single construction with a
    # zlib fallback for unknown methods.
    if compressmethod not in _COMPRESSORS:
        compressmethod = "zlib"
    file_instance = _COMPRESSORS[compressmethod].compressor_file(
        filename, compresslevel=compresslevel
    )
    return _buffered_write_file(file_instance)
|
||||
|
||||
|
||||
# Utility functions/variables from numpy required for writing arrays.
# We need at least the functions introduced in version 1.9 of numpy. Here,
# we use the ones from numpy 1.10.2.
# NOTE(review): presumably consumed by the chunked array read/write helpers
# elsewhere in this module — confirm at call sites.  256 KiB.
BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
|
||||
|
||||
|
||||
def _read_bytes(fp, size, error_template="ran out of data"):
|
||||
"""Read from file-like object until size bytes are read.
|
||||
|
||||
TODO python2_drop: is it still needed? The docstring mentions python 2.6
|
||||
and it looks like this can be at least simplified ...
|
||||
|
||||
Raises ValueError if not EOF is encountered before size bytes are read.
|
||||
Non-blocking objects only supported if they derive from io objects.
|
||||
|
||||
Required as e.g. ZipExtFile in python 2.6 can return less data than
|
||||
requested.
|
||||
|
||||
This function was taken from numpy/lib/format.py in version 1.10.2.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fp: file-like object
|
||||
size: int
|
||||
error_template: str
|
||||
|
||||
Returns
|
||||
-------
|
||||
a bytes object
|
||||
The data read in bytes.
|
||||
|
||||
"""
|
||||
data = bytes()
|
||||
while True:
|
||||
# io files (default in python3) return None or raise on
|
||||
# would-block, python2 file will truncate, probably nothing can be
|
||||
# done about that. note that regular files can't be non-blocking
|
||||
try:
|
||||
r = fp.read(size - len(data))
|
||||
data += r
|
||||
if len(r) == 0 or len(data) == size:
|
||||
break
|
||||
except io.BlockingIOError:
|
||||
pass
|
||||
if len(data) != size:
|
||||
msg = "EOF: reading %s, expected %d bytes got %d"
|
||||
raise ValueError(msg % (error_template, size, len(data)))
|
||||
else:
|
||||
return data
|
||||
|
||||
|
||||
def _reconstruct(*args, **kwargs):
|
||||
# Wrapper for numpy._core.multiarray._reconstruct with backward compat
|
||||
# for numpy 1.X
|
||||
#
|
||||
# XXX: Remove this function when numpy 1.X is not supported anymore
|
||||
|
||||
np_major_version = np.__version__[:2]
|
||||
if np_major_version == "1.":
|
||||
from numpy.core.multiarray import _reconstruct as np_reconstruct
|
||||
elif np_major_version == "2.":
|
||||
from numpy._core.multiarray import _reconstruct as np_reconstruct
|
||||
|
||||
return np_reconstruct(*args, **kwargs)
|
||||
2075
Backend/venv/lib/python3.12/site-packages/joblib/parallel.py
Normal file
2075
Backend/venv/lib/python3.12/site-packages/joblib/parallel.py
Normal file
File diff suppressed because it is too large
Load Diff
362
Backend/venv/lib/python3.12/site-packages/joblib/pool.py
Normal file
362
Backend/venv/lib/python3.12/site-packages/joblib/pool.py
Normal file
@@ -0,0 +1,362 @@
|
||||
"""Custom implementation of multiprocessing.Pool with custom pickler.
|
||||
|
||||
This module provides efficient ways of working with data stored in
|
||||
shared memory with numpy.memmap arrays without inducing any memory
|
||||
copy between the parent and child processes.
|
||||
|
||||
This module should not be imported if multiprocessing is not
|
||||
available as it implements subclasses of multiprocessing Pool
|
||||
that uses a custom alternative to SimpleQueue.
|
||||
|
||||
"""
|
||||
# Author: Olivier Grisel <olivier.grisel@ensta.org>
|
||||
# Copyright: 2012, Olivier Grisel
|
||||
# License: BSD 3 clause
|
||||
|
||||
import copyreg
|
||||
import sys
|
||||
import warnings
|
||||
from time import sleep
|
||||
|
||||
try:
    WindowsError
except NameError:
    # WindowsError only exists on Windows; bind a harmless placeholder
    # type on other platforms so `isinstance(e, WindowsError)` checks in
    # this module always work (and never match).
    WindowsError = type(None)
|
||||
|
||||
from io import BytesIO
|
||||
|
||||
# We need the class definition to derive from it, not the multiprocessing.Pool
|
||||
# factory function
|
||||
from multiprocessing.pool import Pool
|
||||
from pickle import HIGHEST_PROTOCOL, Pickler
|
||||
|
||||
from ._memmapping_reducer import TemporaryResourcesManager, get_memmapping_reducers
|
||||
from ._multiprocessing_helpers import assert_spawning, mp
|
||||
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
np = None
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Enable custom pickling in Pool queues
|
||||
|
||||
|
||||
class CustomizablePickler(Pickler):
    """Pickler that accepts custom reducers.

    HIGHEST_PROTOCOL is selected by default as this pickler is used to
    pickle ephemeral datastructures for interprocess communication,
    hence no backward compatibility is required.

    `reducers` is expected to be a dictionary with key/values being
    `(type, callable)` pairs where `callable`, given an instance of
    `type`, returns a tuple `(constructor, tuple_of_objects)` to rebuild
    an instance out of the pickled `tuple_of_objects` as a `__reduce__`
    method would. See the standard library documentation on pickling for
    more details.
    """

    # The pure Python pickler exposes a class-level `dispatch` registry,
    # while the C implementation supports the per-instance
    # `dispatch_table` attribute (https://bugs.python.org/issue14166).
    # Handle both so the faster C pickler is used whenever available.

    def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
        Pickler.__init__(self, writer, protocol=protocol)
        if reducers is None:
            reducers = {}
        if hasattr(Pickler, "dispatch"):
            # Pure Python pickler: copy the class-level registry so that
            # registrations stay local to this instance.
            self.dispatch = Pickler.dispatch.copy()
        else:
            # C pickler: start from a copy of the default copyreg table.
            self.dispatch_table = copyreg.dispatch_table.copy()
        for reduced_type, reduce_func in reducers.items():
            self.register(reduced_type, reduce_func)

    def register(self, type, reduce_func):
        """Attach a reducer function to a given type in the dispatch table."""
        if hasattr(Pickler, "dispatch"):
            # The pure Python pickler dispatching is not explicitly
            # customizable: wrap the reducer in a closure with the
            # signature the dispatch machinery expects.
            def dispatcher(self, obj):
                self.save_reduce(obj=obj, *reduce_func(obj))

            self.dispatch[type] = dispatcher
        else:
            self.dispatch_table[type] = reduce_func
|
||||
|
||||
|
||||
class CustomizablePicklingQueue(object):
    """Locked Pipe implementation that uses a customizable pickler.

    This class is an alternative to the multiprocessing implementation
    of SimpleQueue in order to make it possible to pass custom pickling
    reducers, for instance to avoid memory copy when passing memory
    mapped datastructures.

    `reducers` is expected to be a dict with key / values being
    `(type, callable)` pairs where `callable`, given an instance of
    `type`, returns a tuple `(constructor, tuple_of_objects)` to rebuild
    an instance out of the pickled `tuple_of_objects` as a `__reduce__`
    method would.

    See the standard library documentation on pickling for more details.
    """

    def __init__(self, context, reducers=None):
        self._reducers = reducers
        self._reader, self._writer = context.Pipe(duplex=False)
        self._rlock = context.Lock()
        # Writes to a message-oriented win32 pipe are atomic, so no
        # write lock is needed on Windows.
        self._wlock = None if sys.platform == "win32" else context.Lock()
        self._make_methods()

    def __getstate__(self):
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock, self._reducers)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock, self._reducers) = state
        self._make_methods()

    def empty(self):
        """Return True when no item is waiting in the pipe."""
        return not self._reader.poll()

    def _make_methods(self):
        # Rebuild the bound `get`/`put` closures; called both at
        # construction time and after unpickling in a child process.
        self._recv = recv = self._reader.recv
        rlock = self._rlock

        def get():
            with rlock:
                return recv()

        self.get = get

        if self._reducers:

            def send(obj):
                # Serialize with the custom reducers, then push the raw
                # bytes through the pipe.
                buffer = BytesIO()
                CustomizablePickler(buffer, self._reducers).dump(obj)
                self._writer.send_bytes(buffer.getvalue())

            self._send = send
        else:
            self._send = send = self._writer.send

        if self._wlock is None:
            # Writes to a message oriented win32 pipe are atomic.
            self.put = send
        else:
            wlock = self._wlock

            def put(obj):
                with wlock:
                    return send(obj)

            self.put = put
|
||||
|
||||
|
||||
class PicklingPool(Pool):
    """Pool implementation with customizable pickling reducers.

    This is useful to control how data is shipped between processes and
    makes it possible to use shared memory without useless copies
    induced by the default pickling methods of the original objects
    passed as arguments to dispatch.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable`, given an instance of `type`, returns a tuple
    `(constructor, tuple_of_objects)` to rebuild an instance out of the
    pickled `tuple_of_objects` as a `__reduce__` method would.
    See the standard library documentation about pickling for more
    details.
    """

    def __init__(
        self, processes=None, forward_reducers=None, backward_reducers=None, **kwargs
    ):
        self._forward_reducers = {} if forward_reducers is None else forward_reducers
        self._backward_reducers = {} if backward_reducers is None else backward_reducers
        # kwargs entries intentionally override `processes` if duplicated,
        # matching dict.update semantics.
        poolargs = {"processes": processes, **kwargs}
        super(PicklingPool, self).__init__(**poolargs)

    def _setup_queues(self):
        # Replace the default SimpleQueues with reducer-aware ones.
        context = getattr(self, "_ctx", mp)
        self._inqueue = CustomizablePicklingQueue(context, self._forward_reducers)
        self._outqueue = CustomizablePicklingQueue(context, self._backward_reducers)
        self._quick_put = self._inqueue._send
        self._quick_get = self._outqueue._recv
|
||||
|
||||
|
||||
class MemmappingPool(PicklingPool):
    """Process pool that shares large arrays to avoid memory copy.

    This drop-in replacement for `multiprocessing.pool.Pool` makes
    it possible to work efficiently with shared memory in a numpy
    context.

    Existing instances of numpy.memmap are preserved: the child
    subprocesses will have access to the same shared memory in the
    original mode except for the 'w+' mode that is automatically
    transformed as 'r+' to avoid zeroing the original data upon
    instantiation.

    Furthermore large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem so that child
    processes can access their content via memmapping (file system
    backed shared memory).

    Note: it is important to call the terminate method to collect
    the temporary folder used by the pool.

    Parameters
    ----------
    processes: int, optional
        Number of worker processes running concurrently in the pool.
    initializer: callable, optional
        Callable executed on worker process creation.
    initargs: tuple, optional
        Arguments passed to the initializer callable.
    temp_folder: (str, callable) optional
        If str:
          Folder to be used by the pool for memmapping large arrays
          for sharing memory with worker processes. If None, this will try in
          order:
          - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
          - /dev/shm if the folder exists and is writable: this is a RAMdisk
            filesystem available by default on modern Linux distributions,
          - the default system temporary folder that can be overridden
            with TMP, TMPDIR or TEMP environment variables, typically /tmp
            under Unix operating systems.
        if callable:
            A callable in charge of dynamically resolving a temporary folder
            for memmapping large arrays.
    max_nbytes: int or None, optional, 1e6 by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder.
        Use None to disable memmapping of large arrays.
    mmap_mode: {'r+', 'r', 'w+', 'c'}
        Memmapping mode for numpy arrays passed to workers.
        See 'max_nbytes' parameter documentation for more details.
    forward_reducers: dictionary, optional
        Reducers used to pickle objects passed from main process to worker
        processes: see below.
    backward_reducers: dictionary, optional
        Reducers used to pickle return values from workers back to the
        main process.
    verbose: int, optional
        Make it possible to monitor how the communication of numpy arrays
        with the subprocess is handled (pickling or memmapping)
    prewarm: bool or str, optional, "auto" by default.
        If True, force a read on newly memmapped array to make sure that OS
        pre-cache it in memory. This can be useful to avoid concurrent disk
        access when the same data array is passed to different worker
        processes. If "auto" (by default), prewarm is set to True, unless the
        Linux shared memory partition /dev/shm is available and used as temp
        folder.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable`, given an instance of `type`, returns a tuple
    `(constructor, tuple_of_objects)` to rebuild an instance out of the
    pickled `tuple_of_objects` as a `__reduce__` method would. See the
    standard library documentation on pickling for more details.

    """

    def __init__(
        self,
        processes=None,
        temp_folder=None,
        max_nbytes=1e6,
        mmap_mode="r",
        forward_reducers=None,
        backward_reducers=None,
        verbose=0,
        prewarm=False,
        **kwargs,
    ):
        # The manager owns the temporary folder lifecycle; it must exist
        # before the reducers are built since they resolve folder names
        # through it.
        manager = TemporaryResourcesManager(temp_folder)
        self._temp_folder_manager = manager

        # The usage of a temp_folder_resolver over a simple temp_folder is
        # superfluous for multiprocessing pools, as they don't get reused, see
        # get_memmapping_executor for more details. We still use it for code
        # simplicity.
        forward_reducers, backward_reducers = get_memmapping_reducers(
            temp_folder_resolver=manager.resolve_temp_folder_name,
            max_nbytes=max_nbytes,
            mmap_mode=mmap_mode,
            forward_reducers=forward_reducers,
            backward_reducers=backward_reducers,
            verbose=verbose,
            unlink_on_gc_collect=False,
            prewarm=prewarm,
        )

        poolargs = dict(
            processes=processes,
            forward_reducers=forward_reducers,
            backward_reducers=backward_reducers,
        )
        poolargs.update(kwargs)
        super(MemmappingPool, self).__init__(**poolargs)

    def terminate(self):
        # Retry termination a bounded number of times: the loop only
        # breaks on success; the sleep is applied for WindowsError only,
        # and a warning (not an exception) is emitted if the last attempt
        # still fails.
        # NOTE(review): a non-Windows OSError is retried immediately
        # without delay — presumably intentional best-effort behavior;
        # confirm before changing.
        n_retries = 10
        for i in range(n_retries):
            try:
                super(MemmappingPool, self).terminate()
                break
            except OSError as e:
                if isinstance(e, WindowsError):
                    # Workaround occasional "[Error 5] Access is denied" issue
                    # when trying to terminate a process under windows.
                    sleep(0.1)
                if i + 1 == n_retries:
                    warnings.warn(
                        "Failed to terminate worker processes in"
                        " multiprocessing pool: %r" % e
                    )

        # Clean up the temporary resources as the workers should now be off.
        self._temp_folder_manager._clean_temporary_resources()

    @property
    def _temp_folder(self):
        # Legacy property in tests. could be removed if we refactored the
        # memmapping tests. SHOULD ONLY BE USED IN TESTS!
        # We cache this property because it is called late in the tests - at
        # this point, all context have been unregistered, and
        # resolve_temp_folder_name raises an error.
        if getattr(self, "_cached_temp_folder", None) is not None:
            return self._cached_temp_folder
        else:
            self._cached_temp_folder = (
                self._temp_folder_manager.resolve_temp_folder_name()
            )  # noqa
            return self._cached_temp_folder
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user