updates
@@ -0,0 +1,21 @@
import sys

from pip_api._vendor.packaging import version as packaging_version
from pip_api._vendor.packaging.version import Version

# Import this now because we need it below
from pip_api._version import version

PIP_VERSION: Version = packaging_version.parse(version())  # type: ignore
PYTHON_VERSION = sys.version_info

# Import these because they depend on the above
from pip_api._hash import hash
from pip_api._installed_distributions import installed_distributions

# Import these whenever, doesn't matter
from pip_api._parse_requirements import (
    Requirement,
    UnparsedRequirement,
    parse_requirements,
)
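
Usage aside (not part of the commit): the module above exposes PIP_VERSION as a parsed Version rather than a string, so pip versions compare numerically instead of lexicographically. A minimal sketch:

import pip_api
from pip_api._vendor.packaging.version import Version

print(pip_api.PIP_VERSION)     # e.g. <Version('23.0')>, parsed from `pip --version`
print(pip_api.PYTHON_VERSION)  # sys.version_info of the running interpreter

# String comparison would get this wrong ("10.0" < "9.0"); Version does not:
assert Version("10.0") > Version("9.0")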
Binary files not shown.
Backend/venv/lib/python3.12/site-packages/pip_api/_call.py (new file, 12 lines)
@@ -0,0 +1,12 @@
import os
import subprocess
import sys


def call(*args, cwd=None):
    python_location = os.environ.get("PIPAPI_PYTHON_LOCATION", sys.executable)
    env = {**os.environ, **{"PIP_YES": "true", "PIP_DISABLE_PIP_VERSION_CHECK": "true"}}
    result = subprocess.check_output(
        [python_location, "-m", "pip"] + list(args), cwd=cwd, env=env
    )
    return result.decode()
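
Usage aside (not part of the commit): every public helper in pip_api funnels through this call() wrapper, which shells out to `<python> -m pip` and returns stdout as text. A sketch, assuming pip is available to the target interpreter:

from pip_api._call import call

# Runs `<python> -m pip --version` non-interactively; PIP_YES and
# PIP_DISABLE_PIP_VERSION_CHECK are injected into the environment, and a
# non-zero exit raises subprocess.CalledProcessError (via check_output).
print(call("--version"))

Setting the PIPAPI_PYTHON_LOCATION environment variable points the wrapper at a different interpreter, e.g. one inside a virtualenv.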
Backend/venv/lib/python3.12/site-packages/pip_api/_hash.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import os

from pip_api._vendor.packaging.version import Version  # type: ignore

import pip_api
from pip_api._call import call
from pip_api.exceptions import Incompatible, InvalidArguments

incompatible = pip_api.PIP_VERSION < Version("8.0.0")


def hash(filename: os.PathLike, algorithm: str = "sha256") -> str:
    """
    Hash the given filename. Unavailable in `pip<8.0.0`
    """
    if incompatible:
        raise Incompatible

    if algorithm not in ["sha256", "sha384", "sha512"]:
        raise InvalidArguments("Algorithm {} not supported".format(algorithm))

    result = call("hash", "--algorithm", algorithm, filename)

    # result is of the form:
    # <filename>:\n--hash=<algorithm>:<hash>\n
    return result.strip().split(":")[-1]
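
Usage aside (not part of the commit; the sdist path below is hypothetical):

import pip_api

# Delegates to `pip hash --algorithm sha512 <file>` and keeps only the hex
# digest, i.e. the text after the last ":" in pip's output.
digest = pip_api.hash("dist/example-1.0.tar.gz", algorithm="sha512")
print(digest)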
@@ -0,0 +1,116 @@
import json
import re
import os
from typing import Dict, Optional, List

import pip_api
from pip_api._call import call
from pip_api.exceptions import PipError

from pip_api._vendor.packaging.version import parse  # type: ignore


class Distribution:
    def __init__(
        self,
        name: str,
        version: str,
        location: Optional[str] = None,
        editable_project_location: Optional[str] = None,
    ):
        self.name = name
        self.version = parse(version)
        self.location = location
        self.editable_project_location = editable_project_location

        if pip_api.PIP_VERSION >= parse("21.3"):
            self.editable = bool(self.editable_project_location)
        else:
            self.editable = bool(self.location)

    def __repr__(self):
        return "<Distribution(name='{}', version='{}'{}{})>".format(
            self.name,
            self.version,
            ", location='{}'".format(self.location) if self.location else "",
            (
                ", editable_project_location='{}'".format(
                    self.editable_project_location
                )
                if self.editable_project_location
                else ""
            ),
        )


def _old_installed_distributions(local: bool):
    list_args = ["list"]
    if local:
        list_args.append("--local")
    result = call(*list_args)

    # result is of the form:
    # <package_name> (<version>)
    #
    # or, if editable
    # <package_name> (<version>, <location>)
    #
    # or, could be a warning line

    ret = {}

    pattern = re.compile(r"(.*) \((.*)\)")

    for line in result.strip().split("\n"):
        match = re.match(pattern, line)

        if match:
            name, paren = match.groups()
            version, location = (paren.split(", ") + [None])[:2]

            ret[name] = Distribution(name, version, location)
        else:
            # This is a warning line or some other output
            pass

    return ret


def _new_installed_distributions(local: bool, paths: List[os.PathLike]):
    list_args = ["list", "-v", "--format=json"]
    if local:
        list_args.append("--local")
    for path in paths:
        list_args.extend(["--path", str(path)])
    result = call(*list_args)

    ret = {}

    # The returned JSON is an array of objects, each of which looks like this:
    # { "name": "some-package", "version": "0.0.1", "location": "/path/", ... }
    # The location key was introduced with pip 10.0.0b1, so we don't assume its
    # presence. The editable_project_location key was introduced with pip 21.3,
    # so we also don't assume its presence.
    for raw_dist in json.loads(result):
        dist = Distribution(
            raw_dist["name"],
            raw_dist["version"],
            raw_dist.get("location"),
            raw_dist.get("editable_project_location"),
        )
        ret[dist.name] = dist

    return ret


def installed_distributions(
    local: bool = False, paths: List[os.PathLike] = []
) -> Dict[str, Distribution]:
    # Check whether our version of pip supports the `--path` parameter
    if pip_api.PIP_VERSION < parse("19.2") and paths:
        raise PipError(
            f"pip {pip_api.PIP_VERSION} does not support the `paths` argument"
        )
    if pip_api.PIP_VERSION < parse("9.0.0"):
        return _old_installed_distributions(local)
    return _new_installed_distributions(local, paths)
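
Usage aside (not part of the commit): judging by the import in __init__.py above, this hunk is pip_api/_installed_distributions.py. A sketch, with a hypothetical paths value:

import pip_api

dists = pip_api.installed_distributions()  # dict: name -> Distribution
for dist in dists.values():
    print(dist.name, dist.version, dist.editable)

# `paths` restricts the search to specific directories; per the guard above
# it requires pip >= 19.2.
dists = pip_api.installed_distributions(paths=["/tmp/site-packages"])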
@@ -0,0 +1,585 @@
import argparse
import ast
import os
import posixpath
import re
import string
import sys
import traceback
from collections import defaultdict
from typing import Any, Dict, Optional, Union
from urllib.parse import unquote, urljoin, urlsplit
from urllib.request import pathname2url, url2pathname

from pip_api._vendor import tomli
from pip_api._vendor.packaging import requirements, specifiers  # type: ignore
from pip_api.exceptions import PipError

parser = argparse.ArgumentParser()
parser.add_argument("req", nargs="*")
parser.add_argument("-r", "--requirement")
parser.add_argument("-e", "--editable")
# Consume index url params to avoid trying to treat them as packages.
parser.add_argument("-i", "--index-url")
parser.add_argument("--extra-index-url")
parser.add_argument("-f", "--find-links")
parser.add_argument("--hash", action="append", dest="hashes")
parser.add_argument("--trusted-host")

operators = specifiers.Specifier._operators.keys()

COMMENT_RE = re.compile(r"(^|\s)+#.*$")
VCS_SCHEMES = ["ssh", "git", "hg", "bzr", "sftp", "svn"]
WHEEL_EXTENSION = ".whl"
WHEEL_FILE_RE = re.compile(
    r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?))
    ((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
    \.whl|\.dist-info)$""",
    re.VERBOSE,
)
WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")
# https://pip.pypa.io/en/stable/cli/pip_hash/
VALID_HASHES = {"sha256", "sha384", "sha512"}


class Link:
    def __init__(self, url):
        # url can be a UNC windows share
        if url.startswith("\\\\"):
            url = _path_to_url(url)

        self._parsed_url = urlsplit(url)
        # Store the url as a private attribute to prevent accidentally
        # trying to set a new value.
        self._url = url

    @property
    def url(self):
        return self._url

    @property
    def filename(self):
        path = self.path.rstrip("/")
        name = posixpath.basename(path)
        if not name:
            # Make sure we don't leak auth information if the netloc
            # includes a username and password.
            netloc, _ = _split_auth_from_netloc(self.netloc)
            return netloc

        name = unquote(name)
        assert name, f"URL {self._url!r} produced no filename"
        return name

    @property
    def file_path(self):
        return _url_to_path(self.url)

    @property
    def scheme(self):
        return self._parsed_url.scheme

    @property
    def netloc(self):
        return self._parsed_url.netloc

    @property
    def path(self):
        return unquote(self._parsed_url.path)

    def splitext(self):
        return _splitext(posixpath.basename(self.path.rstrip("/")))

    @property
    def ext(self):
        return self.splitext()[1]

    @property
    def show_url(self):
        return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])

    @property
    def is_wheel(self):
        return self.ext == WHEEL_EXTENSION

    @property
    def is_vcs(self):
        return self.scheme in VCS_SCHEMES


def _splitext(path):
    base, ext = posixpath.splitext(path)
    if base.lower().endswith(".tar"):
        ext = base[-4:] + ext
        base = base[:-4]
    return base, ext


def _split_auth_from_netloc(netloc):
    if "@" not in netloc:
        return netloc, (None, None)

    # Split from the right because that's how urllib.parse.urlsplit()
    # behaves if more than one @ is present (which can be checked using
    # the password attribute of urlsplit()'s return value).
    auth, netloc = netloc.rsplit("@", 1)
    pw: Optional[str] = None
    if ":" in auth:
        # Split from the left because that's how urllib.parse.urlsplit()
        # behaves if more than one : is present (which again can be checked
        # using the password attribute of the return value)
        user, pw = auth.split(":", 1)
    else:
        user, pw = auth, None

    user = unquote(user)
    if pw is not None:
        pw = unquote(pw)

    return netloc, (user, pw)


def _url_to_path(url):
    assert url.startswith(
        "file:"
    ), f"You can only turn file: urls into filenames (not {url!r})"

    _, netloc, path, _, _ = urlsplit(url)

    if not netloc or netloc == "localhost":
        # According to RFC 8089, same as empty authority.
        netloc = ""
    elif WINDOWS:
        # If we have a UNC path, prepend UNC share notation.
        netloc = "\\\\" + netloc
    else:
        raise ValueError(
            f"non-local file URIs are not supported on this platform: {url!r}"
        )

    path = url2pathname(netloc + path)

    # On Windows, urlsplit parses the path as something like "/C:/Users/foo".
    # This creates issues for path-related functions like io.open(), so we try
    # to detect and strip the leading slash.
    if (
        WINDOWS
        and not netloc  # Not UNC.
        and len(path) >= 3
        and path[0] == "/"  # Leading slash to strip.
        and path[1] in string.ascii_letters  # Drive letter.
        and path[2:4] in (":", ":/")  # Colon + end of string, or colon + absolute path.
    ):
        path = path[1:]

    return path


class Requirement(requirements.Requirement):
    def __init__(self, *args, **kwargs):
        self.hashes = kwargs.pop("hashes", None)
        self.editable = kwargs.pop("editable", False)
        self.filename = kwargs.pop("filename")
        self.lineno = kwargs.pop("lineno")

        super().__init__(*args, **kwargs)


class UnparsedRequirement(object):
    def __init__(self, name, msg, filename, lineno):
        self.name = name
        self.msg = msg
        self.exception = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        return self.msg


def _read_file(filename):
    with open(filename) as f:
        return f.readlines()


def _check_invalid_requirement(req):
    if os.path.sep in req:
        add_msg = "It looks like a path."
        if os.path.exists(req):
            add_msg += " It does exist."
        else:
            add_msg += " File '%s' does not exist." % (req)
    elif "=" in req and not any(op in req for op in operators):
        add_msg = "= is not a valid operator. Did you mean == ?"
    else:
        add_msg = traceback.format_exc()
    raise PipError("Invalid requirement: '%s'\n%s" % (req, add_msg))


def _strip_extras(path):
    m = re.match(r"^(.+)(\[[^\]]+\])$", path)
    extras = None
    if m:
        path_no_extras = m.group(1)
        extras = m.group(2)
    else:
        path_no_extras = path

    return path_no_extras, extras


def _egg_fragment(url):
    _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")
    match = _egg_fragment_re.search(url)
    if not match:
        return None
    return match.group(1)


def _path_to_url(path):
    path = os.path.normpath(os.path.abspath(path))
    url = urljoin("file:", pathname2url(path))
    return url


def _parse_local_package_name(path):
    # Determine the package name from a local directory
    pyproject_toml = os.path.join(path, "pyproject.toml")
    setup_py = os.path.join(path, "setup.py")
    has_pyproject = os.path.isfile(pyproject_toml)
    has_setup = os.path.isfile(setup_py)

    if not has_pyproject and not has_setup:
        raise PipError(
            f"{path} does not appear to be a Python project: "
            f"neither 'setup.py' nor 'pyproject.toml' found."
        )

    # Prefer the name in `pyproject.toml`
    if has_pyproject:
        with open(pyproject_toml, encoding="utf-8") as f:
            pp_toml = tomli.loads(f.read())
            name = pp_toml.get("project", {}).get("name")
            if name is not None:
                return name

    # Fall back on tokenizing setup.py and walk the syntax tree to find the
    # package name
    try:
        with open(os.path.join(path, "setup.py")) as f:
            tree = ast.parse(f.read())
            setup_kwargs = [
                expr.value.keywords
                for expr in tree.body
                if isinstance(expr, ast.Expr)
                and isinstance(expr.value, ast.Call)
                and expr.value.func.id == "setup"
            ][0]
            value = [kw.value for kw in setup_kwargs if kw.arg == "name"][0]
            return value.s
    except (IndexError, AttributeError, IOError, OSError):
        raise PipError(
            "Directory %r is not installable. "
            "Could not parse package name from 'setup.py'." % path
        )


def _parse_editable(editable_req):
    url = editable_req

    # If a file path is specified with extras, strip off the extras.
    url_no_extras, extras = _strip_extras(url)
    original_url = url_no_extras

    if os.path.isdir(original_url):
        if not os.path.exists(os.path.join(original_url, "setup.py")):
            raise PipError(
                "Directory %r is not installable. File 'setup.py' not found."
                % original_url
            )
        # Treating it as code that has already been checked out
        url_no_extras = _path_to_url(url_no_extras)

    if url_no_extras.lower().startswith("file:"):
        # NOTE: url_no_extras may contain escaped characters here, meaning that
        # it may no longer be a literal package path. So we pass original_url.
        return _parse_local_package_name(original_url), url_no_extras

    if "+" not in url:
        raise PipError(
            "%s should either be a path to a local project or a VCS url "
            "beginning with svn+, git+, hg+, or bzr+" % editable_req
        )

    package_name = _egg_fragment(url)
    if not package_name:
        raise PipError(
            "Could not detect requirement name for '%s', please specify one "
            "with #egg=your_package_name" % editable_req
        )

    return package_name, url


def _filterfalse(predicate, iterable):
    if predicate is None:
        predicate = bool
    for x in iterable:
        if not predicate(x):
            yield x


def _skip_regex(lines_enum, options):
    skip_regex = options.skip_requirements_regex if options else None
    if skip_regex:
        pattern = re.compile(skip_regex)
        lines_enum = _filterfalse(lambda e: pattern.search(e[1]), lines_enum)
    return lines_enum


def _ignore_comments(lines_enum):
    """
    Strips comments and filter empty lines.
    """
    for line_number, line in lines_enum:
        line = COMMENT_RE.sub("", line)
        line = line.strip()
        if line:
            yield line_number, line


def _get_url_scheme(url):
    if ":" not in url:
        return None
    return url.split(":", 1)[0].lower()


def _is_url(name):
    scheme = _get_url_scheme(name)
    if scheme is None:
        return False
    return scheme in ["http", "https", "file", "ftp"] + VCS_SCHEMES


def _looks_like_path(name):
    if os.path.sep in name:
        return True
    if os.path.altsep is not None and os.path.altsep in name:
        return True
    if name.startswith("."):
        return True
    return False


def _is_installable_dir(path):
    if not os.path.isdir(path):
        return False
    if os.path.isfile(os.path.join(path, "pyproject.toml")):
        return True
    if os.path.isfile(os.path.join(path, "setup.py")):
        return True
    return False


def _is_archive_file(name):
    ext = _splitext(name)[1].lower()
    if ext in (
        # ZIP extensions
        ".zip",
        WHEEL_EXTENSION,
        # BZ2 extensions
        ".tar.bz2",
        ".tbz",
        # TAR extensions
        ".tar.gz",
        ".tgz",
        ".tar",
        # XZ extensions
        ".tar.xz",
        ".txz",
        ".tlz",
        ".tar.lz",
        ".tar.lzma",
    ):
        return True
    return False


def _get_url_from_path(path, name):
    if _looks_like_path(name) and os.path.isdir(path):
        if _is_installable_dir(path):
            return _path_to_url(path)
        # TODO: The is_installable_dir test here might not be necessary
        # now that it is done in load_pyproject_toml too.
        raise PipError(
            f"Directory {name!r} is not installable. Neither 'setup.py' "
            "nor 'pyproject.toml' found."
        )
    if not _is_archive_file(path):
        return None
    if os.path.isfile(path):
        return _path_to_url(path)
    urlreq_parts = name.split("@", 1)
    if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
        # If the path contains '@' and the part before it does not look
        # like a path, try to treat it as a PEP 440 URL req instead.
        return None
    return _path_to_url(path)


def _parse_requirement_url(req_str):
    original_req_str = req_str

    # Some requirements lines begin with a `git+` or similar to indicate the VCS. If this is the
    # case, remove this before proceeding any further.
    for v in VCS_SCHEMES:
        if req_str.startswith(v + "+"):
            req_str = req_str[len(v) + 1 :]
            break

    # Strip out the marker temporarily while we parse out any potential URLs
    marker_sep = "; " if _is_url(req_str) else ";"
    marker_str = None
    link = None
    if ";" in req_str:
        req_str, marker_str = req_str.split(marker_sep, 1)

    if _is_url(req_str):
        link = Link(req_str)
    else:
        path = os.path.normpath(os.path.abspath(req_str))
        p, _ = _strip_extras(path)
        url = _get_url_from_path(p, req_str)
        if url is not None:
            link = Link(url)

    # it's a local file, dir, or url
    if link is not None:
        # Handle relative file URLs
        if link.scheme == "file" and re.search(r"\.\./", link.url):
            link = Link(_path_to_url(os.path.normpath(os.path.abspath(link.path))))
        # wheel file
        if link.is_wheel:
            wheel_info = WHEEL_FILE_RE.match(link.filename)
            if wheel_info is None:
                raise PipError(f"Invalid wheel name: {link.filename}")
            wheel_name = wheel_info.group("name").replace("_", "-")
            wheel_version = wheel_info.group("ver").replace("_", "-")
            req_str = f"{wheel_name}=={wheel_version}"
        else:
            # set the req to the egg fragment. when it's not there, this
            # will become an 'unnamed' requirement
            req_str = _egg_fragment(link.url)
            if req_str is None:
                raise PipError(f"Missing egg fragment in URL: {original_req_str}")
            req_str = f"{req_str}@{link.url}"

    # Reassemble the requirement string with the original marker
    if marker_str is not None:
        req_str = f"{req_str}{marker_sep}{marker_str}"

    return req_str


def parse_requirements(
    filename: os.PathLike,
    options: Optional[Any] = None,
    include_invalid: bool = False,
    strict_hashes: bool = False,
) -> Dict[str, Union[Requirement, UnparsedRequirement]]:
    to_parse = {filename}
    parsed = set()
    name_to_req = {}

    while to_parse:
        filename = to_parse.pop()
        dirname = os.path.dirname(filename)
        parsed.add(filename)

        # Combine multi-line commands
        lines = "".join(_read_file(filename)).replace("\\\n", "").splitlines()
        lines_enum = enumerate(lines, 1)
        lines_enum = _ignore_comments(lines_enum)
        lines_enum = _skip_regex(lines_enum, options)

        for lineno, line in lines_enum:
            req: Optional[Union[Requirement, UnparsedRequirement]] = None
            known, _ = parser.parse_known_args(line.strip().split())

            hashes_by_kind = defaultdict(list)
            if known.hashes:
                for hsh in known.hashes:
                    kind, hsh = hsh.split(":", 1)
                    if kind not in VALID_HASHES:
                        raise PipError(
                            "Invalid --hash kind %s, expected one of %s"
                            % (kind, VALID_HASHES)
                        )
                    hashes_by_kind[kind].append(hsh)

            if known.req:
                req_str = str().join(known.req)
                try:
                    parsed_req_str = _parse_requirement_url(req_str)
                except PipError as e:
                    if include_invalid:
                        req = UnparsedRequirement(req_str, str(e), filename, lineno)
                    else:
                        raise

                try:  # Try to parse this as a requirement specification
                    if req is None:
                        req = Requirement(
                            parsed_req_str,
                            hashes=dict(hashes_by_kind),
                            filename=filename,
                            lineno=lineno,
                        )
                except requirements.InvalidRequirement:
                    try:
                        _check_invalid_requirement(req_str)
                    except PipError as e:
                        if include_invalid:
                            req = UnparsedRequirement(req_str, str(e), filename, lineno)
                        else:
                            raise

            elif known.requirement:
                full_path = os.path.join(dirname, known.requirement)
                if full_path not in parsed:
                    to_parse.add(full_path)
            elif known.editable:
                name, url = _parse_editable(known.editable)
                req = Requirement(
                    "%s @ %s" % (name, url),
                    filename=filename,
                    lineno=lineno,
                    editable=True,
                )
            else:
                pass  # This is an invalid requirement

            # If we've found a requirement, add it
            if req:
                if not isinstance(req, UnparsedRequirement):
                    req.comes_from = "-r {} (line {})".format(filename, lineno)  # type: ignore
                    if req.marker is not None and not req.marker.evaluate():
                        continue

                if req.name not in name_to_req:
                    name_to_req[req.name.lower()] = req
                else:
                    raise PipError(
                        "Double requirement given: %s (already in %s, name=%r)"
                        % (req, name_to_req[req.name], req.name)
                    )

    if strict_hashes:
        missing_hashes = [req for req in name_to_req.values() if not req.hashes]
        if len(missing_hashes) > 0:
            raise PipError(
                "Missing hashes for requirement in %s, line %s"
                % (missing_hashes[0].filename, missing_hashes[0].lineno)
            )

    return name_to_req
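
Usage aside (not part of the commit): per the import in __init__.py, this hunk is pip_api/_parse_requirements.py, whose entry point mirrors pip's own requirements-file handling. A sketch with a hypothetical input file:

import pip_api

# Nested `-r other.txt` includes are followed, comments and `\` line
# continuations are stripped, and requirements whose markers evaluate false
# for the running interpreter are dropped.
reqs = pip_api.parse_requirements("requirements.txt")
for name, req in reqs.items():
    print(name, str(req.specifier), req.filename, req.lineno, req.hashes)

Passing include_invalid=True returns unparseable lines as UnparsedRequirement objects instead of raising; strict_hashes=True fails if any requirement lacks a --hash.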
Backend/venv/lib/python3.12/site-packages/pip_api/_pep650.py (new file, 42 lines)
@@ -0,0 +1,42 @@
import subprocess

from pip_api._call import call


def invoke_install(path, *, dependency_group=None, **kwargs):
    try:
        call(
            "install", "--requirement", dependency_group or "requirements.txt", cwd=path
        )
    except subprocess.CalledProcessError as e:
        return e.returncode
    return 0


def invoke_uninstall(path, *, dependency_group=None, **kwargs):
    try:
        call(
            "uninstall",
            "--requirement",
            dependency_group or "requirements.txt",
            cwd=path,
        )
    except subprocess.CalledProcessError as e:
        return e.returncode
    return 0


def get_dependencies_to_install(path, *, dependency_group=None, **kwargs):
    # See https://github.com/pypa/pip/issues/53
    raise Exception("pip is unable to do a dry run")


def get_dependency_groups(path, **kwargs):
    raise Exception("pip is unable to discover dependency groups")


def update_dependencies(
    path, dependency_specifiers, *, dependency_group=None, **kwargs
):
    # See https://github.com/pypa/pip/issues/1479
    raise Exception("pip is unable to update dependency files")
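
Usage aside (not part of the commit; the project path below is hypothetical):

from pip_api._pep650 import invoke_install

# Installs <path>/requirements.txt (or a named dependency-group file) and
# returns pip's exit code, 0 on success.
status = invoke_install("/path/to/project")
print(status)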
Binary files not shown.
@@ -0,0 +1,26 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"

__version__ = "21.0"

__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"

__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014-2019 %s" % __author__
@@ -0,0 +1,25 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from .__about__ import (
    __author__,
    __copyright__,
    __email__,
    __license__,
    __summary__,
    __title__,
    __uri__,
    __version__,
)

__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]
Binary files not shown.
@@ -0,0 +1,301 @@
import collections
import functools
import os
import re
import struct
import sys
import warnings
from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple


# Python does not provide platform information at sufficient granularity to
# identify the architecture of the running executable in some cases, so we
# determine it dynamically by reading the information from the running
# process. This only applies on Linux, which uses the ELF format.
class _ELFFileHeader:
    # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
    class _InvalidELFFileHeader(ValueError):
        """
        An invalid ELF file header was found.
        """

    ELF_MAGIC_NUMBER = 0x7F454C46
    ELFCLASS32 = 1
    ELFCLASS64 = 2
    ELFDATA2LSB = 1
    ELFDATA2MSB = 2
    EM_386 = 3
    EM_S390 = 22
    EM_ARM = 40
    EM_X86_64 = 62
    EF_ARM_ABIMASK = 0xFF000000
    EF_ARM_ABI_VER5 = 0x05000000
    EF_ARM_ABI_FLOAT_HARD = 0x00000400

    def __init__(self, file: IO[bytes]) -> None:
        def unpack(fmt: str) -> int:
            try:
                data = file.read(struct.calcsize(fmt))
                result: Tuple[int, ...] = struct.unpack(fmt, data)
            except struct.error:
                raise _ELFFileHeader._InvalidELFFileHeader()
            return result[0]

        self.e_ident_magic = unpack(">I")
        if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_class = unpack("B")
        if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_data = unpack("B")
        if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_version = unpack("B")
        self.e_ident_osabi = unpack("B")
        self.e_ident_abiversion = unpack("B")
        self.e_ident_pad = file.read(7)
        format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
        format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
        format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
        format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
        self.e_type = unpack(format_h)
        self.e_machine = unpack(format_h)
        self.e_version = unpack(format_i)
        self.e_entry = unpack(format_p)
        self.e_phoff = unpack(format_p)
        self.e_shoff = unpack(format_p)
        self.e_flags = unpack(format_i)
        self.e_ehsize = unpack(format_h)
        self.e_phentsize = unpack(format_h)
        self.e_phnum = unpack(format_h)
        self.e_shentsize = unpack(format_h)
        self.e_shnum = unpack(format_h)
        self.e_shstrndx = unpack(format_h)


def _get_elf_header() -> Optional[_ELFFileHeader]:
    try:
        with open(sys.executable, "rb") as f:
            elf_header = _ELFFileHeader(f)
    except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
        return None
    return elf_header


def _is_linux_armhf() -> bool:
    # hard-float ABI can be detected from the ELF header of the running
    # process
    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
    elf_header = _get_elf_header()
    if elf_header is None:
        return False
    result = elf_header.e_ident_class == elf_header.ELFCLASS32
    result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
    result &= elf_header.e_machine == elf_header.EM_ARM
    result &= (
        elf_header.e_flags & elf_header.EF_ARM_ABIMASK
    ) == elf_header.EF_ARM_ABI_VER5
    result &= (
        elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
    ) == elf_header.EF_ARM_ABI_FLOAT_HARD
    return result


def _is_linux_i686() -> bool:
    elf_header = _get_elf_header()
    if elf_header is None:
        return False
    result = elf_header.e_ident_class == elf_header.ELFCLASS32
    result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
    result &= elf_header.e_machine == elf_header.EM_386
    return result


def _have_compatible_abi(arch: str) -> bool:
    if arch == "armv7l":
        return _is_linux_armhf()
    if arch == "i686":
        return _is_linux_i686()
    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}


# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)


class _GLibCVersion(NamedTuple):
    major: int
    minor: int


def _glibc_version_string_confstr() -> Optional[str]:
    """
    Primary implementation of glibc_version_string using os.confstr.
    """
    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
    # to be broken or missing. This strategy is used in the standard library
    # platform module.
    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
    try:
        # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
        version_string = os.confstr("CS_GNU_LIBC_VERSION")
        assert version_string is not None
        _, version = version_string.split()
    except (AssertionError, AttributeError, OSError, ValueError):
        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
        return None
    return version


def _glibc_version_string_ctypes() -> Optional[str]:
    """
    Fallback implementation of glibc_version_string using ctypes.
    """
    try:
        import ctypes
    except ImportError:
        return None

    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for the
    # main program". This way we can let the linker do the work to figure out
    # which libc our process is actually using.
    #
    # We must also handle the special case where the executable is not a
    # dynamically linked executable. This can occur when using musl libc,
    # for example. In this situation, dlopen() will error, leading to an
    # OSError. Interestingly, at least in the case of musl, there is no
    # errno set on the OSError. The single string argument used to construct
    # OSError comes from libc itself and is therefore not portable to
    # hard code here. In any case, failure to call dlopen() means we
    # can proceed, so we bail on our attempt.
    try:
        process_namespace = ctypes.CDLL(None)
    except OSError:
        return None

    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to
        # glibc.
        return None

    # Call gnu_get_libc_version, which returns a string like "2.5"
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str: str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    return version_str


def _glibc_version_string() -> Optional[str]:
    """Returns glibc version string, or None if not using glibc."""
    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()


def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
    """Parse glibc version.

    We use a regexp instead of str.split because we want to discard any
    random junk that might come after the minor version -- this might happen
    in patched/forked versions of glibc (e.g. Linaro's version of glibc
    uses version strings like "2.20-2014.11"). See gh-3588.
    """
    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
    if not m:
        warnings.warn(
            "Expected glibc version with 2 components major.minor,"
            " got: %s" % version_str,
            RuntimeWarning,
        )
        return -1, -1
    return int(m.group("major")), int(m.group("minor"))


@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
    version_str = _glibc_version_string()
    if version_str is None:
        return (-1, -1)
    return _parse_glibc_version(version_str)


# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
    sys_glibc = _get_glibc_version()
    if sys_glibc < version:
        return False
    # Check for presence of _manylinux module.
    try:
        import _manylinux  # type: ignore # noqa
    except ImportError:
        return True
    if hasattr(_manylinux, "manylinux_compatible"):
        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
        if result is not None:
            return bool(result)
        return True
    if version == _GLibCVersion(2, 5):
        if hasattr(_manylinux, "manylinux1_compatible"):
            return bool(_manylinux.manylinux1_compatible)
    if version == _GLibCVersion(2, 12):
        if hasattr(_manylinux, "manylinux2010_compatible"):
            return bool(_manylinux.manylinux2010_compatible)
    if version == _GLibCVersion(2, 17):
        if hasattr(_manylinux, "manylinux2014_compatible"):
            return bool(_manylinux.manylinux2014_compatible)
    return True


_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}


def platform_tags(linux: str, arch: str) -> Iterator[str]:
    if not _have_compatible_abi(arch):
        return
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    too_old_glibc2 = _GLibCVersion(2, 16)
    if arch in {"x86_64", "i686"}:
        # On x86/i686 also oldest glibc to be supported is (2, 5).
        too_old_glibc2 = _GLibCVersion(2, 4)
    current_glibc = _GLibCVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
    for glibc_max in glibc_max_list:
        if glibc_max.major == too_old_glibc2.major:
            min_minor = too_old_glibc2.minor
        else:
            # For other glibc major versions oldest supported is (x, 0).
            min_minor = -1
        for glibc_minor in range(glibc_max.minor, min_minor, -1):
            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
            tag = "manylinux_{}_{}".format(*glibc_version)
            if _is_compatible(tag, arch, glibc_version):
                yield linux.replace("linux", tag)
            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
            if glibc_version in _LEGACY_MANYLINUX_MAP:
                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                if _is_compatible(legacy_tag, arch, glibc_version):
                    yield linux.replace("linux", legacy_tag)
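
Usage aside (not part of the commit): platform_tags() above is a generator, so enumerating the manylinux tags a host supports is one loop; on a non-glibc platform it simply yields nothing. A sketch:

from pip_api._vendor.packaging import _manylinux

# On a glibc x86_64 host this walks from the detected glibc version down to
# 2.5, emitting PEP 600 tags interleaved with the legacy aliases, e.g.
# manylinux_2_17_x86_64 followed by manylinux2014_x86_64.
for tag in _manylinux.platform_tags("linux_x86_64", "x86_64"):
    print(tag)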
@@ -0,0 +1,136 @@
"""PEP 656 support.

This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""

import contextlib
import functools
import operator
import os
import re
import struct
import subprocess
import sys
from typing import IO, Iterator, NamedTuple, Optional, Tuple


def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
    return struct.unpack(fmt, f.read(struct.calcsize(fmt)))


def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
    """Detect musl libc location by parsing the Python executable.

    Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
    ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
    """
    f.seek(0)
    try:
        ident = _read_unpacked(f, "16B")
    except struct.error:
        return None
    if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
        return None
    f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.

    try:
        # e_fmt: Format for program header.
        # p_fmt: Format for section header.
        # p_idx: Indexes to find p_type, p_offset, and p_filesz.
        e_fmt, p_fmt, p_idx = {
            1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
            2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
        }[ident[4]]
    except KeyError:
        return None
    else:
        p_get = operator.itemgetter(*p_idx)

    # Find the interpreter section and return its content.
    try:
        _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
    except struct.error:
        return None
    for i in range(e_phnum + 1):
        f.seek(e_phoff + e_phentsize * i)
        try:
            p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
        except struct.error:
            return None
        if p_type != 3:  # Not PT_INTERP.
            continue
        f.seek(p_offset)
        interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
        if "musl" not in interpreter:
            return None
        return interpreter
    return None


class _MuslVersion(NamedTuple):
    major: int
    minor: int


def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
    if len(lines) < 2 or lines[0][:4] != "musl":
        return None
    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
    if not m:
        return None
    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))


@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
    """Detect currently-running musl runtime version.

    This is done by checking the specified executable's dynamic linking
    information, and invoking the loader to parse its output for a version
    string. If the loader is musl, the output would be something like::

        musl libc (x86_64)
        Version 1.2.2
        Dynamic Program Loader
    """
    with contextlib.ExitStack() as stack:
        try:
            f = stack.enter_context(open(executable, "rb"))
        except IOError:
            return None
        ld = _parse_ld_musl_from_elf(f)
    if not ld:
        return None
    proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
    return _parse_musl_version(proc.stderr)


def platform_tags(arch: str) -> Iterator[str]:
    """Generate musllinux tags compatible to the current platform.

    :param arch: Should be the part of platform tag after the ``linux_``
        prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
        prerequisite for the current platform to be musllinux-compatible.

    :returns: An iterator of compatible musllinux tags.
    """
    sys_musl = _get_musl_version(sys.executable)
    if sys_musl is None:  # Python not dynamically linked against musl.
        return
    for minor in range(sys_musl.minor, -1, -1):
        yield f"musllinux_{sys_musl.major}_{minor}_{arch}"


if __name__ == "__main__":  # pragma: no cover
    import sysconfig

    plat = sysconfig.get_platform()
    assert plat.startswith("linux-"), "not linux"

    print("plat:", plat)
    print("musl:", _get_musl_version(sys.executable))
    print("tags:", end=" ")
    for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
        print(t, end="\n      ")
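
Usage aside (not part of the commit): the version-parsing half of the module can be exercised directly with the sample loader banner from the docstring above:

from pip_api._vendor.packaging._musllinux import _parse_musl_version

output = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
print(_parse_musl_version(output))  # _MuslVersion(major=1, minor=2)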
@@ -0,0 +1,67 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.


class InfinityType:
    def __repr__(self) -> str:
        return "Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __lt__(self, other: object) -> bool:
        return False

    def __le__(self, other: object) -> bool:
        return False

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __ne__(self, other: object) -> bool:
        return not isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return True

    def __ge__(self, other: object) -> bool:
        return True

    def __neg__(self: object) -> "NegativeInfinityType":
        return NegativeInfinity


Infinity = InfinityType()


class NegativeInfinityType:
    def __repr__(self) -> str:
        return "-Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __lt__(self, other: object) -> bool:
        return True

    def __le__(self, other: object) -> bool:
        return True

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __ne__(self, other: object) -> bool:
        return not isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return False

    def __ge__(self, other: object) -> bool:
        return False

    def __neg__(self: object) -> InfinityType:
        return Infinity


NegativeInfinity = NegativeInfinityType()
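
Usage aside (not part of the commit): the two sentinels compare the same way against every other object, which is what makes them useful as padding in version sort keys. A sketch:

from pip_api._vendor.packaging._structures import Infinity, NegativeInfinity

assert NegativeInfinity < 0 < Infinity
print(sorted([3, Infinity, -7, NegativeInfinity]))
# -> [-Infinity, -7, 3, Infinity]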
@@ -0,0 +1,304 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pip_api._vendor.pyparsing import (  # noqa: N817
    Forward,
    Group,
    Literal as L,
    ParseException,
    ParseResults,
    QuotedString,
    ZeroOrMore,
    stringEnd,
    stringStart,
)

from .specifiers import InvalidSpecifier, Specifier

__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]

Operator = Callable[[str, str], bool]


class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """


class Node:
    def __init__(self, value: Any) -> None:
        self.value = value

    def __str__(self) -> str:
        return str(self.value)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}('{self}')>"

    def serialize(self) -> str:
        raise NotImplementedError


class Variable(Node):
    def serialize(self) -> str:
        return str(self)


class Value(Node):
    def serialize(self) -> str:
        return f'"{self}"'


class Op(Node):
    def serialize(self) -> str:
        return str(self)


VARIABLE = (
    L("implementation_version")
    | L("platform_python_implementation")
    | L("implementation_name")
    | L("python_full_version")
    | L("platform_release")
    | L("platform_version")
    | L("platform_machine")
    | L("platform_system")
    | L("python_version")
    | L("sys_platform")
    | L("os_name")
    | L("os.name")  # PEP-345
    | L("sys.platform")  # PEP-345
    | L("platform.version")  # PEP-345
    | L("platform.machine")  # PEP-345
    | L("platform.python_implementation")  # PEP-345
    | L("python_implementation")  # undocumented setuptools legacy
    | L("extra")  # PEP-508
)
ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

VERSION_CMP = (
    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

MARKER = stringStart + MARKER_EXPR + stringEnd


def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
    if isinstance(results, ParseResults):
        return [_coerce_parse_result(i) for i in results]
    else:
        return results


def _format_marker(
    marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
) -> str:

    assert isinstance(marker, (list, tuple, str))

    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself it's own list. In that case we want skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        return " ".join([m.serialize() for m in marker])
    else:
        return marker


_operators: Dict[str, Operator] = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    oper: Optional[Operator] = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")

    return oper(lhs, rhs)


class Undefined:
    pass


_undefined = Undefined()


def _get_env(environment: Dict[str, str], name: str) -> str:
    value: Union[str, Undefined] = environment.get(name, _undefined)

    if isinstance(value, Undefined):
        raise UndefinedEnvironmentName(
            f"{name!r} does not exist in evaluation environment."
        )

    return value


def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
    groups: List[List[bool]] = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, str))

        if isinstance(marker, list):
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)


def format_full_version(info: "sys._version_info") -> str:
    version = "{0.major}.{0.minor}.{0.micro}".format(info)
    kind = info.releaselevel
    if kind != "final":
        version += kind[0] + str(info.serial)
    return version


def default_environment() -> Dict[str, str]:
    iver = format_full_version(sys.implementation.version)
    implementation_name = sys.implementation.name
    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }


class Marker:
    def __init__(self, marker: str) -> None:
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            raise InvalidMarker(
                f"Invalid marker: {marker!r}, parse error at "
                f"{marker[e.loc : e.loc + 8]!r}"
            )

    def __str__(self) -> str:
        return _format_marker(self._markers)

    def __repr__(self) -> str:
        return f"<Marker('{self}')>"

    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
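
Usage aside (not part of the commit): Marker.evaluate() accepts a partial environment override, which is merged over default_environment(). A sketch:

from pip_api._vendor.packaging.markers import Marker, default_environment

m = Marker("python_version >= '3.8' and sys_platform != 'win32'")
print(m.evaluate())                           # against the running interpreter
print(m.evaluate({"sys_platform": "win32"}))  # False when overridden to win32
print(default_environment()["python_version"])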
@@ -0,0 +1,146 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import re
import string
import urllib.parse
from typing import List, Optional as TOptional, Set

from pip_api._vendor.pyparsing import (  # noqa
    Combine,
    Literal as L,
    Optional,
    ParseException,
    Regex,
    Word,
    ZeroOrMore,
    originalTextFor,
    stringEnd,
    stringStart,
)

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """


ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r"[^ ]+")("url")
URL = AT + URI

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(
    VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
)("_raw_spec")
_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start : t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
# pyparsing isn't thread safe during initialization, so we do it eagerly, see
# issue #104
REQUIREMENT.parseString("x[]")


class Requirement:
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string: str) -> None:
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                f'Parse error at "{requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
            )

        self.name: str = req.name
        if req.url:
            parsed_url = urllib.parse.urlparse(req.url)
            if parsed_url.scheme == "file":
                if urllib.parse.urlunparse(parsed_url) != req.url:
                    raise InvalidRequirement("Invalid URL given")
            elif not (parsed_url.scheme and parsed_url.netloc) or (
                not parsed_url.scheme and not parsed_url.netloc
            ):
                raise InvalidRequirement(f"Invalid URL: {req.url}")
            self.url: TOptional[str] = req.url
        else:
            self.url = None
        self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
        self.specifier: SpecifierSet = SpecifierSet(req.specifier)
        self.marker: TOptional[Marker] = req.marker if req.marker else None

    def __str__(self) -> str:
        parts: List[str] = [self.name]

        if self.extras:
            formatted_extras = ",".join(sorted(self.extras))
            parts.append(f"[{formatted_extras}]")

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append(f"@ {self.url}")
            if self.marker:
                parts.append(" ")

        if self.marker:
            parts.append(f"; {self.marker}")

        return "".join(parts)

    def __repr__(self) -> str:
        return f"<Requirement('{self}')>"
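
# Editor's sketch (not part of the vendored file): parsing a PEP 508 string
# with the Requirement class above. The requirement shown is illustrative.
#
#   >>> from pip_api._vendor.packaging.requirements import Requirement
#   >>> req = Requirement('requests[security]>=2.8.1,==2.8.*; python_version < "2.7"')
#   >>> req.name, sorted(req.extras)
#   ('requests', ['security'])
#   >>> str(req.specifier)
#   '==2.8.*,>=2.8.1'
#   >>> str(req.marker)
#   'python_version < "2.7"'
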
@@ -0,0 +1,828 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import abc
import functools
import itertools
import re
import warnings
from typing import (
    Callable,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Pattern,
    Set,
    Tuple,
    TypeVar,
    Union,
)

from .utils import canonicalize_version
from .version import LegacyVersion, Version, parse

ParsedVersion = Union[Version, LegacyVersion]
UnparsedVersion = Union[Version, LegacyVersion, str]
VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
CallableOperator = Callable[[ParsedVersion, str], bool]


class InvalidSpecifier(ValueError):
    """
    An invalid specifier was found, users should refer to PEP 440.
    """


class BaseSpecifier(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def __str__(self) -> str:
        """
        Returns the str representation of this Specifier-like object. This
        should be representative of the Specifier itself.
        """

    @abc.abstractmethod
    def __hash__(self) -> int:
        """
        Returns a hash value for this Specifier-like object.
        """

    @abc.abstractmethod
    def __eq__(self, other: object) -> bool:
        """
        Returns a boolean representing whether or not the two Specifier-like
        objects are equal.
        """

    @abc.abstractmethod
    def __ne__(self, other: object) -> bool:
        """
        Returns a boolean representing whether or not the two Specifier-like
        objects are not equal.
        """

    @abc.abstractproperty
    def prereleases(self) -> Optional[bool]:
        """
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        """
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @abc.abstractmethod
    def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
        """
        Determines if the given item is contained within this specifier.
        """

    @abc.abstractmethod
    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:
        """
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        """


class _IndividualSpecifier(BaseSpecifier):

    _operators: Dict[str, str] = {}
    _regex: Pattern[str]

    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")

        self._spec: Tuple[str, str] = (
            match.group("operator").strip(),
            match.group("version").strip(),
        )

        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    def __repr__(self) -> str:
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre)

    def __str__(self) -> str:
        return "{}{}".format(*self._spec)

    @property
    def _canonical_spec(self) -> Tuple[str, str]:
        return self._spec[0], canonicalize_version(self._spec[1])

    def __hash__(self) -> int:
        return hash(self._canonical_spec)

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            try:
                other = self.__class__(str(other))
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._canonical_spec == other._canonical_spec

    def __ne__(self, other: object) -> bool:
        if isinstance(other, str):
            try:
                other = self.__class__(str(other))
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec != other._spec

    def _get_operator(self, op: str) -> CallableOperator:
        operator_callable: CallableOperator = getattr(
            self, f"_compare_{self._operators[op]}"
        )
        return operator_callable

    def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
        if not isinstance(version, (LegacyVersion, Version)):
            version = parse(version)
        return version

    @property
    def operator(self) -> str:
        return self._spec[0]

    @property
    def version(self) -> str:
        return self._spec[1]

    @property
    def prereleases(self) -> Optional[bool]:
        return self._prereleases

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    def __contains__(self, item: str) -> bool:
        return self.contains(item)

    def contains(
        self, item: UnparsedVersion, prereleases: Optional[bool] = None
    ) -> bool:

        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``.
        normalized_item = self._coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not, if we do not support prereleases then we can short circuit
        # logic if this version is a prerelease.
        if normalized_item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        operator_callable: CallableOperator = self._get_operator(self.operator)
        return operator_callable(normalized_item, self.version)

    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:

        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = self._coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
                # else matches this specifier.
                if parsed_version.is_prerelease and not (
                    prereleases or self.prereleases
                ):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version

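# Editor's sketch (not part of the vendored file): the contains()/filter()
# behaviour implemented above, shown on the concrete Specifier subclass
# defined further down in this module. Values are illustrative.
#
#   >>> spec = Specifier(">=1.0")
#   >>> spec.contains("1.3")
#   True
#   >>> spec.contains("2.0rc1")            # pre-releases excluded by default
#   False
#   >>> spec.contains("2.0rc1", prereleases=True)
#   True
#   >>> list(spec.filter(["1.3", "2.0rc1"]))
#   ['1.3']
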
class LegacySpecifier(_IndividualSpecifier):

    _regex_str = r"""
        (?P<operator>(==|!=|<=|>=|<|>))
        \s*
        (?P<version>
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """

    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
    }

    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
        super().__init__(spec, prereleases)

        warnings.warn(
            "Creating a LegacyVersion has been deprecated and will be "
            "removed in the next major release",
            DeprecationWarning,
        )

    def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
        if not isinstance(version, LegacyVersion):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective == self._coerce_version(spec)

    def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective != self._coerce_version(spec)

    def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective <= self._coerce_version(spec)

    def _compare_greater_than_equal(
        self, prospective: LegacyVersion, spec: str
    ) -> bool:
        return prospective >= self._coerce_version(spec)

    def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective < self._coerce_version(spec)

    def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
        return prospective > self._coerce_version(spec)


def _require_version_compare(
    fn: Callable[["Specifier", ParsedVersion, str], bool]
) -> Callable[["Specifier", ParsedVersion, str], bool]:
    @functools.wraps(fn)
    def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
        if not isinstance(prospective, Version):
            return False
        return fn(self, prospective, spec)

    return wrapped


class Specifier(_IndividualSpecifier):

    _regex_str = r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)  # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)  # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
            )
            |
            (?:
                # All other operators only allow a subset of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)  # We have special cases for these
                               # operators so we want to make sure they
                               # don't match here.

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
            )
        )
        """

    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "~=": "compatible",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:

        # Compatible releases have an equivalent combination of >= and ==. That
        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore suffix segments.
        prefix = ".".join(
            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
            prospective, prefix
        )

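    # Editor's sketch (not part of the vendored file): the ~= expansion that
    # _compare_compatible() above performs. Values are illustrative.
    #
    #   >>> Specifier("~=2.2").contains("2.9")      # behaves like >=2.2,==2.*
    #   True
    #   >>> Specifier("~=2.2").contains("3.0")
    #   False
    #   >>> Specifier("~=2.2.1").contains("2.2.5")  # behaves like >=2.2.1,==2.2.*
    #   True
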
    @_require_version_compare
    def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:

        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore the local segment.
            prospective = Version(prospective.public)
            # Split the spec out by dots, and pretend that there is an implicit
            # dot in between a release segment and a pre-release segment.
            split_spec = _version_split(spec[:-2])  # Remove the trailing .*

            # Split the prospective version out by dots, and pretend that there
            # is an implicit dot in between a release segment and a pre-release
            # segment.
            split_prospective = _version_split(str(prospective))

            # Shorten the prospective version to be the same length as the spec
            # so that we can determine if the specifier is a prefix of the
            # prospective version or not.
            shortened_prospective = split_prospective[: len(split_spec)]

            # Pad out our two sides with zeros so that they both equal the same
            # length.
            padded_spec, padded_prospective = _pad_version(
                split_spec, shortened_prospective
            )

            return padded_prospective == padded_spec
        else:
            # Convert our spec string into a Version
            spec_version = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec_version.local:
                prospective = Version(prospective.public)

            return prospective == spec_version

    @_require_version_compare
    def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:

        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) <= Version(spec)

    @_require_version_compare
    def _compare_greater_than_equal(
        self, prospective: ParsedVersion, spec: str
    ) -> bool:

        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) >= Version(spec)

    @_require_version_compare
    def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:

        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
        # not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that the prospective version is both
        # less than the spec version *and* it's not a pre-release of the same
        # version in the spec.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:

        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that the prospective version is both
        # greater than the spec version *and* it's not a post-release of the
        # same version in the spec.
        return True

    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self) -> bool:

        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are, whether they include an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove it before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release then this
            # specifier allows pre-releases.
            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value


_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")


def _version_split(version: str) -> List[str]:
    result: List[str] = []
    for item in version.split("."):
        match = _prefix_regex.search(item)
        if match:
            result.extend(match.groups())
        else:
            result.append(item)
    return result


def _is_not_suffix(segment: str) -> bool:
    return not any(
        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
    )


def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
    left_split, right_split = [], []

    # Get the release segment of our versions
    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))

    # Get the rest of our versions
    left_split.append(left[len(left_split[0]) :])
    right_split.append(right[len(right_split[0]) :])

    # Insert our padding
    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))

    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))

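# Editor's sketch (not part of the vendored file): what the private helpers
# above produce during prefix matching. Values are illustrative.
#
#   >>> _version_split("2.1rc1")        # pre-release split from the release
#   ['2', '1', 'rc1']
#   >>> _pad_version(["2", "1"], ["2", "1", "5"])  # releases zero-padded
#   (['2', '1', '0'], ['2', '1', '5'])
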
class SpecifierSet(BaseSpecifier):
    def __init__(
        self, specifiers: str = "", prereleases: Optional[bool] = None
    ) -> None:

        # Split on , to break each individual specifier into its own item, and
        # strip each item to remove leading/trailing whitespace.
        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parse each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed: Set[_IndividualSpecifier] = set()
        for specifier in split_specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self) -> str:
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return "<SpecifierSet({!r}{})>".format(str(self), pre)

    def __str__(self) -> str:
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self) -> int:
        return hash(self._specs)

    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
        if isinstance(other, str):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other: object) -> bool:
        if isinstance(other, (str, _IndividualSpecifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __ne__(self, other: object) -> bool:
        if isinstance(other, (str, _IndividualSpecifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs != other._specs

    def __len__(self) -> int:
        return len(self._specs)

    def __iter__(self) -> Iterator[_IndividualSpecifier]:
        return iter(self._specs)

    @property
    def prereleases(self) -> Optional[bool]:

        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    def __contains__(self, item: UnparsedVersion) -> bool:
        return self.contains(item)

    def contains(
        self, item: UnparsedVersion, prereleases: Optional[bool] = None
    ) -> bool:

        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        # will always return True, this is an explicit design decision.
        return all(s.contains(item, prereleases=prereleases) for s in self._specs)

    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases, and which will filter out LegacyVersion in general.
        else:
            filtered: List[VersionTypeVar] = []
            found_prereleases: List[VersionTypeVar] = []

            item: UnparsedVersion
            parsed_version: Union[Version, LegacyVersion]

            for item in iterable:
                # Ensure that we have some kind of Version class for this item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered
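
# Editor's sketch (not part of the vendored file): combining specifiers with
# the SpecifierSet class above. Values are illustrative.
#
#   >>> from pip_api._vendor.packaging.specifiers import SpecifierSet
#   >>> specs = SpecifierSet(">=1.0,<2.0,!=1.3")
#   >>> "1.4" in specs                  # __contains__ -> contains()
#   True
#   >>> specs.contains("1.3")
#   False
#   >>> list(specs.filter(["0.9", "1.2", "1.3", "2.0"]))
#   ['1.2']
#   >>> str(specs & ">=1.1")            # __and__ intersects two sets
#   '!=1.3,<2.0,>=1.0,>=1.1'
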
@@ -0,0 +1,484 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import logging
import platform
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
    Dict,
    FrozenSet,
    Iterable,
    Iterator,
    List,
    Optional,
    Sequence,
    Tuple,
    Union,
    cast,
)

from . import _manylinux, _musllinux

logger = logging.getLogger(__name__)

PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]

INTERPRETER_SHORT_NAMES: Dict[str, str] = {
    "python": "py",  # Generic.
    "cpython": "cp",
    "pypy": "pp",
    "ironpython": "ip",
    "jython": "jy",
}


_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32


class Tag:
    """
    A representation of the tag triple for a wheel.

    Instances are considered immutable and thus are hashable. Equality checking
    is also supported.
    """

    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]

    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
        self._interpreter = interpreter.lower()
        self._abi = abi.lower()
        self._platform = platform.lower()
        # The __hash__ of every single element in a Set[Tag] will be evaluated each time
        # that a set calls its `.disjoint()` method, which may be called hundreds of
        # times when scanning a page of links for packages with tags matching that
        # Set[Tag]. Pre-computing the value here produces significant speedups for
        # downstream consumers.
        self._hash = hash((self._interpreter, self._abi, self._platform))

    @property
    def interpreter(self) -> str:
        return self._interpreter

    @property
    def abi(self) -> str:
        return self._abi

    @property
    def platform(self) -> str:
        return self._platform

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Tag):
            return NotImplemented

        return (
            (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
            and (self._platform == other._platform)
            and (self._abi == other._abi)
            and (self._interpreter == other._interpreter)
        )

    def __hash__(self) -> int:
        return self._hash

    def __str__(self) -> str:
        return f"{self._interpreter}-{self._abi}-{self._platform}"

    def __repr__(self) -> str:
        return "<{self} @ {self_id}>".format(self=self, self_id=id(self))


def parse_tag(tag: str) -> FrozenSet[Tag]:
    """
    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.

    Returning a set is required due to the possibility that the tag is a
    compressed tag set.
    """
    tags = set()
    interpreters, abis, platforms = tag.split("-")
    for interpreter in interpreters.split("."):
        for abi in abis.split("."):
            for platform_ in platforms.split("."):
                tags.add(Tag(interpreter, abi, platform_))
    return frozenset(tags)

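# Editor's sketch (not part of the vendored file): a "compressed" tag expands
# into the cross product of its dot-separated components. Illustrative:
#
#   >>> sorted(str(t) for t in parse_tag("cp39-none-manylinux2014_x86_64.manylinux_2_17_x86_64"))
#   ['cp39-none-manylinux2014_x86_64', 'cp39-none-manylinux_2_17_x86_64']
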
def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
    value = sysconfig.get_config_var(name)
    if value is None and warn:
        logger.debug(
            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
        )
    return value


def _normalize_string(string: str) -> str:
    return string.replace(".", "_").replace("-", "_")


def _abi3_applies(python_version: PythonVersion) -> bool:
    """
    Determine if the Python version supports abi3.

    PEP 384 was first implemented in Python 3.2.
    """
    return len(python_version) > 1 and tuple(python_version) >= (3, 2)


def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
    py_version = tuple(py_version)  # To allow for version comparison.
    abis = []
    version = _version_nodot(py_version[:2])
    debug = pymalloc = ucs4 = ""
    with_debug = _get_config_var("Py_DEBUG", warn)
    has_refcount = hasattr(sys, "gettotalrefcount")
    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
    # extension modules is the best option.
    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
    if with_debug or (with_debug is None and (has_refcount or has_ext)):
        debug = "d"
    if py_version < (3, 8):
        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
        if with_pymalloc or with_pymalloc is None:
            pymalloc = "m"
        if py_version < (3, 3):
            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
            if unicode_size == 4 or (
                unicode_size is None and sys.maxunicode == 0x10FFFF
            ):
                ucs4 = "u"
    elif debug:
        # Debug builds can also load "normal" extension modules.
        # We can also assume no UCS-4 or pymalloc requirement.
        abis.append(f"cp{version}")
    abis.insert(
        0,
        "cp{version}{debug}{pymalloc}{ucs4}".format(
            version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
        ),
    )
    return abis


def cpython_tags(
    python_version: Optional[PythonVersion] = None,
    abis: Optional[Iterable[str]] = None,
    platforms: Optional[Iterable[str]] = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a CPython interpreter.

    The tags consist of:
    - cp<python_version>-<abi>-<platform>
    - cp<python_version>-abi3-<platform>
    - cp<python_version>-none-<platform>
    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.

    If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.

    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
    their normal position and not at the beginning.
    """
    if not python_version:
        python_version = sys.version_info[:2]

    interpreter = "cp{}".format(_version_nodot(python_version[:2]))

    if abis is None:
        if len(python_version) > 1:
            abis = _cpython_abis(python_version, warn)
        else:
            abis = []
    abis = list(abis)
    # 'abi3' and 'none' are explicitly handled later.
    for explicit_abi in ("abi3", "none"):
        try:
            abis.remove(explicit_abi)
        except ValueError:
            pass

    platforms = list(platforms or _platform_tags())
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)
    if _abi3_applies(python_version):
        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)

    if _abi3_applies(python_version):
        for minor_version in range(python_version[1] - 1, 1, -1):
            for platform_ in platforms:
                interpreter = "cp{version}".format(
                    version=_version_nodot((python_version[0], minor_version))
                )
                yield Tag(interpreter, "abi3", platform_)


def _generic_abi() -> Iterator[str]:
    abi = sysconfig.get_config_var("SOABI")
    if abi:
        yield _normalize_string(abi)


def generic_tags(
    interpreter: Optional[str] = None,
    abis: Optional[Iterable[str]] = None,
    platforms: Optional[Iterable[str]] = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a generic interpreter.

    The tags consist of:
    - <interpreter>-<abi>-<platform>

    The "none" ABI will be added if it was not explicitly provided.
    """
    if not interpreter:
        interp_name = interpreter_name()
        interp_version = interpreter_version(warn=warn)
        interpreter = "".join([interp_name, interp_version])
    if abis is None:
        abis = _generic_abi()
    platforms = list(platforms or _platform_tags())
    abis = list(abis)
    if "none" not in abis:
        abis.append("none")
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)


def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
    """
    Yields Python versions in descending order.

    After the latest version, the major-only version will be yielded, and then
    all previous versions of that major version.
    """
    if len(py_version) > 1:
        yield "py{version}".format(version=_version_nodot(py_version[:2]))
    yield "py{major}".format(major=py_version[0])
    if len(py_version) > 1:
        for minor in range(py_version[1] - 1, -1, -1):
            yield "py{version}".format(version=_version_nodot((py_version[0], minor)))


def compatible_tags(
    python_version: Optional[PythonVersion] = None,
    interpreter: Optional[str] = None,
    platforms: Optional[Iterable[str]] = None,
) -> Iterator[Tag]:
    """
    Yields the sequence of tags that are compatible with a specific version of Python.

    The tags consist of:
    - py*-none-<platform>
    - <interpreter>-none-any  # ... if `interpreter` is provided.
    - py*-none-any
    """
    if not python_version:
        python_version = sys.version_info[:2]
    platforms = list(platforms or _platform_tags())
    for version in _py_interpreter_range(python_version):
        for platform_ in platforms:
            yield Tag(version, "none", platform_)
    if interpreter:
        yield Tag(interpreter, "none", "any")
    for version in _py_interpreter_range(python_version):
        yield Tag(version, "none", "any")


def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
    if not is_32bit:
        return arch

    if arch.startswith("ppc"):
        return "ppc"

    return "i386"


def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
    formats = [cpu_arch]
    if cpu_arch == "x86_64":
        if version < (10, 4):
            return []
        formats.extend(["intel", "fat64", "fat32"])

    elif cpu_arch == "i386":
        if version < (10, 4):
            return []
        formats.extend(["intel", "fat32", "fat"])

    elif cpu_arch == "ppc64":
        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
        if version > (10, 5) or version < (10, 4):
            return []
        formats.append("fat64")

    elif cpu_arch == "ppc":
        if version > (10, 6):
            return []
        formats.extend(["fat32", "fat"])

    if cpu_arch in {"arm64", "x86_64"}:
        formats.append("universal2")

    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
        formats.append("universal")

    return formats


def mac_platforms(
    version: Optional[MacVersion] = None, arch: Optional[str] = None
) -> Iterator[str]:
    """
    Yields the platform tags for a macOS system.

    The `version` parameter is a two-item tuple specifying the macOS version to
    generate platform tags for. The `arch` parameter is the CPU architecture to
    generate platform tags for. Both parameters default to the appropriate value
    for the current system.
    """
    version_str, _, cpu_arch = platform.mac_ver()
    if version is None:
        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
    if arch is None:
        arch = _mac_arch(cpu_arch)

    if (10, 0) <= version and version < (11, 0):
        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
        # "minor" version number. The major version was always 10.
        for minor_version in range(version[1], -1, -1):
            compat_version = 10, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=10, minor=minor_version, binary_format=binary_format
                )

    if version >= (11, 0):
        # Starting with Mac OS 11, each yearly release bumps the major version
        # number. The minor versions are now the midyear updates.
        for major_version in range(version[0], 10, -1):
            compat_version = major_version, 0
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=major_version, minor=0, binary_format=binary_format
                )

    if version >= (11, 0):
        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
        # releases exist.
        #
        # However, the "universal2" binary format can have a
        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
        # that version of macOS.
        if arch == "x86_64":
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_formats = _mac_binary_formats(compat_version, arch)
                for binary_format in binary_formats:
                    yield "macosx_{major}_{minor}_{binary_format}".format(
                        major=compat_version[0],
                        minor=compat_version[1],
                        binary_format=binary_format,
                    )
        else:
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_format = "universal2"
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=compat_version[0],
                    minor=compat_version[1],
                    binary_format=binary_format,
                )


def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
    linux = _normalize_string(sysconfig.get_platform())
    if is_32bit:
        if linux == "linux_x86_64":
            linux = "linux_i686"
        elif linux == "linux_aarch64":
            linux = "linux_armv7l"
    _, arch = linux.split("_", 1)
    yield from _manylinux.platform_tags(linux, arch)
    yield from _musllinux.platform_tags(arch)
    yield linux


def _generic_platforms() -> Iterator[str]:
    yield _normalize_string(sysconfig.get_platform())


def _platform_tags() -> Iterator[str]:
    """
    Provides the platform tags for this installation.
    """
    if platform.system() == "Darwin":
        return mac_platforms()
    elif platform.system() == "Linux":
        return _linux_platforms()
    else:
        return _generic_platforms()


def interpreter_name() -> str:
    """
    Returns the name of the running interpreter.
    """
    name = sys.implementation.name
    return INTERPRETER_SHORT_NAMES.get(name) or name


def interpreter_version(*, warn: bool = False) -> str:
    """
    Returns the version of the running interpreter.
    """
    version = _get_config_var("py_version_nodot", warn=warn)
    if version:
        version = str(version)
    else:
        version = _version_nodot(sys.version_info[:2])
    return version


def _version_nodot(version: PythonVersion) -> str:
    return "".join(map(str, version))


def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
    """
    Returns the sequence of tag triples for the running interpreter.

    The order of the sequence corresponds to priority order for the
    interpreter, from most to least important.
    """

    interp_name = interpreter_name()
    if interp_name == "cp":
        yield from cpython_tags(warn=warn)
    else:
        yield from generic_tags()

    yield from compatible_tags()
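
# Editor's sketch (not part of the vendored file): sys_tags() yields tags in
# priority order for the running interpreter. Output depends on the system;
# the values below are what a CPython 3.12 manylinux build might produce.
#
#   >>> tags = list(sys_tags())
#   >>> str(tags[0])        # most specific tag first
#   'cp312-cp312-manylinux_2_17_x86_64'
#   >>> str(tags[-1])       # least specific fallback last
#   'py30-none-any'
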
@@ -0,0 +1,136 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import re
from typing import FrozenSet, NewType, Tuple, Union, cast

from .tags import Tag, parse_tag
from .version import InvalidVersion, Version

BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)


class InvalidWheelFilename(ValueError):
    """
    An invalid wheel filename was found, users should refer to PEP 427.
    """


class InvalidSdistFilename(ValueError):
    """
    An invalid sdist filename was found, users should refer to the packaging user guide.
    """


_canonicalize_regex = re.compile(r"[-_.]+")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")


def canonicalize_name(name: str) -> NormalizedName:
    # This is taken from PEP 503.
    value = _canonicalize_regex.sub("-", name).lower()
    return cast(NormalizedName, value)


def canonicalize_version(version: Union[Version, str]) -> str:
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment.
    """
    if isinstance(version, str):
        try:
            parsed = Version(version)
        except InvalidVersion:
            # Legacy versions cannot be normalized
            return version
    else:
        parsed = version

    parts = []

    # Epoch
    if parsed.epoch != 0:
        parts.append(f"{parsed.epoch}!")

    # Release segment
    # NB: This strips trailing '.0's to normalize
    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))

    # Pre-release
    if parsed.pre is not None:
        parts.append("".join(str(x) for x in parsed.pre))

    # Post-release
    if parsed.post is not None:
        parts.append(f".post{parsed.post}")

    # Development release
    if parsed.dev is not None:
        parts.append(f".dev{parsed.dev}")

    # Local version segment
    if parsed.local is not None:
        parts.append(f"+{parsed.local}")

    return "".join(parts)


def parse_wheel_filename(
    filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename}"
        )

    filename = filename[:-4]
    dashes = filename.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {filename}"
        )

    parts = filename.split("-", dashes - 2)
    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name
    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
        raise InvalidWheelFilename(f"Invalid project name: {filename}")
    name = canonicalize_name(name_part)
    version = Version(parts[1])
    if dashes == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                f"Invalid build number: {build_part} in '{filename}'"
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
    else:
        build = ()
    tags = parse_tag(parts[-1])
    return (name, version, build, tags)


def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
    if filename.endswith(".tar.gz"):
        file_stem = filename[: -len(".tar.gz")]
    elif filename.endswith(".zip"):
        file_stem = filename[: -len(".zip")]
    else:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
            f" {filename}"
        )

    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    name_part, sep, version_part = file_stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")

    name = canonicalize_name(name_part)
    version = Version(version_part)
    return (name, version)
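
# Editor's sketch (not part of the vendored file): the helpers above at work.
# The wheel filename is illustrative.
#
#   >>> canonicalize_name("Django_REST.framework")
#   'django-rest-framework'
#   >>> canonicalize_version("1.2.0")       # trailing ".0" release segments drop
#   '1.2'
#   >>> name, ver, build, tags = parse_wheel_filename("pip_api-0.0.20-py3-none-any.whl")
#   >>> name, str(ver), build
#   ('pip-api', '0.0.20', ())
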
@@ -0,0 +1,504 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import collections
import itertools
import re
import warnings
from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union

from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType

__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]

InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
    NegativeInfinityType,
    Tuple[
        Union[
            SubLocalType,
            Tuple[SubLocalType, str],
            Tuple[NegativeInfinityType, SubLocalType],
        ],
        ...,
    ],
]
CmpKey = Tuple[
    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
VersionComparisonMethod = Callable[
    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
]

_Version = collections.namedtuple(
    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)


def parse(version: str) -> Union["LegacyVersion", "Version"]:
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on whether the given version
    is a valid PEP 440 version or a legacy version.
    """
    try:
        return Version(version)
    except InvalidVersion:
        return LegacyVersion(version)


class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """


class _BaseVersion:
    _key: Union[CmpKey, LegacyCmpKey]

    def __hash__(self) -> int:
        return hash(self._key)

    # Please keep the duplicated `isinstance` check
    # in the six comparisons hereunder
    # unless you find a way to avoid adding overhead function calls.
    def __lt__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key < other._key

    def __le__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key <= other._key

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key == other._key

    def __ge__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key >= other._key

    def __gt__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key > other._key

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key != other._key


class LegacyVersion(_BaseVersion):
    def __init__(self, version: str) -> None:
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

        warnings.warn(
            "Creating a LegacyVersion has been deprecated and will be "
            "removed in the next major release",
            DeprecationWarning,
        )

    def __str__(self) -> str:
        return self._version

    def __repr__(self) -> str:
        return f"<LegacyVersion('{self}')>"

    @property
    def public(self) -> str:
        return self._version

    @property
    def base_version(self) -> str:
        return self._version

    @property
    def epoch(self) -> int:
        return -1

    @property
    def release(self) -> None:
        return None

    @property
    def pre(self) -> None:
        return None

    @property
    def post(self) -> None:
        return None

    @property
    def dev(self) -> None:
        return None

    @property
    def local(self) -> None:
        return None

    @property
    def is_prerelease(self) -> bool:
        return False

    @property
    def is_postrelease(self) -> bool:
        return False

    @property
    def is_devrelease(self) -> bool:
        return False


_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)

_legacy_version_replacement_map = {
    "pre": "c",
    "preview": "c",
    "-": "final-",
    "rc": "c",
    "dev": "@",
}


def _parse_version_parts(s: str) -> Iterator[str]:
    for part in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(part, part)

        if not part or part == ".":
            continue

        if part[:1] in "0123456789":
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield "*" + part

    # ensure that alpha/beta/candidate are before final
    yield "*final"


def _legacy_cmpkey(version: str) -> LegacyCmpKey:

    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
    # epoch greater than or equal to 0. This effectively sorts every
    # LegacyVersion, which uses the de facto standard originally implemented
    # by setuptools, before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version of setuptools,
    # prior to its adoption of the packaging library.
    parts: List[str] = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)

    return epoch, tuple(parts)


# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""


class Version(_BaseVersion):

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    def __init__(self, version: str) -> None:

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    @property
    def epoch(self) -> int:
        _epoch: int = self._version.epoch
        return _epoch

    @property
    def release(self) -> Tuple[int, ...]:
        _release: Tuple[int, ...] = self._version.release
        return _release

    @property
    def pre(self) -> Optional[Tuple[str, int]]:
        _pre: Optional[Tuple[str, int]] = self._version.pre
        return _pre

    @property
    def post(self) -> Optional[int]:
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> Optional[int]:
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> Optional[str]:
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    @property
    def public(self) -> str:
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    @property
    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        return self.dev is not None

    @property
    def major(self) -> int:
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0


def _parse_letter_version(
    letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:

    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter,
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)

    return None


_local_version_separators = re.compile(r"[\._-]")


def _parse_local_version(local: str) -> Optional[LocalType]:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
    return None


def _cmpkey(
    epoch: int,
    release: Tuple[int, ...],
    pre: Optional[Tuple[str, int]],
    post: Optional[Tuple[str, int]],
    dev: Optional[Tuple[str, int]],
    local: Optional[Tuple[SubLocalType]],
) -> CmpKey:

    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the
    # now-leading zeros until we come to something non-zero, then re-reverse
    # the rest back into the correct order and use that tuple as our sorting
    # key.
    _release = tuple(
        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        _pre: PrePostDevType = NegativeInfinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        _pre = Infinity
    else:
        _pre = pre

    # Versions without a post segment should sort before those with one.
    if post is None:
        _post: PrePostDevType = NegativeInfinity

    else:
        _post = post

    # Versions without a development segment should sort after those with one.
    if dev is None:
        _dev: PrePostDevType = Infinity

    else:
        _dev = dev

    if local is None:
        # Versions without a local segment should sort before those with one.
        _local: LocalType = NegativeInfinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP 440.
        # - Alphanumeric segments sort before numeric segments
        # - Alphanumeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        _local = tuple(
            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
        )

    return epoch, _release, _pre, _post, _dev, _local
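As a sketch of the ordering the `_cmpkey` machinery above produces (not part of the vendored file):

# Dev releases sort before pre-releases, pre-releases before the final
# release, and post releases after it; a local segment sorts after its
# public base, and a higher epoch trumps everything else.
assert Version("1.0.dev0") < Version("1.0a1") < Version("1.0rc1") < Version("1.0")
assert Version("1.0") < Version("1.0.post1") < Version("1.1")
assert Version("1.0") < Version("1.0+local.1")
assert Version("1!0.5") > Version("2.0")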
File diff suppressed because it is too large
@@ -0,0 +1,11 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

__all__ = ("loads", "load", "TOMLDecodeError")
__version__ = "2.0.1"  # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT

from ._parser import TOMLDecodeError, load, loads

# Pretend this exception was created here.
TOMLDecodeError.__module__ = __name__
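A short usage sketch of the two entry points re-exported above; the import path pip_api._vendor.tomli is an assumption of this sketch, inferred from where the files are vendored:

from pip_api._vendor import tomli  # assumed vendored import path

data = tomli.loads('[tool.example]\nname = "demo"\npinned = true\n')
assert data == {"tool": {"example": {"name": "demo", "pinned": True}}}

# load() requires a binary file object; text mode raises TypeError
# instead of silently mis-decoding (see _parser.load below).
with open("pyproject.toml", "rb") as f:  # hypothetical file
    data = tomli.load(f)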
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,691 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

from __future__ import annotations

from collections.abc import Iterable
import string
from types import MappingProxyType
from typing import Any, BinaryIO, NamedTuple

from ._re import (
    RE_DATETIME,
    RE_LOCALTIME,
    RE_NUMBER,
    match_to_datetime,
    match_to_localtime,
    match_to_number,
)
from ._types import Key, ParseFloat, Pos

ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))

# Neither of these sets include quotation mark or backslash. They are
# currently handled as separate cases in the parser functions.
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")

ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS

ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS

TOML_WS = frozenset(" \t")
TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
HEXDIGIT_CHARS = frozenset(string.hexdigits)

BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
    {
        "\\b": "\u0008",  # backspace
        "\\t": "\u0009",  # tab
        "\\n": "\u000A",  # linefeed
        "\\f": "\u000C",  # form feed
        "\\r": "\u000D",  # carriage return
        '\\"': "\u0022",  # quote
        "\\\\": "\u005C",  # backslash
    }
)


class TOMLDecodeError(ValueError):
    """An error raised if a document is not valid TOML."""


def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
    """Parse TOML from a binary file object."""
    b = __fp.read()
    try:
        s = b.decode()
    except AttributeError:
        raise TypeError(
            "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
        ) from None
    return loads(s, parse_float=parse_float)


def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]:  # noqa: C901
    """Parse TOML from a string."""

    # The spec allows converting "\r\n" to "\n", even in string
    # literals. Let's do so to simplify parsing.
    src = __s.replace("\r\n", "\n")
    pos = 0
    out = Output(NestedDict(), Flags())
    header: Key = ()
    parse_float = make_safe_parse_float(parse_float)

    # Parse one statement at a time
    # (typically means one line in TOML source)
    while True:
        # 1. Skip line leading whitespace
        pos = skip_chars(src, pos, TOML_WS)

        # 2. Parse rules. Expect one of the following:
        #    - end of file
        #    - end of line
        #    - comment
        #    - key/value pair
        #    - append dict to list (and move to its namespace)
        #    - create dict (and move to its namespace)
        #    Skip trailing whitespace when applicable.
        try:
            char = src[pos]
        except IndexError:
            break
        if char == "\n":
            pos += 1
            continue
        if char in KEY_INITIAL_CHARS:
            pos = key_value_rule(src, pos, out, header, parse_float)
            pos = skip_chars(src, pos, TOML_WS)
        elif char == "[":
            try:
                second_char: str | None = src[pos + 1]
            except IndexError:
                second_char = None
            out.flags.finalize_pending()
            if second_char == "[":
                pos, header = create_list_rule(src, pos, out)
            else:
                pos, header = create_dict_rule(src, pos, out)
            pos = skip_chars(src, pos, TOML_WS)
        elif char != "#":
            raise suffixed_err(src, pos, "Invalid statement")

        # 3. Skip comment
        pos = skip_comment(src, pos)

        # 4. Expect end of line or end of file
        try:
            char = src[pos]
        except IndexError:
            break
        if char != "\n":
            raise suffixed_err(
                src, pos, "Expected newline or end of document after a statement"
            )
        pos += 1

    return out.data.dict


class Flags:
    """Flags that map to parsed keys/namespaces."""

    # Marks an immutable namespace (inline array or inline table).
    FROZEN = 0
    # Marks a nest that has been explicitly created and can no longer
    # be opened using the "[table]" syntax.
    EXPLICIT_NEST = 1

    def __init__(self) -> None:
        self._flags: dict[str, dict] = {}
        self._pending_flags: set[tuple[Key, int]] = set()

    def add_pending(self, key: Key, flag: int) -> None:
        self._pending_flags.add((key, flag))

    def finalize_pending(self) -> None:
        for key, flag in self._pending_flags:
            self.set(key, flag, recursive=False)
        self._pending_flags.clear()

    def unset_all(self, key: Key) -> None:
        cont = self._flags
        for k in key[:-1]:
            if k not in cont:
                return
            cont = cont[k]["nested"]
        cont.pop(key[-1], None)

    def set(self, key: Key, flag: int, *, recursive: bool) -> None:  # noqa: A003
        cont = self._flags
        key_parent, key_stem = key[:-1], key[-1]
        for k in key_parent:
            if k not in cont:
                cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
            cont = cont[k]["nested"]
        if key_stem not in cont:
            cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
        cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)

    def is_(self, key: Key, flag: int) -> bool:
        if not key:
            return False  # document root has no flags
        cont = self._flags
        for k in key[:-1]:
            if k not in cont:
                return False
            inner_cont = cont[k]
            if flag in inner_cont["recursive_flags"]:
                return True
            cont = inner_cont["nested"]
        key_stem = key[-1]
        if key_stem in cont:
            cont = cont[key_stem]
            return flag in cont["flags"] or flag in cont["recursive_flags"]
        return False


class NestedDict:
    def __init__(self) -> None:
        # The parsed content of the TOML document
        self.dict: dict[str, Any] = {}

    def get_or_create_nest(
        self,
        key: Key,
        *,
        access_lists: bool = True,
    ) -> dict:
        cont: Any = self.dict
        for k in key:
            if k not in cont:
                cont[k] = {}
            cont = cont[k]
            if access_lists and isinstance(cont, list):
                cont = cont[-1]
            if not isinstance(cont, dict):
                raise KeyError("There is no nest behind this key")
        return cont

    def append_nest_to_list(self, key: Key) -> None:
        cont = self.get_or_create_nest(key[:-1])
        last_key = key[-1]
        if last_key in cont:
            list_ = cont[last_key]
            if not isinstance(list_, list):
                raise KeyError("An object other than list found behind this key")
            list_.append({})
        else:
            cont[last_key] = [{}]


class Output(NamedTuple):
    data: NestedDict
    flags: Flags


def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
    try:
        while src[pos] in chars:
            pos += 1
    except IndexError:
        pass
    return pos


def skip_until(
    src: str,
    pos: Pos,
    expect: str,
    *,
    error_on: frozenset[str],
    error_on_eof: bool,
) -> Pos:
    try:
        new_pos = src.index(expect, pos)
    except ValueError:
        new_pos = len(src)
        if error_on_eof:
            raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None

    if not error_on.isdisjoint(src[pos:new_pos]):
        while src[pos] not in error_on:
            pos += 1
        raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
    return new_pos


def skip_comment(src: str, pos: Pos) -> Pos:
    try:
        char: str | None = src[pos]
    except IndexError:
        char = None
    if char == "#":
        return skip_until(
            src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
        )
    return pos


def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
    while True:
        pos_before_skip = pos
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        pos = skip_comment(src, pos)
        if pos == pos_before_skip:
            return pos


def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
    pos += 1  # Skip "["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Cannot declare {key} twice")
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.get_or_create_nest(key)
    except KeyError:
        raise suffixed_err(src, pos, "Cannot overwrite a value") from None

    if not src.startswith("]", pos):
        raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration")
    return pos + 1, key


def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
    pos += 2  # Skip "[["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    if out.flags.is_(key, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
    # Free the namespace now that it points to another empty list item...
    out.flags.unset_all(key)
    # ...but this precise key is still prohibited from table declaration
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.append_nest_to_list(key)
    except KeyError:
        raise suffixed_err(src, pos, "Cannot overwrite a value") from None

    if not src.startswith("]]", pos):
        raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration")
    return pos + 2, key


def key_value_rule(
    src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
) -> Pos:
    pos, key, value = parse_key_value_pair(src, pos, parse_float)
    key_parent, key_stem = key[:-1], key[-1]
    abs_key_parent = header + key_parent

    relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
    for cont_key in relative_path_cont_keys:
        # Check that dotted key syntax does not redefine an existing table
        if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
            raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}")
        # Containers in the relative path can't be opened with the table syntax
        # or dotted key/value syntax in following table sections.
        out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)

    if out.flags.is_(abs_key_parent, Flags.FROZEN):
        raise suffixed_err(
            src, pos, f"Cannot mutate immutable namespace {abs_key_parent}"
        )

    try:
        nest = out.data.get_or_create_nest(abs_key_parent)
    except KeyError:
        raise suffixed_err(src, pos, "Cannot overwrite a value") from None
    if key_stem in nest:
        raise suffixed_err(src, pos, "Cannot overwrite a value")
    # Mark inline table and array namespaces recursively immutable
    if isinstance(value, (dict, list)):
        out.flags.set(header + key, Flags.FROZEN, recursive=True)
    nest[key_stem] = value
    return pos


def parse_key_value_pair(
    src: str, pos: Pos, parse_float: ParseFloat
) -> tuple[Pos, Key, Any]:
    pos, key = parse_key(src, pos)
    try:
        char: str | None = src[pos]
    except IndexError:
        char = None
    if char != "=":
        raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair")
    pos += 1
    pos = skip_chars(src, pos, TOML_WS)
    pos, value = parse_value(src, pos, parse_float)
    return pos, key, value


def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
    pos, key_part = parse_key_part(src, pos)
    key: Key = (key_part,)
    pos = skip_chars(src, pos, TOML_WS)
    while True:
        try:
            char: str | None = src[pos]
        except IndexError:
            char = None
        if char != ".":
            return pos, key
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)
        pos, key_part = parse_key_part(src, pos)
        key += (key_part,)
        pos = skip_chars(src, pos, TOML_WS)


def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
    try:
        char: str | None = src[pos]
    except IndexError:
        char = None
    if char in BARE_KEY_CHARS:
        start_pos = pos
        pos = skip_chars(src, pos, BARE_KEY_CHARS)
        return pos, src[start_pos:pos]
    if char == "'":
        return parse_literal_str(src, pos)
    if char == '"':
        return parse_one_line_basic_str(src, pos)
    raise suffixed_err(src, pos, "Invalid initial character for a key part")


def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
    pos += 1
    return parse_basic_str(src, pos, multiline=False)


def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]:
    pos += 1
    array: list = []

    pos = skip_comments_and_array_ws(src, pos)
    if src.startswith("]", pos):
        return pos + 1, array
    while True:
        pos, val = parse_value(src, pos, parse_float)
        array.append(val)
        pos = skip_comments_and_array_ws(src, pos)

        c = src[pos : pos + 1]
        if c == "]":
            return pos + 1, array
        if c != ",":
            raise suffixed_err(src, pos, "Unclosed array")
        pos += 1

        pos = skip_comments_and_array_ws(src, pos)
        if src.startswith("]", pos):
            return pos + 1, array


def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]:
    pos += 1
    nested_dict = NestedDict()
    flags = Flags()

    pos = skip_chars(src, pos, TOML_WS)
    if src.startswith("}", pos):
        return pos + 1, nested_dict.dict
    while True:
        pos, key, value = parse_key_value_pair(src, pos, parse_float)
        key_parent, key_stem = key[:-1], key[-1]
        if flags.is_(key, Flags.FROZEN):
            raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
        try:
            nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
        except KeyError:
            raise suffixed_err(src, pos, "Cannot overwrite a value") from None
        if key_stem in nest:
            raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
        nest[key_stem] = value
        pos = skip_chars(src, pos, TOML_WS)
        c = src[pos : pos + 1]
        if c == "}":
            return pos + 1, nested_dict.dict
        if c != ",":
            raise suffixed_err(src, pos, "Unclosed inline table")
        if isinstance(value, (dict, list)):
            flags.set(key, Flags.FROZEN, recursive=True)
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)


def parse_basic_str_escape(
    src: str, pos: Pos, *, multiline: bool = False
) -> tuple[Pos, str]:
    escape_id = src[pos : pos + 2]
    pos += 2
    if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
        # Skip whitespace until next non-whitespace character or end of
        # the doc. Error if non-whitespace is found before newline.
        if escape_id != "\\\n":
            pos = skip_chars(src, pos, TOML_WS)
            try:
                char = src[pos]
            except IndexError:
                return pos, ""
            if char != "\n":
                raise suffixed_err(src, pos, "Unescaped '\\' in a string")
            pos += 1
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        return pos, ""
    if escape_id == "\\u":
        return parse_hex_char(src, pos, 4)
    if escape_id == "\\U":
        return parse_hex_char(src, pos, 8)
    try:
        return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
    except KeyError:
        raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None


def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
    return parse_basic_str_escape(src, pos, multiline=True)


def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
    hex_str = src[pos : pos + hex_len]
    if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
        raise suffixed_err(src, pos, "Invalid hex value")
    pos += hex_len
    hex_int = int(hex_str, 16)
    if not is_unicode_scalar_value(hex_int):
        raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
    return pos, chr(hex_int)


def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
    pos += 1  # Skip starting apostrophe
    start_pos = pos
    pos = skip_until(
        src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
    )
    return pos + 1, src[start_pos:pos]  # Skip ending apostrophe


def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
    pos += 3
    if src.startswith("\n", pos):
        pos += 1

    if literal:
        delim = "'"
        end_pos = skip_until(
            src,
            pos,
            "'''",
            error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
            error_on_eof=True,
        )
        result = src[pos:end_pos]
        pos = end_pos + 3
    else:
        delim = '"'
        pos, result = parse_basic_str(src, pos, multiline=True)

    # Add at maximum two extra apostrophes/quotes if the end sequence
    # is 4 or 5 chars long instead of just 3.
    if not src.startswith(delim, pos):
        return pos, result
    pos += 1
    if not src.startswith(delim, pos):
        return pos, result + delim
    pos += 1
    return pos, result + (delim * 2)


def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
    if multiline:
        error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape_multiline
    else:
        error_on = ILLEGAL_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape
    result = ""
    start_pos = pos
    while True:
        try:
            char = src[pos]
        except IndexError:
            raise suffixed_err(src, pos, "Unterminated string") from None
        if char == '"':
            if not multiline:
                return pos + 1, result + src[start_pos:pos]
            if src.startswith('"""', pos):
                return pos + 3, result + src[start_pos:pos]
            pos += 1
            continue
        if char == "\\":
            result += src[start_pos:pos]
            pos, parsed_escape = parse_escapes(src, pos)
            result += parsed_escape
            start_pos = pos
            continue
        if char in error_on:
            raise suffixed_err(src, pos, f"Illegal character {char!r}")
        pos += 1


def parse_value(  # noqa: C901
    src: str, pos: Pos, parse_float: ParseFloat
) -> tuple[Pos, Any]:
    try:
        char: str | None = src[pos]
    except IndexError:
        char = None

    # IMPORTANT: order conditions based on speed of checking and likelihood

    # Basic strings
    if char == '"':
        if src.startswith('"""', pos):
            return parse_multiline_str(src, pos, literal=False)
        return parse_one_line_basic_str(src, pos)

    # Literal strings
    if char == "'":
        if src.startswith("'''", pos):
            return parse_multiline_str(src, pos, literal=True)
        return parse_literal_str(src, pos)

    # Booleans
    if char == "t":
        if src.startswith("true", pos):
            return pos + 4, True
    if char == "f":
        if src.startswith("false", pos):
            return pos + 5, False

    # Arrays
    if char == "[":
        return parse_array(src, pos, parse_float)

    # Inline tables
    if char == "{":
        return parse_inline_table(src, pos, parse_float)

    # Dates and times
    datetime_match = RE_DATETIME.match(src, pos)
    if datetime_match:
        try:
            datetime_obj = match_to_datetime(datetime_match)
        except ValueError as e:
            raise suffixed_err(src, pos, "Invalid date or datetime") from e
        return datetime_match.end(), datetime_obj
    localtime_match = RE_LOCALTIME.match(src, pos)
    if localtime_match:
        return localtime_match.end(), match_to_localtime(localtime_match)

    # Integers and "normal" floats.
    # The regex will greedily match any type starting with a decimal
    # char, so needs to be located after handling of dates and times.
    number_match = RE_NUMBER.match(src, pos)
    if number_match:
        return number_match.end(), match_to_number(number_match, parse_float)

    # Special floats
    first_three = src[pos : pos + 3]
    if first_three in {"inf", "nan"}:
        return pos + 3, parse_float(first_three)
    first_four = src[pos : pos + 4]
    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
        return pos + 4, parse_float(first_four)

    raise suffixed_err(src, pos, "Invalid value")


def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
    """Return a `TOMLDecodeError` whose error message is suffixed with
    coordinates in the source."""

    def coord_repr(src: str, pos: Pos) -> str:
        if pos >= len(src):
            return "end of document"
        line = src.count("\n", 0, pos) + 1
        if line == 1:
            column = pos + 1
        else:
            column = pos - src.rindex("\n", 0, pos)
        return f"line {line}, column {column}"

    return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")


def is_unicode_scalar_value(codepoint: int) -> bool:
    return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)


def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
    """A decorator to make `parse_float` safe.

    `parse_float` must not return dicts or lists, because these types
    would be mixed with parsed TOML tables and arrays, thus confusing
    the parser. The returned decorated callable raises `ValueError`
    instead of returning illegal types.
    """
    # The default `float` callable never returns illegal types. Optimize it.
    if parse_float is float:  # type: ignore[comparison-overlap]
        return float

    def safe_parse_float(float_str: str) -> Any:
        float_value = parse_float(float_str)
        if isinstance(float_value, (dict, list)):
            raise ValueError("parse_float must not return dicts or lists")
        return float_value

    return safe_parse_float
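One design note on make_safe_parse_float above: loads() accepts any parse_float hook, and the wrapper only guards against hooks returning dicts or lists. A short sketch using decimal.Decimal, a common lossless choice:

from decimal import Decimal

# RE_NUMBER captures the full float literal; match_to_number hands the
# raw text to parse_float, so Decimal sees every digit unrounded.
data = loads("pi = 3.14159265358979323846", parse_float=Decimal)
assert data["pi"] == Decimal("3.14159265358979323846")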
@@ -0,0 +1,107 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

from __future__ import annotations

from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
from typing import Any

from ._types import ParseFloat

# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"

RE_NUMBER = re.compile(
    r"""
0
(?:
    x[0-9A-Fa-f](?:_?[0-9A-Fa-f])*   # hex
    |
    b[01](?:_?[01])*                 # bin
    |
    o[0-7](?:_?[0-7])*               # oct
)
|
[+-]?(?:0|[1-9](?:_?[0-9])*)         # dec, integer part
(?P<floatpart>
    (?:\.[0-9](?:_?[0-9])*)?         # optional fractional part
    (?:[eE][+-]?[0-9](?:_?[0-9])*)?  # optional exponent part
)
""",
    flags=re.VERBOSE,
)
RE_LOCALTIME = re.compile(_TIME_RE_STR)
RE_DATETIME = re.compile(
    rf"""
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])  # date, e.g. 1988-10-27
(?:
    [Tt ]
    {_TIME_RE_STR}
    (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?  # optional time offset
)?
""",
    flags=re.VERBOSE,
)


def match_to_datetime(match: re.Match) -> datetime | date:
    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.

    Raises ValueError if the match does not correspond to a valid date
    or datetime.
    """
    (
        year_str,
        month_str,
        day_str,
        hour_str,
        minute_str,
        sec_str,
        micros_str,
        zulu_time,
        offset_sign_str,
        offset_hour_str,
        offset_minute_str,
    ) = match.groups()
    year, month, day = int(year_str), int(month_str), int(day_str)
    if hour_str is None:
        return date(year, month, day)
    hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    if offset_sign_str:
        tz: tzinfo | None = cached_tz(
            offset_hour_str, offset_minute_str, offset_sign_str
        )
    elif zulu_time:
        tz = timezone.utc
    else:  # local date-time
        tz = None
    return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)


@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
    sign = 1 if sign_str == "+" else -1
    return timezone(
        timedelta(
            hours=sign * int(hour_str),
            minutes=sign * int(minute_str),
        )
    )


def match_to_localtime(match: re.Match) -> time:
    hour_str, minute_str, sec_str, micros_str = match.groups()
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    return time(int(hour_str), int(minute_str), int(sec_str), micros)


def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
    if match.group("floatpart"):
        return parse_float(match.group())
    return int(match.group(), 0)
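Worth noting in match_to_number above: integer literals are converted with int(s, 0), so Python infers the radix from the literal's prefix, which is how the single RE_NUMBER pattern covers hex, binary, octal, and decimal integers. A sketch:

assert int("0x10", 0) == 16
assert int("0b101", 0) == 5
assert int("0o17", 0) == 15
assert int("1_000", 0) == 1000  # TOML-style underscores are accepted too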
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

from typing import Any, Callable, Tuple

# Type annotations
ParseFloat = Callable[[str], Any]
Key = Tuple[str, ...]
Pos = int
@@ -0,0 +1 @@
# Marker file for PEP 561
@@ -0,0 +1,10 @@
from pip_api._call import call


def version() -> str:
    result = call("--version")

    # result is of the form:
    # pip <version> from <directory> (python <python version>)

    return result.split(" ")[1]
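Given the banner format noted in the comment above, index 1 of the space-split result is the bare version number. A sketch with an illustrative banner string:

banner = "pip 23.2.1 from /tmp/venv/lib/python3.12/site-packages/pip (python 3.12)"
assert banner.split(" ")[1] == "23.2.1"  # what version() returns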
@@ -0,0 +1,10 @@
class Incompatible(Exception):
    pass


class InvalidArguments(Exception):
    pass


class PipError(Exception):
    pass