Updates
This commit is contained in:
@@ -0,0 +1,20 @@
|
||||
"""Django-Prometheus
|
||||
|
||||
https://github.com/korfuri/django-prometheus
|
||||
"""
|
||||
|
||||
# Import all files that define metrics. This has the effect that
|
||||
# `import django_prometheus` will always instantiate all metric
|
||||
# objects right away.
|
||||
from django_prometheus import middleware, models
|
||||
|
||||
__all__ = ["middleware", "models", "pip_prometheus"]
|
||||
|
||||
__version__ = "2.4.1"
|
||||
|
||||
# Import pip_prometheus to export the pip metrics automatically.
|
||||
try:
|
||||
import pip_prometheus
|
||||
except ImportError:
|
||||
# If people don't have pip, don't export anything.
|
||||
pass
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,24 @@
|
||||
from django.apps import AppConfig
|
||||
from django.conf import settings
|
||||
|
||||
import django_prometheus
|
||||
from django_prometheus.exports import SetupPrometheusExportsFromConfig
|
||||
from django_prometheus.migrations import ExportMigrations
|
||||
|
||||
|
||||
class DjangoPrometheusConfig(AppConfig):
|
||||
name = django_prometheus.__name__
|
||||
verbose_name = "Django-Prometheus"
|
||||
|
||||
def ready(self):
|
||||
"""Initializes the Prometheus exports if they are enabled in the config.
|
||||
|
||||
Note that this is called even for other management commands
|
||||
than `runserver`. As such, it is possible to scrape the
|
||||
metrics of a running `manage.py test` or of another command,
|
||||
which shouldn't be done for real monitoring (since these jobs
|
||||
are usually short-lived), but can be useful for debugging.
|
||||
"""
|
||||
SetupPrometheusExportsFromConfig()
|
||||
if getattr(settings, "PROMETHEUS_EXPORT_MIGRATIONS", False):
|
||||
ExportMigrations()
|
||||
0
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/__init__.py
vendored
Normal file
0
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/__init__.py
vendored
Normal file
Binary file not shown.
Binary file not shown.
0
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/__init__.py
vendored
Normal file
0
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/__init__.py
vendored
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,20 @@
|
||||
from django_memcached_consul import memcached
|
||||
|
||||
from django_prometheus.cache.metrics import (
|
||||
django_cache_get_total,
|
||||
django_cache_hits_total,
|
||||
django_cache_misses_total,
|
||||
)
|
||||
|
||||
|
||||
class MemcachedCache(memcached.MemcachedCache):
|
||||
"""Inherit django_memcached_consul to add metrics about hit/miss ratio"""
|
||||
|
||||
def get(self, key, default=None, version=None):
|
||||
django_cache_get_total.labels(backend="django_memcached_consul").inc()
|
||||
cached = super().get(key, default=None, version=version)
|
||||
if cached is not None:
|
||||
django_cache_hits_total.labels(backend="django_memcached_consul").inc()
|
||||
else:
|
||||
django_cache_misses_total.labels(backend="django_memcached_consul").inc()
|
||||
return cached or default
|
||||
20
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/filebased.py
vendored
Normal file
20
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/filebased.py
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
from django.core.cache.backends import filebased
|
||||
|
||||
from django_prometheus.cache.metrics import (
|
||||
django_cache_get_total,
|
||||
django_cache_hits_total,
|
||||
django_cache_misses_total,
|
||||
)
|
||||
|
||||
|
||||
class FileBasedCache(filebased.FileBasedCache):
|
||||
"""Inherit filebased cache to add metrics about hit/miss ratio"""
|
||||
|
||||
def get(self, key, default=None, version=None):
|
||||
django_cache_get_total.labels(backend="filebased").inc()
|
||||
cached = super().get(key, default=None, version=version)
|
||||
if cached is not None:
|
||||
django_cache_hits_total.labels(backend="filebased").inc()
|
||||
else:
|
||||
django_cache_misses_total.labels(backend="filebased").inc()
|
||||
return cached or default
|
||||
20
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/locmem.py
vendored
Normal file
20
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/locmem.py
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
from django.core.cache.backends import locmem
|
||||
|
||||
from django_prometheus.cache.metrics import (
|
||||
django_cache_get_total,
|
||||
django_cache_hits_total,
|
||||
django_cache_misses_total,
|
||||
)
|
||||
|
||||
|
||||
class LocMemCache(locmem.LocMemCache):
|
||||
"""Inherit filebased cache to add metrics about hit/miss ratio"""
|
||||
|
||||
def get(self, key, default=None, version=None):
|
||||
django_cache_get_total.labels(backend="locmem").inc()
|
||||
cached = super().get(key, default=None, version=version)
|
||||
if cached is not None:
|
||||
django_cache_hits_total.labels(backend="locmem").inc()
|
||||
else:
|
||||
django_cache_misses_total.labels(backend="locmem").inc()
|
||||
return cached or default
|
||||
27
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/memcached.py
vendored
Normal file
27
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/memcached.py
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
from django.core.cache.backends import memcached
|
||||
|
||||
from django_prometheus.cache.metrics import (
|
||||
django_cache_get_total,
|
||||
django_cache_hits_total,
|
||||
django_cache_misses_total,
|
||||
)
|
||||
|
||||
|
||||
class MemcachedPrometheusCacheMixin:
|
||||
def get(self, key, default=None, version=None):
|
||||
django_cache_get_total.labels(backend="memcached").inc()
|
||||
cached = super().get(key, default=None, version=version)
|
||||
if cached is not None:
|
||||
django_cache_hits_total.labels(backend="memcached").inc()
|
||||
return cached
|
||||
|
||||
django_cache_misses_total.labels(backend="memcached").inc()
|
||||
return default
|
||||
|
||||
|
||||
class PyLibMCCache(MemcachedPrometheusCacheMixin, memcached.PyLibMCCache):
|
||||
"""Inherit memcached to add metrics about hit/miss ratio"""
|
||||
|
||||
|
||||
class PyMemcacheCache(MemcachedPrometheusCacheMixin, memcached.PyMemcacheCache):
|
||||
"""Inherit memcached to add metrics about hit/miss ratio"""
|
||||
47
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/redis.py
vendored
Normal file
47
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/backends/redis.py
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
from django.core.cache.backends.redis import RedisCache as DjangoRedisCache
|
||||
from django_redis import cache, exceptions
|
||||
|
||||
from django_prometheus.cache.metrics import (
|
||||
django_cache_get_fail_total,
|
||||
django_cache_get_total,
|
||||
django_cache_hits_total,
|
||||
django_cache_misses_total,
|
||||
)
|
||||
|
||||
|
||||
class RedisCache(cache.RedisCache):
|
||||
"""Inherit redis to add metrics about hit/miss/interruption ratio"""
|
||||
|
||||
@cache.omit_exception
|
||||
def get(self, key, default=None, version=None, client=None):
|
||||
try:
|
||||
django_cache_get_total.labels(backend="redis").inc()
|
||||
cached = self.client.get(key, default=None, version=version, client=client)
|
||||
except exceptions.ConnectionInterrupted as e:
|
||||
django_cache_get_fail_total.labels(backend="redis").inc()
|
||||
if self._ignore_exceptions:
|
||||
if self._log_ignored_exceptions:
|
||||
self.logger.error(str(e))
|
||||
return default
|
||||
raise
|
||||
else:
|
||||
if cached is not None:
|
||||
django_cache_hits_total.labels(backend="redis").inc()
|
||||
return cached
|
||||
django_cache_misses_total.labels(backend="redis").inc()
|
||||
return default
|
||||
|
||||
|
||||
class NativeRedisCache(DjangoRedisCache):
|
||||
def get(self, key, default=None, version=None):
|
||||
django_cache_get_total.labels(backend="native_redis").inc()
|
||||
try:
|
||||
result = super().get(key, default=None, version=version)
|
||||
except Exception:
|
||||
django_cache_get_fail_total.labels(backend="native_redis").inc()
|
||||
raise
|
||||
if result is not None:
|
||||
django_cache_hits_total.labels(backend="native_redis").inc()
|
||||
return result
|
||||
django_cache_misses_total.labels(backend="native_redis").inc()
|
||||
return default
|
||||
28
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/metrics.py
vendored
Normal file
28
ETB-API/venv/lib/python3.12/site-packages/django_prometheus/cache/metrics.py
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
from prometheus_client import Counter
|
||||
|
||||
from django_prometheus.conf import NAMESPACE
|
||||
|
||||
django_cache_get_total = Counter(
|
||||
"django_cache_get_total",
|
||||
"Total get requests on cache",
|
||||
["backend"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
django_cache_hits_total = Counter(
|
||||
"django_cache_get_hits_total",
|
||||
"Total hits on cache",
|
||||
["backend"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
django_cache_misses_total = Counter(
|
||||
"django_cache_get_misses_total",
|
||||
"Total misses on cache",
|
||||
["backend"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
django_cache_get_fail_total = Counter(
|
||||
"django_cache_get_fail_total",
|
||||
"Total get request failures by cache",
|
||||
["backend"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
@@ -0,0 +1,27 @@
|
||||
from django.conf import settings
|
||||
|
||||
NAMESPACE = ""
|
||||
|
||||
PROMETHEUS_LATENCY_BUCKETS = (
|
||||
0.01,
|
||||
0.025,
|
||||
0.05,
|
||||
0.075,
|
||||
0.1,
|
||||
0.25,
|
||||
0.5,
|
||||
0.75,
|
||||
1.0,
|
||||
2.5,
|
||||
5.0,
|
||||
7.5,
|
||||
10.0,
|
||||
25.0,
|
||||
50.0,
|
||||
75.0,
|
||||
float("inf"),
|
||||
)
|
||||
|
||||
if settings.configured:
|
||||
NAMESPACE = getattr(settings, "PROMETHEUS_METRIC_NAMESPACE", NAMESPACE)
|
||||
PROMETHEUS_LATENCY_BUCKETS = getattr(settings, "PROMETHEUS_LATENCY_BUCKETS", PROMETHEUS_LATENCY_BUCKETS)
|
||||
Binary file not shown.
@@ -0,0 +1,20 @@
|
||||
# Import all metrics
|
||||
from django_prometheus.db.metrics import (
|
||||
Counter,
|
||||
connection_errors_total,
|
||||
connections_total,
|
||||
errors_total,
|
||||
execute_many_total,
|
||||
execute_total,
|
||||
query_duration_seconds,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Counter",
|
||||
"connection_errors_total",
|
||||
"connections_total",
|
||||
"errors_total",
|
||||
"execute_many_total",
|
||||
"execute_total",
|
||||
"query_duration_seconds",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,16 @@
|
||||
from django.db.backends.mysql import base
|
||||
|
||||
from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper
|
||||
|
||||
|
||||
class DatabaseFeatures(base.DatabaseFeatures):
|
||||
"""Our database has the exact same features as the base one."""
|
||||
|
||||
|
||||
class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
|
||||
CURSOR_CLASS = base.CursorWrapper
|
||||
|
||||
def create_cursor(self, name=None):
|
||||
cursor = self.connection.cursor()
|
||||
CursorWrapper = ExportingCursorWrapper(self.CURSOR_CLASS, self.alias, self.vendor)
|
||||
return CursorWrapper(cursor)
|
||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,21 @@
|
||||
from django.contrib.gis.db.backends.postgis import base
|
||||
from django.db.backends.postgresql.base import Cursor
|
||||
|
||||
from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper
|
||||
|
||||
|
||||
class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
|
||||
def get_new_connection(self, *args, **kwargs):
|
||||
conn = super().get_new_connection(*args, **kwargs)
|
||||
conn.cursor_factory = ExportingCursorWrapper(
|
||||
conn.cursor_factory or Cursor(),
|
||||
"postgis",
|
||||
self.vendor,
|
||||
)
|
||||
|
||||
return conn
|
||||
|
||||
def create_cursor(self, name=None):
|
||||
# cursor_factory is a kwarg to connect() so restore create_cursor()'s
|
||||
# default behavior
|
||||
return base.DatabaseWrapper.create_cursor(self, name=name)
|
||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,21 @@
|
||||
from django.db.backends.postgresql import base
|
||||
from django.db.backends.postgresql.base import Cursor
|
||||
|
||||
from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper
|
||||
|
||||
|
||||
class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
|
||||
def get_new_connection(self, *args, **kwargs):
|
||||
conn = super().get_new_connection(*args, **kwargs)
|
||||
conn.cursor_factory = ExportingCursorWrapper(
|
||||
conn.cursor_factory or Cursor(),
|
||||
self.alias,
|
||||
self.vendor,
|
||||
)
|
||||
|
||||
return conn
|
||||
|
||||
def create_cursor(self, name=None):
|
||||
# cursor_factory is a kwarg to connect() so restore create_cursor()'s
|
||||
# default behavior
|
||||
return base.DatabaseWrapper.create_cursor(self, name=name)
|
||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,12 @@
|
||||
from django.contrib.gis.db.backends.spatialite import base, features
|
||||
from django.db.backends.sqlite3 import base as sqlite_base
|
||||
|
||||
from django_prometheus.db.common import DatabaseWrapperMixin
|
||||
|
||||
|
||||
class DatabaseFeatures(features.DatabaseFeatures):
|
||||
"""Our database has the exact same features as the base one."""
|
||||
|
||||
|
||||
class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
|
||||
CURSOR_CLASS = sqlite_base.SQLiteCursorWrapper
|
||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,11 @@
|
||||
from django.db.backends.sqlite3 import base
|
||||
|
||||
from django_prometheus.db.common import DatabaseWrapperMixin
|
||||
|
||||
|
||||
class DatabaseFeatures(base.DatabaseFeatures):
|
||||
"""Our database has the exact same features as the base one."""
|
||||
|
||||
|
||||
class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
|
||||
CURSOR_CLASS = base.SQLiteCursorWrapper
|
||||
@@ -0,0 +1,80 @@
|
||||
from django_prometheus.db import (
|
||||
connection_errors_total,
|
||||
connections_total,
|
||||
errors_total,
|
||||
execute_many_total,
|
||||
execute_total,
|
||||
query_duration_seconds,
|
||||
)
|
||||
|
||||
|
||||
class ExceptionCounterByType:
|
||||
"""A context manager that counts exceptions by type.
|
||||
|
||||
Exceptions increment the provided counter, whose last label's name
|
||||
must match the `type_label` argument.
|
||||
|
||||
In other words:
|
||||
|
||||
c = Counter('http_request_exceptions_total', 'Counter of exceptions',
|
||||
['method', 'type'])
|
||||
with ExceptionCounterByType(c, extra_labels={'method': 'GET'}):
|
||||
handle_get_request()
|
||||
"""
|
||||
|
||||
def __init__(self, counter, type_label="type", extra_labels=None):
|
||||
self._counter = counter
|
||||
self._type_label = type_label
|
||||
self._labels = dict(extra_labels) # Copy labels since we modify them.
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
|
||||
def __exit__(self, typ, value, traceback):
|
||||
if typ is not None:
|
||||
self._labels.update({self._type_label: typ.__name__})
|
||||
self._counter.labels(**self._labels).inc()
|
||||
|
||||
|
||||
class DatabaseWrapperMixin:
|
||||
"""Extends the DatabaseWrapper to count connections and cursors."""
|
||||
|
||||
def get_new_connection(self, *args, **kwargs):
|
||||
connections_total.labels(self.alias, self.vendor).inc()
|
||||
try:
|
||||
return super().get_new_connection(*args, **kwargs)
|
||||
except Exception:
|
||||
connection_errors_total.labels(self.alias, self.vendor).inc()
|
||||
raise
|
||||
|
||||
def create_cursor(self, name=None):
|
||||
return self.connection.cursor(factory=ExportingCursorWrapper(self.CURSOR_CLASS, self.alias, self.vendor))
|
||||
|
||||
|
||||
def ExportingCursorWrapper(cursor_class, alias, vendor):
|
||||
"""Returns a CursorWrapper class that knows its database's alias and
|
||||
vendor name.
|
||||
"""
|
||||
labels = {"alias": alias, "vendor": vendor}
|
||||
|
||||
class CursorWrapper(cursor_class):
|
||||
"""Extends the base CursorWrapper to count events."""
|
||||
|
||||
def execute(self, *args, **kwargs):
|
||||
execute_total.labels(alias, vendor).inc()
|
||||
with (
|
||||
query_duration_seconds.labels(**labels).time(),
|
||||
ExceptionCounterByType(errors_total, extra_labels=labels),
|
||||
):
|
||||
return super().execute(*args, **kwargs)
|
||||
|
||||
def executemany(self, query, param_list, *args, **kwargs):
|
||||
execute_total.labels(alias, vendor).inc(len(param_list))
|
||||
execute_many_total.labels(alias, vendor).inc(len(param_list))
|
||||
with (
|
||||
query_duration_seconds.labels(**labels).time(),
|
||||
ExceptionCounterByType(errors_total, extra_labels=labels),
|
||||
):
|
||||
return super().executemany(query, param_list, *args, **kwargs)
|
||||
|
||||
return CursorWrapper
|
||||
@@ -0,0 +1,48 @@
|
||||
from prometheus_client import Counter, Histogram
|
||||
|
||||
from django_prometheus.conf import NAMESPACE, PROMETHEUS_LATENCY_BUCKETS
|
||||
|
||||
connections_total = Counter(
|
||||
"django_db_new_connections_total",
|
||||
"Counter of created connections by database and by vendor.",
|
||||
["alias", "vendor"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
|
||||
connection_errors_total = Counter(
|
||||
"django_db_new_connection_errors_total",
|
||||
"Counter of connection failures by database and by vendor.",
|
||||
["alias", "vendor"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
|
||||
execute_total = Counter(
|
||||
"django_db_execute_total",
|
||||
("Counter of executed statements by database and by vendor, including bulk executions."),
|
||||
["alias", "vendor"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
|
||||
|
||||
execute_many_total = Counter(
|
||||
"django_db_execute_many_total",
|
||||
("Counter of executed statements in bulk operations by database and by vendor."),
|
||||
["alias", "vendor"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
|
||||
|
||||
errors_total = Counter(
|
||||
"django_db_errors_total",
|
||||
("Counter of execution errors by database, vendor and exception type."),
|
||||
["alias", "vendor", "type"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
|
||||
query_duration_seconds = Histogram(
|
||||
"django_db_query_duration_seconds",
|
||||
("Histogram of query duration by database and vendor."),
|
||||
["alias", "vendor"],
|
||||
buckets=PROMETHEUS_LATENCY_BUCKETS,
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
@@ -0,0 +1,122 @@
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
|
||||
import prometheus_client
|
||||
from django.conf import settings
|
||||
from django.http import HttpResponse
|
||||
from prometheus_client import multiprocess
|
||||
|
||||
try:
|
||||
# Python 2
|
||||
from BaseHTTPServer import HTTPServer
|
||||
except ImportError:
|
||||
# Python 3
|
||||
from http.server import HTTPServer
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def SetupPrometheusEndpointOnPort(port, addr=""):
|
||||
"""Exports Prometheus metrics on an HTTPServer running in its own thread.
|
||||
|
||||
The server runs on the given port and is by default listenning on
|
||||
all interfaces. This HTTPServer is fully independent of Django and
|
||||
its stack. This offers the advantage that even if Django becomes
|
||||
unable to respond, the HTTPServer will continue to function and
|
||||
export metrics. However, this also means that the features
|
||||
offered by Django (like middlewares or WSGI) can't be used.
|
||||
|
||||
Now here's the really weird part. When Django runs with the
|
||||
auto-reloader enabled (which is the default, you can disable it
|
||||
with `manage.py runserver --noreload`), it forks and executes
|
||||
manage.py twice. That's wasteful but usually OK. It starts being a
|
||||
problem when you try to open a port, like we do. We can detect
|
||||
that we're running under an autoreloader through the presence of
|
||||
the RUN_MAIN environment variable, so we abort if we're trying to
|
||||
export under an autoreloader and trying to open a port.
|
||||
"""
|
||||
assert os.environ.get("RUN_MAIN") != "true", (
|
||||
"The thread-based exporter can't be safely used when django's "
|
||||
"autoreloader is active. Use the URL exporter, or start django "
|
||||
"with --noreload. See documentation/exports.md."
|
||||
)
|
||||
prometheus_client.start_http_server(port, addr=addr)
|
||||
|
||||
|
||||
class PrometheusEndpointServer(threading.Thread):
|
||||
"""A thread class that holds an http and makes it serve_forever()."""
|
||||
|
||||
def __init__(self, httpd, *args, **kwargs):
|
||||
self.httpd = httpd
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def run(self):
|
||||
self.httpd.serve_forever()
|
||||
|
||||
|
||||
def SetupPrometheusEndpointOnPortRange(port_range, addr=""):
|
||||
"""Like SetupPrometheusEndpointOnPort, but tries several ports.
|
||||
|
||||
This is useful when you're running Django as a WSGI application
|
||||
with multiple processes and you want Prometheus to discover all
|
||||
workers. Each worker will grab a port and you can use Prometheus
|
||||
to aggregate across workers.
|
||||
|
||||
port_range may be any iterable object that contains a list of
|
||||
ports. Typically this would be a `range` of contiguous ports.
|
||||
|
||||
As soon as one port is found that can serve, use this one and stop
|
||||
trying.
|
||||
|
||||
Returns the port chosen (an `int`), or `None` if no port in the
|
||||
supplied range was available.
|
||||
|
||||
The same caveats regarding autoreload apply. Do not use this when
|
||||
Django's autoreloader is active.
|
||||
"""
|
||||
assert os.environ.get("RUN_MAIN") != "true", (
|
||||
"The thread-based exporter can't be safely used when django's "
|
||||
"autoreloader is active. Use the URL exporter, or start django "
|
||||
"with --noreload. See documentation/exports.md."
|
||||
)
|
||||
for port in port_range:
|
||||
try:
|
||||
httpd = HTTPServer((addr, port), prometheus_client.MetricsHandler)
|
||||
except OSError:
|
||||
# Python 2 raises socket.error, in Python 3 socket.error is an
|
||||
# alias for OSError
|
||||
continue # Try next port
|
||||
thread = PrometheusEndpointServer(httpd)
|
||||
thread.daemon = True
|
||||
thread.start()
|
||||
logger.info(f"Exporting Prometheus /metrics/ on port {port}")
|
||||
return port # Stop trying ports at this point
|
||||
logger.warning("Cannot export Prometheus /metrics/ - no available ports in supplied range")
|
||||
return None
|
||||
|
||||
|
||||
def SetupPrometheusExportsFromConfig():
|
||||
"""Exports metrics so Prometheus can collect them."""
|
||||
port = getattr(settings, "PROMETHEUS_METRICS_EXPORT_PORT", None)
|
||||
port_range = getattr(settings, "PROMETHEUS_METRICS_EXPORT_PORT_RANGE", None)
|
||||
addr = getattr(settings, "PROMETHEUS_METRICS_EXPORT_ADDRESS", "")
|
||||
if port_range:
|
||||
SetupPrometheusEndpointOnPortRange(port_range, addr)
|
||||
elif port:
|
||||
SetupPrometheusEndpointOnPort(port, addr)
|
||||
|
||||
|
||||
def ExportToDjangoView(request):
|
||||
"""Exports /metrics as a Django view.
|
||||
|
||||
You can use django_prometheus.urls to map /metrics to this view.
|
||||
"""
|
||||
if "PROMETHEUS_MULTIPROC_DIR" in os.environ or "prometheus_multiproc_dir" in os.environ:
|
||||
registry = prometheus_client.CollectorRegistry()
|
||||
multiprocess.MultiProcessCollector(registry)
|
||||
else:
|
||||
registry = prometheus_client.REGISTRY
|
||||
metrics_page = prometheus_client.generate_latest(registry)
|
||||
return HttpResponse(metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST)
|
||||
@@ -0,0 +1,309 @@
|
||||
from django.utils.deprecation import MiddlewareMixin
|
||||
from prometheus_client import Counter, Histogram
|
||||
|
||||
from django_prometheus.conf import NAMESPACE, PROMETHEUS_LATENCY_BUCKETS
|
||||
from django_prometheus.utils import PowersOf, Time, TimeSince
|
||||
|
||||
|
||||
class Metrics:
|
||||
_instance = None
|
||||
|
||||
@classmethod
|
||||
def get_instance(cls):
|
||||
if not cls._instance:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs):
|
||||
return metric_cls(name, documentation, labelnames=labelnames, **kwargs)
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.register()
|
||||
|
||||
def register(self):
|
||||
self.requests_total = self.register_metric(
|
||||
Counter,
|
||||
"django_http_requests_before_middlewares_total",
|
||||
"Total count of requests before middlewares run.",
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.responses_total = self.register_metric(
|
||||
Counter,
|
||||
"django_http_responses_before_middlewares_total",
|
||||
"Total count of responses before middlewares run.",
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.requests_latency_before = self.register_metric(
|
||||
Histogram,
|
||||
"django_http_requests_latency_including_middlewares_seconds",
|
||||
("Histogram of requests processing time (including middleware processing time)."),
|
||||
buckets=PROMETHEUS_LATENCY_BUCKETS,
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.requests_unknown_latency_before = self.register_metric(
|
||||
Counter,
|
||||
"django_http_requests_unknown_latency_including_middlewares_total",
|
||||
(
|
||||
"Count of requests for which the latency was unknown (when computing "
|
||||
"django_http_requests_latency_including_middlewares_seconds)."
|
||||
),
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.requests_latency_by_view_method = self.register_metric(
|
||||
Histogram,
|
||||
"django_http_requests_latency_seconds_by_view_method",
|
||||
"Histogram of request processing time labelled by view.",
|
||||
["view", "method"],
|
||||
buckets=PROMETHEUS_LATENCY_BUCKETS,
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.requests_unknown_latency = self.register_metric(
|
||||
Counter,
|
||||
"django_http_requests_unknown_latency_total",
|
||||
"Count of requests for which the latency was unknown.",
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
# Set in process_request
|
||||
self.requests_ajax = self.register_metric(
|
||||
Counter,
|
||||
"django_http_ajax_requests_total",
|
||||
"Count of AJAX requests.",
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.requests_by_method = self.register_metric(
|
||||
Counter,
|
||||
"django_http_requests_total_by_method",
|
||||
"Count of requests by method.",
|
||||
["method"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.requests_by_transport = self.register_metric(
|
||||
Counter,
|
||||
"django_http_requests_total_by_transport",
|
||||
"Count of requests by transport.",
|
||||
["transport"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
# Set in process_view
|
||||
self.requests_by_view_transport_method = self.register_metric(
|
||||
Counter,
|
||||
"django_http_requests_total_by_view_transport_method",
|
||||
"Count of requests by view, transport, method.",
|
||||
["view", "transport", "method"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.requests_body_bytes = self.register_metric(
|
||||
Histogram,
|
||||
"django_http_requests_body_total_bytes",
|
||||
"Histogram of requests by body size.",
|
||||
buckets=PowersOf(2, 30),
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
# Set in process_template_response
|
||||
self.responses_by_templatename = self.register_metric(
|
||||
Counter,
|
||||
"django_http_responses_total_by_templatename",
|
||||
"Count of responses by template name.",
|
||||
["templatename"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
# Set in process_response
|
||||
self.responses_by_status = self.register_metric(
|
||||
Counter,
|
||||
"django_http_responses_total_by_status",
|
||||
"Count of responses by status.",
|
||||
["status"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.responses_by_status_view_method = self.register_metric(
|
||||
Counter,
|
||||
"django_http_responses_total_by_status_view_method",
|
||||
"Count of responses by status, view, method.",
|
||||
["status", "view", "method"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.responses_body_bytes = self.register_metric(
|
||||
Histogram,
|
||||
"django_http_responses_body_total_bytes",
|
||||
"Histogram of responses by body size.",
|
||||
buckets=PowersOf(2, 30),
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.responses_by_charset = self.register_metric(
|
||||
Counter,
|
||||
"django_http_responses_total_by_charset",
|
||||
"Count of responses by charset.",
|
||||
["charset"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.responses_streaming = self.register_metric(
|
||||
Counter,
|
||||
"django_http_responses_streaming_total",
|
||||
"Count of streaming responses.",
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
# Set in process_exception
|
||||
self.exceptions_by_type = self.register_metric(
|
||||
Counter,
|
||||
"django_http_exceptions_total_by_type",
|
||||
"Count of exceptions by object type.",
|
||||
["type"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
self.exceptions_by_view = self.register_metric(
|
||||
Counter,
|
||||
"django_http_exceptions_total_by_view",
|
||||
"Count of exceptions by view.",
|
||||
["view"],
|
||||
namespace=NAMESPACE,
|
||||
)
|
||||
|
||||
|
||||
class PrometheusBeforeMiddleware(MiddlewareMixin):
|
||||
"""Monitoring middleware that should run before other middlewares."""
|
||||
|
||||
metrics_cls = Metrics
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.metrics = self.metrics_cls.get_instance()
|
||||
|
||||
def process_request(self, request):
|
||||
self.metrics.requests_total.inc()
|
||||
request.prometheus_before_middleware_event = Time()
|
||||
|
||||
def process_response(self, request, response):
|
||||
self.metrics.responses_total.inc()
|
||||
if hasattr(request, "prometheus_before_middleware_event"):
|
||||
self.metrics.requests_latency_before.observe(TimeSince(request.prometheus_before_middleware_event))
|
||||
else:
|
||||
self.metrics.requests_unknown_latency_before.inc()
|
||||
return response
|
||||
|
||||
|
||||
class PrometheusAfterMiddleware(MiddlewareMixin):
    """Monitoring middleware that should run after other middlewares.

    Records per-request metrics on the shared ``Metrics`` instance:
    request counts by method/transport/view, request and response body
    sizes, response counts by status/charset/template, latencies, and
    exceptions.
    """

    # Subclasses may override this to provide a Metrics subclass.
    metrics_cls = Metrics

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.metrics = self.metrics_cls.get_instance()

    def _transport(self, request):
        """Returns the transport scheme ("https" or "http") for labelling."""
        return "https" if request.is_secure() else "http"

    def _method(self, request):
        """Returns the request's HTTP method, collapsing anything outside the
        standard set into "<invalid method>" so arbitrary client input cannot
        explode metric label cardinality."""
        m = request.method
        if m not in (
            "GET",
            "HEAD",
            "POST",
            "PUT",
            "DELETE",
            "TRACE",
            "OPTIONS",
            "CONNECT",
            "PATCH",
        ):
            return "<invalid method>"
        return m

    def label_metric(self, metric, request, response=None, **labels):
        """Hook for subclasses that want to add extra labels; the default
        implementation just applies the provided labels, if any."""
        return metric.labels(**labels) if labels else metric

    def process_request(self, request):
        transport = self._transport(request)
        method = self._method(request)
        self.label_metric(self.metrics.requests_by_method, request, method=method).inc()
        self.label_metric(self.metrics.requests_by_transport, request, transport=transport).inc()

        # Mimic the behaviour of the deprecated "Request.is_ajax()" method.
        if request.headers.get("x-requested-with") == "XMLHttpRequest":
            self.label_metric(self.metrics.requests_ajax, request).inc()

        # A missing or empty Content-Length header counts as 0 bytes.
        content_length = int(request.headers.get("content-length") or 0)
        self.label_metric(self.metrics.requests_body_bytes, request).observe(content_length)
        # Timestamp consumed by process_response/process_exception for latency.
        request.prometheus_after_middleware_event = Time()

    def _get_view_name(self, request):
        """Best-effort view name: resolver_match may be absent, None (URL
        resolution failed), or have a None view_name."""
        view_name = "<unnamed view>"
        if hasattr(request, "resolver_match"):
            if request.resolver_match is not None:
                if request.resolver_match.view_name is not None:
                    view_name = request.resolver_match.view_name
        return view_name

    def process_view(self, request, view_func, *view_args, **view_kwargs):
        transport = self._transport(request)
        method = self._method(request)
        # Guard against resolver_match being None, not merely absent —
        # dereferencing .view_name on None would raise AttributeError here.
        if getattr(request, "resolver_match", None) is not None:
            name = request.resolver_match.view_name or "<unnamed view>"
            self.label_metric(
                self.metrics.requests_by_view_transport_method,
                request,
                view=name,
                transport=transport,
                method=method,
            ).inc()

    def process_template_response(self, request, response):
        if hasattr(response, "template_name"):
            self.label_metric(
                self.metrics.responses_by_templatename,
                request,
                response=response,
                templatename=str(response.template_name),
            ).inc()
        return response

    def process_response(self, request, response):
        # Use the sanitized method for all labels (bounded cardinality),
        # consistent with process_request/process_view.
        method = self._method(request)
        name = self._get_view_name(request)
        status = str(response.status_code)
        self.label_metric(self.metrics.responses_by_status, request, response, status=status).inc()
        self.label_metric(
            self.metrics.responses_by_status_view_method,
            request,
            response,
            status=status,
            view=name,
            method=method,
        ).inc()
        if hasattr(response, "charset"):
            self.label_metric(
                self.metrics.responses_by_charset,
                request,
                response,
                charset=str(response.charset),
            ).inc()
        if hasattr(response, "streaming") and response.streaming:
            self.label_metric(self.metrics.responses_streaming, request, response).inc()
        if hasattr(response, "content"):
            self.label_metric(self.metrics.responses_body_bytes, request, response).observe(len(response.content))
        if hasattr(request, "prometheus_after_middleware_event"):
            self.label_metric(
                self.metrics.requests_latency_by_view_method,
                request,
                response,
                view=name,
                method=method,
            ).observe(TimeSince(request.prometheus_after_middleware_event))
        else:
            # process_request never ran for this request; latency unknown.
            self.label_metric(self.metrics.requests_unknown_latency, request, response).inc()
        return response

    def process_exception(self, request, exception):
        self.label_metric(self.metrics.exceptions_by_type, request, type=type(exception).__name__).inc()
        # resolver_match may be None if the exception was raised before or
        # during URL resolution.
        if getattr(request, "resolver_match", None) is not None:
            name = request.resolver_match.view_name or "<unnamed view>"
            self.label_metric(self.metrics.exceptions_by_view, request, view=name).inc()
        if hasattr(request, "prometheus_after_middleware_event"):
            self.label_metric(
                self.metrics.requests_latency_by_view_method,
                request,
                view=self._get_view_name(request),
                method=self._method(request),
            ).observe(TimeSince(request.prometheus_after_middleware_event))
        else:
            self.label_metric(self.metrics.requests_unknown_latency, request).inc()
|
||||
@@ -0,0 +1,50 @@
|
||||
from django.db import connections
|
||||
from django.db.backends.dummy.base import DatabaseWrapper
|
||||
from prometheus_client import Gauge
|
||||
|
||||
from django_prometheus.conf import NAMESPACE
|
||||
|
||||
# Gauge: number of migrations that exist but have not been applied yet,
# labelled by database connection alias. Set by ExportMigrationsForDatabase.
unapplied_migrations = Gauge(
    "django_migrations_unapplied_total",
    "Count of unapplied migrations by database connection",
    ["connection"],
    namespace=NAMESPACE,
)

# Gauge: number of migrations already applied, labelled by database
# connection alias. Set by ExportMigrationsForDatabase.
applied_migrations = Gauge(
    "django_migrations_applied_total",
    "Count of applied migrations by database connection",
    ["connection"],
    namespace=NAMESPACE,
)
|
||||
|
||||
|
||||
def ExportMigrationsForDatabase(alias, executor):
    """Sets the applied/unapplied migration gauges for one database alias.

    ``executor`` is a django.db.migrations.executor.MigrationExecutor bound
    to the connection named by ``alias``.
    """
    # The plan to reach all leaf nodes is exactly the set of pending migrations.
    pending = executor.migration_plan(executor.loader.graph.leaf_nodes())
    done = executor.loader.applied_migrations
    unapplied_migrations.labels(alias).set(len(pending))
    applied_migrations.labels(alias).set(len(done))
|
||||
|
||||
|
||||
def ExportMigrations():
    """Exports counts of applied and unapplied migrations per connection.

    This is meant to be called during app startup, ideally by
    django_prometheus.apps.AppConfig.
    """
    # MigrationExecutor is imported lazily: importing it checks that the
    # app registry is ready, which is not yet true when django_prometheus
    # itself is imported. Calling this from AppConfig.ready() is safe.
    from django.db.migrations.executor import MigrationExecutor

    # When DATABASES = {} Django installs a dummy "default" connection
    # that raises as soon as it is used. Exporting stats through it would
    # crash the app on startup, so do nothing in that case.
    if "default" in connections and isinstance(connections["default"], DatabaseWrapper):
        return

    for alias in connections.databases:
        ExportMigrationsForDatabase(alias, MigrationExecutor(connections[alias]))
|
||||
@@ -0,0 +1,54 @@
|
||||
from prometheus_client import Counter
|
||||
|
||||
from django_prometheus.conf import NAMESPACE
|
||||
|
||||
# Counter of row inserts, labelled by model name. Incremented by the
# _do_insert override in ExportModelOperationsMixin.
model_inserts = Counter(
    "django_model_inserts_total",
    "Number of insert operations by model.",
    ["model"],
    namespace=NAMESPACE,
)

# Counter of row updates, labelled by model name. Incremented by the
# _do_update override in ExportModelOperationsMixin.
model_updates = Counter(
    "django_model_updates_total",
    "Number of update operations by model.",
    ["model"],
    namespace=NAMESPACE,
)

# Counter of row deletions, labelled by model name. Incremented by the
# delete() override in ExportModelOperationsMixin.
model_deletes = Counter(
    "django_model_deletes_total",
    "Number of delete operations by model.",
    ["model"],
    namespace=NAMESPACE,
)
|
||||
|
||||
|
||||
def ExportModelOperationsMixin(model_name):
    """Returns a mixin for models to export counters for lifecycle operations.

    Usage:
      class User(ExportModelOperationsMixin('user'), Model):
          ...
    """
    # Touch each counter's label up front so the time series exist (at 0)
    # before the first operation. Not strictly necessary, but it avoids
    # gaps in the aggregated data.
    for counter in (model_inserts, model_updates, model_deletes):
        counter.labels(model_name)

    class Mixin:
        # Each hook bumps the matching counter, then defers to the regular
        # Django implementation further down the MRO.
        def _do_insert(self, *args, **kwargs):
            model_inserts.labels(model_name).inc()
            return super()._do_insert(*args, **kwargs)

        def _do_update(self, *args, **kwargs):
            model_updates.labels(model_name).inc()
            return super()._do_update(*args, **kwargs)

        def delete(self, *args, **kwargs):
            model_deletes.labels(model_name).inc()
            return super().delete(*args, **kwargs)

    # Make tracebacks and repr() show which model the mixin was built for.
    Mixin.__qualname__ = f"ExportModelOperationsMixin('{model_name}')"
    return Mixin
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env python
|
||||
from django_prometheus.utils import PowersOf
|
||||
|
||||
|
||||
class TestDjangoPrometheus:
    """Unit tests for django_prometheus.utils."""

    def testPowersOf(self):
        """Tests utils.PowersOf."""
        # (positional args, keyword args, expected result)
        cases = [
            ((2, 4), {}, [0, 1, 2, 4, 8]),
            ((3, 5), {"lower": 1}, [0, 3, 9, 27, 81, 243]),
            ((2, 4), {"include_zero": False}, [1, 2, 4, 8]),
            ((2, 6), {"lower": 2, "include_zero": False}, [4, 8, 16, 32, 64, 128]),
        ]
        for args, kwargs, expected in cases:
            assert PowersOf(*args, **kwargs) == expected
|
||||
@@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env python
|
||||
import socket
|
||||
from unittest.mock import ANY, MagicMock, call, patch
|
||||
|
||||
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
|
||||
|
||||
|
||||
@patch("django_prometheus.exports.HTTPServer")
def test_port_range_available(httpserver_mock):
    """Test port range setup with an available port."""
    # The first bind attempt fails, the second succeeds.
    httpserver_mock.side_effect = [socket.error, MagicMock()]
    candidate_ports = [8000, 8001]

    port_chosen = SetupPrometheusEndpointOnPortRange(candidate_ports)

    # Both ports were tried, in order, and one of them was selected.
    assert httpserver_mock.mock_calls == [call(("", port), ANY) for port in candidate_ports]
    assert port_chosen in candidate_ports
|
||||
|
||||
|
||||
@patch("django_prometheus.exports.HTTPServer")
def test_port_range_unavailable(httpserver_mock):
    """Test port range setup with no available ports."""
    # Every bind attempt fails.
    httpserver_mock.side_effect = [socket.error, socket.error]
    candidate_ports = [8000, 8001]

    port_chosen = SetupPrometheusEndpointOnPortRange(candidate_ports)

    # Both ports were tried, in order, and none could be used.
    assert httpserver_mock.mock_calls == [call(("", port), ANY) for port in candidate_ports]
    assert port_chosen is None
|
||||
@@ -0,0 +1,146 @@
|
||||
#!/usr/bin/env python
|
||||
from operator import itemgetter
|
||||
|
||||
import prometheus_client
|
||||
import pytest
|
||||
|
||||
from django_prometheus.testutils import (
|
||||
assert_metric_diff,
|
||||
assert_metric_equal,
|
||||
assert_metric_no_diff,
|
||||
assert_metric_not_equal,
|
||||
get_metric,
|
||||
get_metric_from_frozen_registry,
|
||||
get_metrics_vector,
|
||||
save_registry,
|
||||
)
|
||||
|
||||
|
||||
class TestPrometheusTestCaseMixin:
    """Exercises the helpers in django_prometheus.testutils against a
    private CollectorRegistry populated with one scalar gauge and one
    labelled gauge."""

    @pytest.fixture
    def registry(self):
        # A fresh registry per test so metrics never leak between tests.
        return prometheus_client.CollectorRegistry()

    @pytest.fixture(autouse=True)
    def some_gauge(self, registry):
        # Scalar (label-less) gauge, fixed at 42 for every test.
        some_gauge = prometheus_client.Gauge("some_gauge", "Some gauge.", registry=registry)
        some_gauge.set(42)
        return some_gauge

    @pytest.fixture(autouse=True)
    def some_labelled_gauge(self, registry):
        # Two-label gauge with all four label combinations pre-set (1..4).
        some_labelled_gauge = prometheus_client.Gauge(
            "some_labelled_gauge",
            "Some labelled gauge.",
            ["labelred", "labelblue"],
            registry=registry,
        )
        some_labelled_gauge.labels("pink", "indigo").set(1)
        some_labelled_gauge.labels("pink", "royal").set(2)
        some_labelled_gauge.labels("carmin", "indigo").set(3)
        some_labelled_gauge.labels("carmin", "royal").set(4)
        return some_labelled_gauge

    def test_get_metric(self, registry):
        """Tests get_metric."""
        assert get_metric("some_gauge", registry=registry) == 42
        assert (
            get_metric(
                "some_labelled_gauge",
                registry=registry,
                labelred="pink",
                labelblue="indigo",
            )
            == 1
        )

    def test_get_metrics_vector(self, registry):
        """Tests get_metrics_vector."""
        # A nonexistent metric yields an empty vector, not an error.
        vector = get_metrics_vector("some_nonexistent_gauge", registry=registry)
        assert vector == []
        vector = get_metrics_vector("some_gauge", registry=registry)
        assert vector == [({}, 42)]
        # Sample ordering is unspecified, so compare sorted by value.
        vector = get_metrics_vector("some_labelled_gauge", registry=registry)
        assert sorted(
            [
                ({"labelred": "pink", "labelblue": "indigo"}, 1),
                ({"labelred": "pink", "labelblue": "royal"}, 2),
                ({"labelred": "carmin", "labelblue": "indigo"}, 3),
                ({"labelred": "carmin", "labelblue": "royal"}, 4),
            ],
            key=itemgetter(1),
        ) == sorted(vector, key=itemgetter(1))

    def test_assert_metric_equal(self, registry):
        """Tests assert_metric_equal."""
        # First we test that a scalar metric can be tested.
        assert_metric_equal(42, "some_gauge", registry=registry)

        assert_metric_not_equal(43, "some_gauge", registry=registry)

        # Here we test that assert_metric_equal fails on nonexistent gauges.
        assert_metric_not_equal(42, "some_nonexistent_gauge", registry=registry)

        # Here we test that labelled metrics can be tested.
        assert_metric_equal(
            1,
            "some_labelled_gauge",
            registry=registry,
            labelred="pink",
            labelblue="indigo",
        )

        assert_metric_not_equal(
            1,
            "some_labelled_gauge",
            registry=registry,
            labelred="tomato",
            labelblue="sky",
        )

    def test_registry_saving(self, registry, some_gauge, some_labelled_gauge):
        """Tests save_registry and frozen registries operations."""
        frozen_registry = save_registry(registry=registry)
        # Test that we can manipulate a frozen scalar metric.
        assert get_metric_from_frozen_registry("some_gauge", frozen_registry) == 42
        some_gauge.set(99)
        # The frozen snapshot must not see the live update (deep copy).
        assert get_metric_from_frozen_registry("some_gauge", frozen_registry) == 42
        assert_metric_diff(frozen_registry, 99 - 42, "some_gauge", registry=registry)
        assert_metric_no_diff(frozen_registry, 1, "some_gauge", registry=registry)
        # Now test the same thing with a labelled metric.
        assert (
            get_metric_from_frozen_registry(
                "some_labelled_gauge",
                frozen_registry,
                labelred="pink",
                labelblue="indigo",
            )
            == 1
        )
        some_labelled_gauge.labels("pink", "indigo").set(5)
        # Again, the frozen value stays at its snapshot-time value.
        assert (
            get_metric_from_frozen_registry(
                "some_labelled_gauge",
                frozen_registry,
                labelred="pink",
                labelblue="indigo",
            )
            == 1
        )
        assert_metric_diff(
            frozen_registry,
            5 - 1,
            "some_labelled_gauge",
            registry=registry,
            labelred="pink",
            labelblue="indigo",
        )

        assert_metric_no_diff(
            frozen_registry,
            1,
            "some_labelled_gauge",
            registry=registry,
            labelred="pink",
            labelblue="indigo",
        )
|
||||
@@ -0,0 +1,199 @@
|
||||
import copy
|
||||
|
||||
from prometheus_client import REGISTRY
|
||||
|
||||
# Failure message for assert_metric_equal / assert_metric_not_equal:
# metric name, formatted labels, actual, expected, metric name, full vector.
METRIC_EQUALS_ERR_EXPLANATION = """
%s%s = %s, expected %s.
The values for %s are:
%s"""

# Failure message for assert_metric_diff / assert_metric_no_diff:
# metric name, formatted labels, actual diff, expected diff, before, after.
METRIC_DIFF_ERR_EXPLANATION = """
%s%s changed by %f, expected %f.
Value before: %s
Value after: %s
"""

# Failure message for assert_metric_compare:
# metric name, formatted labels, before, after.
METRIC_COMPARE_ERR_EXPLANATION = """
The change in value of %s%s didn't match the predicate.
Value before: %s
Value after: %s
"""

# Failure message used when the metric does not exist at comparison time:
# metric name, formatted labels, before, after.
METRIC_DIFF_ERR_NONE_EXPLANATION = """
%s%s was None after.
Value before: %s
Value after: %s
"""


# NOTE(review): this string is not the module docstring (it is not the
# first statement in the module), so it is a no-op expression; kept for
# its descriptive value.
"""A collection of utilities that make it easier to write test cases
that interact with metrics.
"""
|
||||
|
||||
|
||||
def assert_metric_equal(expected_value, metric_name, registry=REGISTRY, **labels):
    """Asserts that metric_name{**labels} == expected_value."""
    actual = get_metric(metric_name, registry=registry, **labels)
    # Build the explanation eagerly so the failure message always includes
    # the full vector of values for this metric.
    explanation = METRIC_EQUALS_ERR_EXPLANATION % (
        metric_name,
        format_labels(labels),
        actual,
        expected_value,
        metric_name,
        format_vector(get_metrics_vector(metric_name)),
    )
    assert expected_value == actual, explanation
|
||||
|
||||
|
||||
def assert_metric_diff(frozen_registry, expected_diff, metric_name, registry=REGISTRY, **labels):
    """Asserts that metric_name{**labels} changed by expected_diff between
    the frozen registry and now. A frozen registry can be obtained
    by calling save_registry, typically at the beginning of a test
    case.
    """
    before = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels)
    after = get_metric(metric_name, registry=registry, **labels)
    # The metric must exist now, even if it was absent from the snapshot.
    assert after is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % (
        metric_name,
        format_labels(labels),
        before,
        after,
    )
    # A metric missing from the frozen snapshot counts as 0.
    diff = after - (before or 0.0)
    explanation = METRIC_DIFF_ERR_EXPLANATION % (
        metric_name,
        format_labels(labels),
        diff,
        expected_diff,
        before,
        after,
    )
    assert expected_diff == diff, explanation
|
||||
|
||||
|
||||
def assert_metric_no_diff(frozen_registry, expected_diff, metric_name, registry=REGISTRY, **labels):
    """Asserts that metric_name{**labels} did NOT change by expected_diff
    between the frozen registry and now. A frozen registry can be obtained
    by calling save_registry, typically at the beginning of a test
    case.
    """
    before = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels)
    after = get_metric(metric_name, registry=registry, **labels)
    # The metric must exist now, even if it was absent from the snapshot.
    assert after is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % (
        metric_name,
        format_labels(labels),
        before,
        after,
    )
    # A metric missing from the frozen snapshot counts as 0.
    diff = after - (before or 0.0)
    explanation = METRIC_DIFF_ERR_EXPLANATION % (
        metric_name,
        format_labels(labels),
        diff,
        expected_diff,
        before,
        after,
    )
    assert expected_diff != diff, explanation
|
||||
|
||||
|
||||
def assert_metric_not_equal(expected_value, metric_name, registry=REGISTRY, **labels):
    """Asserts that metric_name{**labels} != expected_value."""
    actual = get_metric(metric_name, registry=registry, **labels)
    # Build the explanation eagerly so the failure message always includes
    # the full vector of values for this metric.
    explanation = METRIC_EQUALS_ERR_EXPLANATION % (
        metric_name,
        format_labels(labels),
        actual,
        expected_value,
        metric_name,
        format_vector(get_metrics_vector(metric_name)),
    )
    assert expected_value != actual, explanation
|
||||
|
||||
|
||||
def assert_metric_compare(frozen_registry, predicate, metric_name, registry=REGISTRY, **labels):
    """Asserts that metric_name{**labels} changed according to a provided
    predicate function between the frozen registry and now. A
    frozen registry can be obtained by calling save_registry,
    typically at the beginning of a test case.
    """
    before = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels)
    after = get_metric(metric_name, registry=registry, **labels)
    # The metric must exist now, even if it was absent from the snapshot.
    assert after is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % (
        metric_name,
        format_labels(labels),
        before,
        after,
    )
    # The predicate must return True exactly (not merely truthy).
    assert predicate(before, after) is True, METRIC_COMPARE_ERR_EXPLANATION % (
        metric_name,
        format_labels(labels),
        before,
        after,
    )
|
||||
|
||||
|
||||
def save_registry(registry=REGISTRY):
    """Freezes a registry. This lets a user test changes to a metric
    instead of testing the absolute value. A typical use case looks like:

      registry = save_registry()
      doStuff()
      assert_metric_diff(registry, 1, 'stuff_done_total')
    """
    # Deep-copy the collected snapshot so later metric updates cannot
    # mutate the frozen values.
    snapshot = list(registry.collect())
    return copy.deepcopy(snapshot)
|
||||
|
||||
|
||||
def get_metric(metric_name, registry=REGISTRY, **labels):
    """Gets a single metric."""
    # Collect a live snapshot and reuse the frozen-registry lookup.
    snapshot = registry.collect()
    return get_metric_from_frozen_registry(metric_name, snapshot, **labels)
|
||||
|
||||
|
||||
def get_metrics_vector(metric_name, registry=REGISTRY):
    """Returns the values for all labels of a given metric.

    The result is returned as a list of (labels, value) tuples,
    where `labels` is a dict.

    This is quite a hack since it relies on the internal
    representation of the prometheus_client, and it should
    probably be provided as a function there instead.
    """
    # Collect a live snapshot and reuse the frozen-registry lookup.
    snapshot = registry.collect()
    return get_metric_vector_from_frozen_registry(metric_name, snapshot)
|
||||
|
||||
|
||||
def get_metric_vector_from_frozen_registry(metric_name, frozen_registry):
    """Like get_metrics_vector, but from a frozen registry."""
    # Each sample is a (name, labels_dict, value, ...) tuple; keep the
    # (labels, value) pairs of every sample matching the requested name.
    return [
        (sample[1], sample[2])
        for metric in frozen_registry
        for sample in metric.samples
        if sample[0] == metric_name
    ]
|
||||
|
||||
|
||||
def get_metric_from_frozen_registry(metric_name, frozen_registry, **labels):
    """Gets a single metric from a frozen registry.

    Returns None when no sample matches both the name and the exact
    label set.
    """
    for metric in frozen_registry:
        for sample in metric.samples:
            # sample is a (name, labels_dict, value, ...) tuple.
            if sample[0] != metric_name or sample[1] != labels:
                continue
            return sample[2]
    return None
|
||||
|
||||
|
||||
def format_labels(labels):
    """Format a set of labels to Prometheus representation.

    In:
      {'method': 'GET', 'port': '80'}

    Out:
      '{method="GET",port="80"}'
    """
    inner = ",".join(f'{key}="{value}"' for key, value in labels.items())
    return "{" + inner + "}"
|
||||
|
||||
|
||||
def format_vector(vector):
    """Formats a list of (labels, value) where labels is a dict into a
    human-readable representation.
    """
    lines = [f"{format_labels(labels)} = {value}" for labels, value in vector]
    return "\n".join(lines)
|
||||
@@ -0,0 +1,5 @@
|
||||
from django.urls import path
|
||||
|
||||
from django_prometheus import exports
|
||||
|
||||
# Exposes the Prometheus scrape endpoint at "metrics", rendered by
# django_prometheus.exports.ExportToDjangoView.
urlpatterns = [path("metrics", exports.ExportToDjangoView, name="prometheus-django-metrics")]
|
||||
@@ -0,0 +1,29 @@
|
||||
from timeit import default_timer
|
||||
|
||||
|
||||
def Time():
    """Returns some representation of the current time.

    This wrapper is meant to take advantage of a higher time
    resolution when available. Thus, its return value should be
    treated as an opaque object. It can be compared to the current
    time with TimeSince().
    """
    # timeit.default_timer is the highest-resolution clock available.
    now = default_timer()
    return now
|
||||
|
||||
|
||||
def TimeSince(t):
    """Compares a value returned by Time() to the current time.

    Returns:
        the time since t, in fractional seconds.

    """
    now = default_timer()
    return now - t
|
||||
|
||||
|
||||
def PowersOf(logbase, count, lower=0, include_zero=True):
    """Returns a list of count powers of logbase (from logbase**lower).

    When include_zero is true (the default), a leading 0 is prepended,
    which is convenient for histogram bucket boundaries.
    """
    powers = [logbase**exponent for exponent in range(lower, lower + count)]
    if not include_zero:
        return powers
    return [0] + powers
|
||||
Reference in New Issue
Block a user