Commit 726fefb

chore: extract module with builtin metrics

albertodonato committed Jan 9, 2025
1 parent 6003044

Showing 4 changed files with 86 additions and 74 deletions.
65 changes: 4 additions & 61 deletions query_exporter/config.py
@@ -28,51 +28,9 @@
     Query,
     QueryMetric,
 )
+from .metrics import BUILTIN_METRICS, get_builtin_metric_configs
 from .yaml import load_yaml_config

-# metric for counting database errors
-DB_ERRORS_METRIC_NAME = "database_errors"
-_DB_ERRORS_METRIC_CONFIG = MetricConfig(
-    name=DB_ERRORS_METRIC_NAME,
-    description="Number of database errors",
-    type="counter",
-    config={"increment": True},
-)
-
-# metric for counting performed queries
-QUERIES_METRIC_NAME = "queries"
-_QUERIES_METRIC_CONFIG = MetricConfig(
-    name=QUERIES_METRIC_NAME,
-    description="Number of database queries",
-    type="counter",
-    labels=("query", "status"),
-    config={"increment": True},
-)
-# metric for tracking last query execution timestamp
-QUERY_TIMESTAMP_METRIC_NAME = "query_timestamp"
-_QUERY_TIMESTAMP_METRIC_CONFIG = MetricConfig(
-    name=QUERY_TIMESTAMP_METRIC_NAME,
-    description="Query last execution timestamp",
-    type="gauge",
-    labels=("query",),
-)
-# metric for tracking query execution latency
-QUERY_LATENCY_METRIC_NAME = "query_latency"
-_QUERY_LATENCY_METRIC_CONFIG = MetricConfig(
-    name=QUERY_LATENCY_METRIC_NAME,
-    description="Query execution latency",
-    type="histogram",
-    labels=("query",),
-)
-GLOBAL_METRICS = frozenset(
-    (
-        DB_ERRORS_METRIC_NAME,
-        QUERIES_METRIC_NAME,
-        QUERY_LATENCY_METRIC_NAME,
-        QUERY_TIMESTAMP_METRIC_NAME,
-    )
-)

 # regexp for validating environment variable names
 _ENV_VAR_RE = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")

@@ -189,22 +147,7 @@ def _get_metrics(
     metrics: dict[str, dict[str, t.Any]], extra_labels: frozenset[str]
 ) -> dict[str, MetricConfig]:
     """Return a dict mapping metric names to their configuration."""
-    configs = {}
-    # global metrics
-    for metric_config in (
-        _DB_ERRORS_METRIC_CONFIG,
-        _QUERIES_METRIC_CONFIG,
-        _QUERY_LATENCY_METRIC_CONFIG,
-        _QUERY_TIMESTAMP_METRIC_CONFIG,
-    ):
-        configs[metric_config.name] = MetricConfig(
-            metric_config.name,
-            metric_config.description,
-            metric_config.type,
-            labels=set(metric_config.labels) | extra_labels,
-            config=metric_config.config,
-        )
-    # other metrics
+    configs = get_builtin_metric_configs(extra_labels)
     for name, config in metrics.items():
         _validate_metric_config(name, config, extra_labels)
         metric_type = config.pop("type")
@@ -221,7 +164,7 @@ def _validate_metric_config(
     name: str, config: dict[str, t.Any], extra_labels: frozenset[str]
 ) -> None:
     """Validate a metric configuration stanza."""
-    if name in GLOBAL_METRICS:
+    if name in BUILTIN_METRICS:
         raise ConfigError(f'Metric name "{name}" is reserved for builtin metric')
     labels = set(config.get("labels", ()))
     overlap_labels = labels & extra_labels
@@ -437,7 +380,7 @@ def _warn_if_unused(
             entries=unused_dbs,
         )
     if unused_metrics := sorted(
-        set(config.metrics) - GLOBAL_METRICS - used_metrics
+        set(config.metrics) - BUILTIN_METRICS - used_metrics
     ):
         logger.warning(
             "unused config entries",
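For illustration, a minimal sketch of the reserved-name check above, calling the private _validate_metric_config helper directly (normally load_config invokes it while parsing the metrics section of the config):

from query_exporter.config import ConfigError, _validate_metric_config

# "queries" is a builtin metric name, so user configs may not redefine it.
try:
    _validate_metric_config("queries", {"type": "counter"}, frozenset())
except ConfigError as err:
    print(err)  # Metric name "queries" is reserved for builtin metric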
10 changes: 6 additions & 4 deletions query_exporter/loop.py
@@ -23,10 +23,6 @@
 )
 
 from .config import (
-    DB_ERRORS_METRIC_NAME,
-    QUERIES_METRIC_NAME,
-    QUERY_LATENCY_METRIC_NAME,
-    QUERY_TIMESTAMP_METRIC_NAME,
     Config,
 )
 from .db import (
@@ -37,6 +33,12 @@
     Query,
     QueryTimeoutExpired,
 )
+from .metrics import (
+    DB_ERRORS_METRIC_NAME,
+    QUERIES_METRIC_NAME,
+    QUERY_LATENCY_METRIC_NAME,
+    QUERY_TIMESTAMP_METRIC_NAME,
+)
 
 
 class MetricsLastSeen:
65 changes: 65 additions & 0 deletions query_exporter/metrics.py
@@ -0,0 +1,65 @@
+from prometheus_aioexporter import MetricConfig
+
+# metric for counting database errors
+DB_ERRORS_METRIC_NAME = "database_errors"
+_DB_ERRORS_METRIC_CONFIG = MetricConfig(
+    name=DB_ERRORS_METRIC_NAME,
+    description="Number of database errors",
+    type="counter",
+    config={"increment": True},
+)
+
+# metric for counting performed queries
+QUERIES_METRIC_NAME = "queries"
+_QUERIES_METRIC_CONFIG = MetricConfig(
+    name=QUERIES_METRIC_NAME,
+    description="Number of database queries",
+    type="counter",
+    labels=("query", "status"),
+    config={"increment": True},
+)
+# metric for tracking last query execution timestamp
+QUERY_TIMESTAMP_METRIC_NAME = "query_timestamp"
+_QUERY_TIMESTAMP_METRIC_CONFIG = MetricConfig(
+    name=QUERY_TIMESTAMP_METRIC_NAME,
+    description="Query last execution timestamp",
+    type="gauge",
+    labels=("query",),
+)
+# metric for tracking query execution latency
+QUERY_LATENCY_METRIC_NAME = "query_latency"
+_QUERY_LATENCY_METRIC_CONFIG = MetricConfig(
+    name=QUERY_LATENCY_METRIC_NAME,
+    description="Query execution latency",
+    type="histogram",
+    labels=("query",),
+)
+BUILTIN_METRICS = frozenset(
+    (
+        DB_ERRORS_METRIC_NAME,
+        QUERIES_METRIC_NAME,
+        QUERY_LATENCY_METRIC_NAME,
+        QUERY_TIMESTAMP_METRIC_NAME,
+    )
+)
+
+
+def get_builtin_metric_configs(
+    extra_labels: frozenset[str],
+) -> dict[str, MetricConfig]:
+    """Return configuration for builtin metrics."""
+    return {
+        metric_config.name: MetricConfig(
+            metric_config.name,
+            metric_config.description,
+            metric_config.type,
+            labels=set(metric_config.labels) | extra_labels,
+            config=metric_config.config,
+        )
+        for metric_config in (
+            _DB_ERRORS_METRIC_CONFIG,
+            _QUERIES_METRIC_CONFIG,
+            _QUERY_LATENCY_METRIC_CONFIG,
+            _QUERY_TIMESTAMP_METRIC_CONFIG,
+        )
+    }
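A minimal usage sketch for the new helper; the database extra label below is hypothetical, standing in for whatever extra labels the exporter derives from its configuration:

from query_exporter.metrics import BUILTIN_METRICS, get_builtin_metric_configs

# Build the builtin metric configs with one extra label merged into each.
configs = get_builtin_metric_configs(frozenset({"database"}))

assert set(configs) == BUILTIN_METRICS
# Each builtin metric keeps its own labels and gains the extra one.
assert {"query", "status", "database"} <= set(configs["queries"].labels)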
20 changes: 11 additions & 9 deletions tests/config_test.py
@@ -8,15 +8,17 @@
 import yaml
 
 from query_exporter.config import (
-    DB_ERRORS_METRIC_NAME,
-    GLOBAL_METRICS,
-    QUERIES_METRIC_NAME,
     ConfigError,
     _get_parameters_sets,
     _resolve_dsn,
     load_config,
 )
 from query_exporter.db import QueryMetric
+from query_exporter.metrics import (
+    BUILTIN_METRICS,
+    DB_ERRORS_METRIC_NAME,
+    QUERIES_METRIC_NAME,
+)
 
 
 @pytest.fixture
@@ -437,7 +439,7 @@ def test_load_metrics_section(self, write_config: ConfigWriter) -> None:
"states": ["on", "off"],
"expiration": 100,
}
# global metrics
# builtin metrics
assert result.metrics.get(DB_ERRORS_METRIC_NAME) is not None
assert result.metrics.get(QUERIES_METRIC_NAME) is not None

@@ -473,20 +475,20 @@ def test_load_metrics_overlap_database_label(
             == 'Labels for metric "m" overlap with reserved/database ones: l1'
         )
 
-    @pytest.mark.parametrize("global_name", list(GLOBAL_METRICS))
+    @pytest.mark.parametrize("builtin_metric_name", list(BUILTIN_METRICS))
     def test_load_metrics_reserved_name(
         self,
         config_full: dict[str, t.Any],
         write_config: ConfigWriter,
-        global_name: str,
+        builtin_metric_name: str,
     ) -> None:
-        config_full["metrics"][global_name] = {"type": "counter"}
+        config_full["metrics"][builtin_metric_name] = {"type": "counter"}
         config_file = write_config(config_full)
         with pytest.raises(ConfigError) as err:
             load_config([config_file])
         assert (
             str(err.value)
-            == f'Metric name "{global_name}" is reserved for builtin metric'
+            == f'Metric name "{builtin_metric_name}" is reserved for builtin metric'
         )
 
     def test_load_metrics_unsupported_type(
@@ -1006,7 +1008,7 @@ def test_load_multiple_files_combine(
         )
         config = load_config([file1, file2])
         assert set(config.databases) == {"db1", "db2"}
-        assert set(config.metrics) == {"m1", "m2"} | GLOBAL_METRICS
+        assert set(config.metrics) == {"m1", "m2"} | BUILTIN_METRICS
         assert set(config.queries) == {"q1", "q2"}
 
     def test_load_multiple_files_duplicated_database(