Auto-round scores down to align scores & medals #74

Merged · 4 commits · Aug 22, 2024

src/dbt_score/formatters/human_readable_formatter.py (11 changes: 4 additions & 7 deletions)
@@ -1,6 +1,5 @@
 """Human readable formatter."""
 
-
 from typing import Any
 
 from dbt_score.evaluation import ModelResultsType
@@ -34,9 +33,7 @@ def model_evaluated(
         """Callback when a model has been evaluated."""
         if score.value < self._config.fail_any_model_under:
             self._failed_models.append((model, score))
-        print(
-            f"{score.badge} {self.bold(model.name)} (score: {round(score.value, 1)!s})"
-        )
+        print(f"{score.badge} {self.bold(model.name)} (score: {score.rounded_value!s})")
         for rule, result in results.items():
             if result is None:
                 print(f"{self.indent}{self.label_ok} {rule.source()}")
@@ -51,16 +48,16 @@ def model_evaluated(
 
     def project_evaluated(self, score: Score) -> None:
         """Callback when a project has been evaluated."""
-        print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}")
+        print(f"Project score: {self.bold(str(score.rounded_value))} {score.badge}")
 
         if len(self._failed_models) > 0:
             print()
             print(
                 f"Error: model score too low, fail_any_model_under = "
                 f"{self._config.fail_any_model_under}"
             )
-            for model, score in self._failed_models:
-                print(f"Model {model.name} scored {round(score.value, 1)}")
+            for model, model_score in self._failed_models:
+                print(f"Model {model.name} scored {model_score.value}")
 
         elif score.value < self._config.fail_project_under:
             print()

Review comment from the PR author on the new `for model, model_score in self._failed_models:` line: "We were redefining score here. So whilst here, I've updated this."
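For context, here is a minimal, illustrative sketch (not part of the PR; ANSI bolding omitted) of what the `project_evaluated` output change means in practice, using the 9.99 / 🥈 values from the new tests further down:

```python
import math

# Hypothetical near-perfect score with a silver badge, mirroring the new tests.
value, badge = 9.99, "🥈"

# Before this PR: round() bumps 9.99 up to 10.0, so the printed score reads like gold.
print(f"Project score: {round(value, 1)} {badge}")  # Project score: 10.0 🥈

# After this PR: the score is floored to one decimal (see Score.rounded_value in
# src/dbt_score/scoring.py below), so it stays consistent with the silver badge.
rounded_value = math.floor(value * 10) / 10
print(f"Project score: {rounded_value} {badge}")  # Project score: 9.9 🥈
```
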
src/dbt_score/scoring.py (6 changes: 6 additions & 0 deletions)
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import math
 import typing
 from dataclasses import dataclass
 
@@ -19,6 +20,11 @@ class Score:
     value: float
     badge: str
 
+    @property
+    def rounded_value(self) -> float:
+        """Auto-round score down to 1 decimal place."""
+        return math.floor(self.value * 10) / 10
+
 
 class Scorer:
     """Logic for computing scores."""
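To show the rounding behaviour in isolation, here is a self-contained sketch that copies the `Score` fields and the new `rounded_value` property from the diff above (the `Scorer` logic and badge thresholds are left out):

```python
import math
from dataclasses import dataclass


@dataclass
class Score:
    """Trimmed-down copy of the Score dataclass from the diff above."""

    value: float
    badge: str

    @property
    def rounded_value(self) -> float:
        """Auto-round score down to 1 decimal place."""
        return math.floor(self.value * 10) / 10


score = Score(9.99, "🥈")
print(round(score.value, 1))  # 10.0 -- plain rounding makes a silver score read as a perfect 10
print(score.rounded_value)    # 9.9  -- flooring keeps the printed score and the medal aligned
```

Flooring rather than rounding guarantees the displayed value never exceeds the real score, which is what keeps the printed number consistent with the awarded medal.
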
tests/formatters/test_human_readable_formatter.py (44 changes: 43 additions & 1 deletion)
@@ -1,6 +1,5 @@
 """Unit tests for the human readable formatter."""
 
-
 from dbt_score.evaluation import ModelResultsType
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
 from dbt_score.rule import RuleViolation
@@ -48,6 +47,49 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loader):
     assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n"
 
 
+def test_human_readable_formatter_near_perfect_model_score(
+    capsys,
+    default_config,
+    manifest_loader,
+    model1,
+    rule_severity_low,
+    rule_severity_medium,
+    rule_severity_critical,
+):
+    """Ensure the formatter has the correct output after model evaluation."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    results: ModelResultsType = {
+        rule_severity_low: None,
+        rule_severity_medium: Exception("Oh noes"),
+        rule_severity_critical: RuleViolation("Error"),
+    }
+    formatter.model_evaluated(model1, results, Score(9.99, "🥈"))
+    stdout = capsys.readouterr().out
+    assert (
+        stdout
+        == """🥈 \x1B[1mmodel1\x1B[0m (score: 9.9)
+\x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low
+\x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
+\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+
+"""
+    )
+
+
+def test_human_readable_formatter_near_perfect_project_score(
+    capsys, default_config, manifest_loader
+):
+    """Ensure the formatter has the correct output after project evaluation."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    formatter.project_evaluated(Score(9.99, "🥈"))
+    stdout = capsys.readouterr().out
+    assert stdout == "Project score: \x1B[1m9.9\x1B[0m 🥈\n"
+
+
 def test_human_readable_formatter_low_model_score(
     capsys,
     default_config,