diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index 328a01e..ba49a53 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -1,6 +1,5 @@
 """Human readable formatter."""
 
-
 from typing import Any
 
 from dbt_score.evaluation import ModelResultsType
@@ -34,9 +33,7 @@ def model_evaluated(
         """Callback when a model has been evaluated."""
         if score.value < self._config.fail_any_model_under:
             self._failed_models.append((model, score))
-        print(
-            f"{score.badge} {self.bold(model.name)} (score: {round(score.value, 1)!s})"
-        )
+        print(f"{score.badge} {self.bold(model.name)} (score: {score.rounded_value!s})")
         for rule, result in results.items():
             if result is None:
                 print(f"{self.indent}{self.label_ok} {rule.source()}")
@@ -51,7 +48,7 @@ def model_evaluated(
 
     def project_evaluated(self, score: Score) -> None:
         """Callback when a project has been evaluated."""
-        print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}")
+        print(f"Project score: {self.bold(str(score.rounded_value))} {score.badge}")
 
         if len(self._failed_models) > 0:
             print()
@@ -59,8 +56,8 @@ def project_evaluated(self, score: Score) -> None:
                 f"Error: model score too low, fail_any_model_under = "
                 f"{self._config.fail_any_model_under}"
             )
-            for model, score in self._failed_models:
-                print(f"Model {model.name} scored {round(score.value, 1)}")
+            for model, model_score in self._failed_models:
+                print(f"Model {model.name} scored {model_score.value}")
 
         elif score.value < self._config.fail_project_under:
             print()
diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py
index 60b05c3..5d80306 100644
--- a/src/dbt_score/scoring.py
+++ b/src/dbt_score/scoring.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import math
 import typing
 from dataclasses import dataclass
 
@@ -19,6 +20,11 @@ class Score:
     value: float
     badge: str
 
+    @property
+    def rounded_value(self) -> float:
+        """Auto-round score down to 1 decimal place."""
+        return math.floor(self.value * 10) / 10
+
 
 class Scorer:
     """Logic for computing scores."""
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index b4afeb1..6a3438a 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -1,6 +1,5 @@
 """Unit tests for the human readable formatter."""
 
-
 from dbt_score.evaluation import ModelResultsType
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
 from dbt_score.rule import RuleViolation
@@ -48,6 +47,49 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loade
     assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n"
 
 
+def test_human_readable_formatter_near_perfect_model_score(
+    capsys,
+    default_config,
+    manifest_loader,
+    model1,
+    rule_severity_low,
+    rule_severity_medium,
+    rule_severity_critical,
+):
+    """Ensure the formatter has the correct output after model evaluation."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    results: ModelResultsType = {
+        rule_severity_low: None,
+        rule_severity_medium: Exception("Oh noes"),
+        rule_severity_critical: RuleViolation("Error"),
+    }
+    formatter.model_evaluated(model1, results, Score(9.99, "🥈"))
+    stdout = capsys.readouterr().out
+    assert (
+        stdout
+        == """🥈 \x1B[1mmodel1\x1B[0m (score: 9.9)
+    \x1B[1;32mOK  \x1B[0m tests.conftest.rule_severity_low
+    \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
+    \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+
+"""
+    )
+
+
+def test_human_readable_formatter_near_perfect_project_score(
+    capsys, default_config, manifest_loader
+):
+    """Ensure the formatter has the correct output after project evaluation."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    formatter.project_evaluated(Score(9.99, "🥈"))
+    stdout = capsys.readouterr().out
+    assert stdout == "Project score: \x1B[1m9.9\x1B[0m 🥈\n"
+
+
 def test_human_readable_formatter_low_model_score(
     capsys,
     default_config,