From bbdfb946c0222c293752f210ac55b3cd4e820145 Mon Sep 17 00:00:00 2001 From: Jelmer Borst Date: Thu, 22 Aug 2024 11:36:24 +0200 Subject: [PATCH 1/4] Auto-round scores down --- src/dbt_score/formatters/human_readable_formatter.py | 11 ++++------- src/dbt_score/scoring.py | 5 +++++ tests/test_scoring.py | 11 +++++------ 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index 328a01e..8aa5d92 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -1,6 +1,5 @@ """Human readable formatter.""" - from typing import Any from dbt_score.evaluation import ModelResultsType @@ -34,9 +33,7 @@ def model_evaluated( """Callback when a model has been evaluated.""" if score.value < self._config.fail_any_model_under: self._failed_models.append((model, score)) - print( - f"{score.badge} {self.bold(model.name)} (score: {round(score.value, 1)!s})" - ) + print(f"{score.badge} {self.bold(model.name)} (score: {score.value!s})") for rule, result in results.items(): if result is None: print(f"{self.indent}{self.label_ok} {rule.source()}") @@ -51,7 +48,7 @@ def model_evaluated( def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" - print(f"Project score: {self.bold(str(round(score.value, 1)))} {score.badge}") + print(f"Project score: {self.bold(str(score.value))} {score.badge}") if len(self._failed_models) > 0: print() @@ -59,8 +56,8 @@ def project_evaluated(self, score: Score) -> None: f"Error: model score too low, fail_any_model_under = " f"{self._config.fail_any_model_under}" ) - for model, score in self._failed_models: - print(f"Model {model.name} scored {round(score.value, 1)}") + for model, model_score in self._failed_models: + print(f"Model {model.name} scored {model_score.value}") elif score.value < self._config.fail_project_under: print() diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py index 60b05c3..80f627d 100644 --- a/src/dbt_score/scoring.py +++ b/src/dbt_score/scoring.py @@ -2,6 +2,7 @@ from __future__ import annotations +import math import typing from dataclasses import dataclass @@ -19,6 +20,10 @@ class Score: value: float badge: str + def __post_init__(self) -> None: + """Auto-round score down to 1 decimal place.""" + self.value = math.floor(self.value * 10) / 10 + class Scorer: """Logic for computing scores.""" diff --git a/tests/test_scoring.py b/tests/test_scoring.py index e47a493..05ea459 100644 --- a/tests/test_scoring.py +++ b/tests/test_scoring.py @@ -1,6 +1,5 @@ """Unit tests for the scoring module.""" - from dbt_score.rule import RuleViolation from dbt_score.scoring import Score, Scorer @@ -18,7 +17,7 @@ def test_scorer_model_severity_low(default_config, rule_severity_low): assert scorer.score_model({rule_severity_low: Exception()}).value == 10.0 assert ( round(scorer.score_model({rule_severity_low: RuleViolation("error")}).value, 2) - == 6.67 + == 6.6 ) @@ -31,7 +30,7 @@ def test_scorer_model_severity_medium(default_config, rule_severity_medium): round( scorer.score_model({rule_severity_medium: RuleViolation("error")}).value, 2 ) - == 3.33 + == 3.3 ) @@ -83,7 +82,7 @@ def test_scorer_model_multiple_rules( ).value, 2, ) - == 6.67 + == 6.6 ) assert ( @@ -97,7 +96,7 @@ def test_scorer_model_multiple_rules( ).value, 2, ) - == 7.78 + == 7.7 ) assert ( @@ -111,7 +110,7 @@ def test_scorer_model_multiple_rules( ).value, 2, ) - == 8.89 + == 8.8 
) From 19c0637b33d612429bd350ea13dd0250973cbb9d Mon Sep 17 00:00:00 2001 From: Jelmer Borst Date: Thu, 22 Aug 2024 14:42:37 +0200 Subject: [PATCH 2/4] Only round-down for human output --- .../formatters/human_readable_formatter.py | 4 +- src/dbt_score/scoring.py | 5 +- .../test_human_readable_formatter.py | 66 +++++++++++++++---- tests/test_scoring.py | 11 ++-- 4 files changed, 65 insertions(+), 21 deletions(-) diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index 8aa5d92..4cf8350 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -33,7 +33,7 @@ def model_evaluated( """Callback when a model has been evaluated.""" if score.value < self._config.fail_any_model_under: self._failed_models.append((model, score)) - print(f"{score.badge} {self.bold(model.name)} (score: {score.value!s})") + print(f"{score.badge} {self.bold(model.name)} (score: {score.human_value!s})") for rule, result in results.items(): if result is None: print(f"{self.indent}{self.label_ok} {rule.source()}") @@ -48,7 +48,7 @@ def model_evaluated( def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" - print(f"Project score: {self.bold(str(score.value))} {score.badge}") + print(f"Project score: {self.bold(str(score.human_value))} {score.badge}") if len(self._failed_models) > 0: print() diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py index 80f627d..d6be694 100644 --- a/src/dbt_score/scoring.py +++ b/src/dbt_score/scoring.py @@ -20,9 +20,10 @@ class Score: value: float badge: str - def __post_init__(self) -> None: + @property + def human_value(self) -> float: """Auto-round score down to 1 decimal place.""" - self.value = math.floor(self.value * 10) / 10 + return math.floor(self.value * 10) / 10 class Scorer: diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py index b4afeb1..5f6c256 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ b/tests/formatters/test_human_readable_formatter.py @@ -1,6 +1,5 @@ """Unit tests for the human readable formatter.""" - from dbt_score.evaluation import ModelResultsType from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter from dbt_score.rule import RuleViolation @@ -29,10 +28,10 @@ def test_human_readable_formatter_model( stdout = capsys.readouterr().out assert ( stdout - == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) - \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low - \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0) + \x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low + \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error """ ) @@ -45,7 +44,50 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loade ) formatter.project_evaluated(Score(10.0, "🥇")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n" + assert stdout == "Project score: \x1b[1m10.0\x1b[0m 🥇\n" + + +def test_human_readable_formatter_near_perfect_model_score( + capsys, + default_config, + manifest_loader, + model1, + rule_severity_low, + rule_severity_medium, + rule_severity_critical, +): + """Ensure the formatter has the 
correct output after model evaluation.""" + formatter = HumanReadableFormatter( + manifest_loader=manifest_loader, config=default_config + ) + results: ModelResultsType = { + rule_severity_low: None, + rule_severity_medium: Exception("Oh noes"), + rule_severity_critical: RuleViolation("Error"), + } + formatter.model_evaluated(model1, results, Score(9.99, "🥈")) + stdout = capsys.readouterr().out + assert ( + stdout + == """🥈 \x1b[1mmodel1\x1b[0m (score: 9.9) + \x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low + \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error + +""" + ) + + +def test_human_readable_formatter_near_perfect_project_score( + capsys, default_config, manifest_loader +): + """Ensure the formatter has the correct output after project evaluation.""" + formatter = HumanReadableFormatter( + manifest_loader=manifest_loader, config=default_config + ) + formatter.project_evaluated(Score(9.99, "🥈")) + stdout = capsys.readouterr().out + assert stdout == "Project score: \x1b[1m9.9\x1b[0m 🥈\n" def test_human_readable_formatter_low_model_score( @@ -68,10 +110,10 @@ def test_human_readable_formatter_low_model_score( print() assert ( stdout - == """🚧 \x1B[1mmodel1\x1B[0m (score: 0.0) - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + == """🚧 \x1b[1mmodel1\x1b[0m (score: 0.0) + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error -Project score: \x1B[1m0.0\x1B[0m 🚧 +Project score: \x1b[1m0.0\x1b[0m 🚧 Error: model score too low, fail_any_model_under = 5.0 Model model1 scored 0.0 @@ -99,10 +141,10 @@ def test_human_readable_formatter_low_project_score( print() assert ( stdout - == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) - \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0) + \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error -Project score: \x1B[1m0.0\x1B[0m 🚧 +Project score: \x1b[1m0.0\x1b[0m 🚧 Error: project score too low, fail_project_under = 5.0 """ diff --git a/tests/test_scoring.py b/tests/test_scoring.py index 05ea459..e47a493 100644 --- a/tests/test_scoring.py +++ b/tests/test_scoring.py @@ -1,5 +1,6 @@ """Unit tests for the scoring module.""" + from dbt_score.rule import RuleViolation from dbt_score.scoring import Score, Scorer @@ -17,7 +18,7 @@ def test_scorer_model_severity_low(default_config, rule_severity_low): assert scorer.score_model({rule_severity_low: Exception()}).value == 10.0 assert ( round(scorer.score_model({rule_severity_low: RuleViolation("error")}).value, 2) - == 6.6 + == 6.67 ) @@ -30,7 +31,7 @@ def test_scorer_model_severity_medium(default_config, rule_severity_medium): round( scorer.score_model({rule_severity_medium: RuleViolation("error")}).value, 2 ) - == 3.3 + == 3.33 ) @@ -82,7 +83,7 @@ def test_scorer_model_multiple_rules( ).value, 2, ) - == 6.6 + == 6.67 ) assert ( @@ -96,7 +97,7 @@ def test_scorer_model_multiple_rules( ).value, 2, ) - == 7.7 + == 7.78 ) assert ( @@ -110,7 +111,7 @@ def test_scorer_model_multiple_rules( ).value, 2, ) - == 8.8 + == 8.89 ) From 68deecd9f5125837bac91c64884d8ca8654ecef0 Mon Sep 17 00:00:00 2001 From: Jelmer Borst Date: Thu, 22 Aug 2024 16:18:39 +0200 Subject: [PATCH 3/4] Revert lowercase hex --- .../test_human_readable_formatter.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/formatters/test_human_readable_formatter.py 
b/tests/formatters/test_human_readable_formatter.py index 5f6c256..6a3438a 100644 --- a/tests/formatters/test_human_readable_formatter.py +++ b/tests/formatters/test_human_readable_formatter.py @@ -28,10 +28,10 @@ def test_human_readable_formatter_model( stdout = capsys.readouterr().out assert ( stdout - == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0) - \x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low - \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes - \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) + \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low + \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error """ ) @@ -44,7 +44,7 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loade ) formatter.project_evaluated(Score(10.0, "🥇")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1b[1m10.0\x1b[0m 🥇\n" + assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n" def test_human_readable_formatter_near_perfect_model_score( @@ -69,10 +69,10 @@ def test_human_readable_formatter_near_perfect_model_score( stdout = capsys.readouterr().out assert ( stdout - == """🥈 \x1b[1mmodel1\x1b[0m (score: 9.9) - \x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low - \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes - \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥈 \x1B[1mmodel1\x1B[0m (score: 9.9) + \x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low + \x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error """ ) @@ -87,7 +87,7 @@ def test_human_readable_formatter_near_perfect_project_score( ) formatter.project_evaluated(Score(9.99, "🥈")) stdout = capsys.readouterr().out - assert stdout == "Project score: \x1b[1m9.9\x1b[0m 🥈\n" + assert stdout == "Project score: \x1B[1m9.9\x1B[0m 🥈\n" def test_human_readable_formatter_low_model_score( @@ -110,10 +110,10 @@ def test_human_readable_formatter_low_model_score( print() assert ( stdout - == """🚧 \x1b[1mmodel1\x1b[0m (score: 0.0) - \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error + == """🚧 \x1B[1mmodel1\x1B[0m (score: 0.0) + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error -Project score: \x1b[1m0.0\x1b[0m 🚧 +Project score: \x1B[1m0.0\x1B[0m 🚧 Error: model score too low, fail_any_model_under = 5.0 Model model1 scored 0.0 @@ -141,10 +141,10 @@ def test_human_readable_formatter_low_project_score( print() assert ( stdout - == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0) - \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error + == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0) + \x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error -Project score: \x1b[1m0.0\x1b[0m 🚧 +Project score: \x1B[1m0.0\x1B[0m 🚧 Error: project score too low, fail_project_under = 5.0 """ From 8d87686f76019838aee1752db96b3468f8309449 Mon Sep 17 00:00:00 2001 From: Jelmer Borst Date: Thu, 22 Aug 2024 16:20:46 +0200 Subject: [PATCH 4/4] Rename property --- src/dbt_score/formatters/human_readable_formatter.py | 4 ++-- src/dbt_score/scoring.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py index 
4cf8350..ba49a53 100644 --- a/src/dbt_score/formatters/human_readable_formatter.py +++ b/src/dbt_score/formatters/human_readable_formatter.py @@ -33,7 +33,7 @@ def model_evaluated( """Callback when a model has been evaluated.""" if score.value < self._config.fail_any_model_under: self._failed_models.append((model, score)) - print(f"{score.badge} {self.bold(model.name)} (score: {score.human_value!s})") + print(f"{score.badge} {self.bold(model.name)} (score: {score.rounded_value!s})") for rule, result in results.items(): if result is None: print(f"{self.indent}{self.label_ok} {rule.source()}") @@ -48,7 +48,7 @@ def model_evaluated( def project_evaluated(self, score: Score) -> None: """Callback when a project has been evaluated.""" - print(f"Project score: {self.bold(str(score.human_value))} {score.badge}") + print(f"Project score: {self.bold(str(score.rounded_value))} {score.badge}") if len(self._failed_models) > 0: print() diff --git a/src/dbt_score/scoring.py b/src/dbt_score/scoring.py index d6be694..5d80306 100644 --- a/src/dbt_score/scoring.py +++ b/src/dbt_score/scoring.py @@ -21,7 +21,7 @@ class Score: badge: str @property - def human_value(self) -> float: + def rounded_value(self) -> float: """Auto-round score down to 1 decimal place.""" return math.floor(self.value * 10) / 10
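
Note: the sketch below is not part of the patch series; it is a minimal standalone illustration of the behaviour the series converges on. `Score.rounded_value` floors the score to one decimal place for human-readable output, so a near-perfect 9.99 is reported as 9.9 rather than being rounded up to 10.0, while thresholds such as `fail_any_model_under` still compare against the raw `value`.

import math
from dataclasses import dataclass


@dataclass
class Score:
    """Simplified stand-in for dbt_score.scoring.Score after this series."""

    value: float
    badge: str

    @property
    def rounded_value(self) -> float:
        """Round the score down to 1 decimal place for human-readable output."""
        return math.floor(self.value * 10) / 10


score = Score(9.99, "🥈")
print(score.value)            # 9.99 -- raw value, still used for pass/fail thresholds
print(score.rounded_value)    # 9.9  -- floored value shown to humans
print(round(score.value, 1))  # 10.0 -- plain rounding would over-report the score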