Skip to content

Commit

Permalink
Decompose RatingResult (#2091)
Browse files Browse the repository at this point in the history
  • Loading branch information
Kakadus authored Jan 15, 2024
1 parent faf42a6 commit 71cf5f9
Show file tree
Hide file tree
Showing 6 changed files with 61 additions and 42 deletions.
7 changes: 3 additions & 4 deletions evap/results/exporters.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from evap.evaluation.models import CourseType, Degree, Evaluation, Questionnaire
from evap.evaluation.tools import ExcelExporter
from evap.results.tools import (
RatingResult,
calculate_average_course_distribution,
calculate_average_distribution,
distribution_to_grade,
Expand Down Expand Up @@ -127,9 +128,7 @@ def filter_evaluations(semesters, evaluation_states, degrees, course_types, cont
for questionnaire_result in contribution_result.questionnaire_results:
# RatingQuestion.counts is a tuple of integers or None; if this tuple is all zero, we want to exclude it
if all(
not question_result.question.is_rating_question
or question_result.counts is None
or sum(question_result.counts) == 0
not question_result.question.is_rating_question or not RatingResult.has_answers(question_result)
for question_result in questionnaire_result.question_results
):
continue
Expand Down Expand Up @@ -266,7 +265,7 @@ def write_questionnaire(self, questionnaire, evaluations_with_results, contribut
approval_count = 0

for grade_result in results[questionnaire.id]:
if grade_result.question.id != question.id or not grade_result.has_answers:
if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result):
continue
values.append(grade_result.average * grade_result.count_sum)
count_sum += grade_result.count_sum
Expand Down
4 changes: 2 additions & 2 deletions evap/results/templates/distribution_widget.html
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{% load results_templatetags %}

{% spaceless %}
{% if question_result.has_answers %}
{% if question_result|has_answers %}
<div class="result-widget d-flex"
data-bs-toggle="tooltip" data-bs-placement="left" data-fallback-placement='["left", "bottom"]'
title="{% include 'result_widget_tooltip.html' with question_result=question_result %}"
Expand All @@ -15,7 +15,7 @@
<div class="d-flex" data-bs-toggle="tooltip" data-bs-placement="left" data-fallback-placement='["left", "bottom"]' title="{% trans 'Not enough answers were given.' %}">
{% include "distribution_with_grade_disabled.html" with icon="far fa-eye-slash" %}
<div class="badge-participants badge-disabled ms-2 ms-lg-3">
{% if question_result.is_published %}
{% if question_result|is_published %}
<span class="fas fa-user"></span> {{ question_result.count_sum }}
{% else %}
<span class="far fa-eye-slash"></span>
Expand Down
17 changes: 16 additions & 1 deletion evap/results/templatetags/results_templatetags.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
from django.template import Library

from evap.results.tools import STATES_WITH_RESULT_TEMPLATE_CACHING, get_grade_color, normalized_distribution
from evap.results.tools import (
STATES_WITH_RESULT_TEMPLATE_CACHING,
RatingResult,
get_grade_color,
normalized_distribution,
)

register = Library()

Expand All @@ -25,3 +30,13 @@ def evaluation_results_cache_timeout(evaluation):
@register.filter(name="participationclass")
def participationclass(number_of_voters, number_of_participants):
    """Bucket the voter/participant ratio into an integer 0..10 used as a CSS class suffix."""
    participation_ratio = number_of_voters / number_of_participants
    return round(10 * participation_ratio)


@register.filter
def has_answers(rating_result: RatingResult):
    """Template filter wrapping RatingResult.has_answers (templates cannot call classmethods with args)."""
    answered = RatingResult.has_answers(rating_result)
    return answered


@register.filter
def is_published(rating_result: RatingResult):
    """Template filter wrapping RatingResult.is_published (templates cannot call classmethods with args)."""
    published = RatingResult.is_published(rating_result)
    return published
8 changes: 4 additions & 4 deletions evap/results/tests/test_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,11 @@
)
from evap.evaluation.tests.tools import make_rating_answer_counters
from evap.results.tools import (
RatingResult,
cache_results,
calculate_average_course_distribution,
calculate_average_distribution,
can_textanswer_be_seen_by,
create_rating_result,
distribution_to_grade,
get_results,
get_results_cache_key,
Expand Down Expand Up @@ -371,7 +371,7 @@ def test_result_calculation_with_no_contributor_rating_question(self):
def test_unipolarized_unipolar(self):
answer_counters = make_rating_answer_counters(self.question_likert, self.general_contribution, [5, 3, 1, 1, 0])

result = RatingResult(self.question_likert, answer_counters)
result = create_rating_result(self.question_likert, answer_counters)
distribution = unipolarized_distribution(result)
self.assertAlmostEqual(distribution[0], 0.5)
self.assertAlmostEqual(distribution[1], 0.3)
Expand All @@ -384,7 +384,7 @@ def test_unipolarized_bipolar(self):
self.question_bipolar, self.general_contribution, [0, 1, 4, 8, 2, 2, 3]
)

result = RatingResult(self.question_bipolar, answer_counters)
result = create_rating_result(self.question_bipolar, answer_counters)
distribution = unipolarized_distribution(result)
self.assertAlmostEqual(distribution[0], 0.4)
self.assertAlmostEqual(distribution[1], 0.2)
Expand All @@ -396,7 +396,7 @@ def test_unipolarized_yesno(self):
question_yesno = baker.make(Question, questionnaire=self.questionnaire, type=QuestionType.POSITIVE_YES_NO)
answer_counters = make_rating_answer_counters(question_yesno, self.general_contribution, [57, 43])

result = RatingResult(question_yesno, answer_counters)
result = create_rating_result(question_yesno, answer_counters)
distribution = unipolarized_distribution(result)
self.assertAlmostEqual(distribution[0], 0.57)
self.assertEqual(distribution[1], 0)
Expand Down
65 changes: 35 additions & 30 deletions evap/results/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from collections.abc import Iterable
from copy import copy
from math import ceil, modf
from typing import cast
from typing import TypeGuard, cast

from django.conf import settings
from django.core.cache import caches
Expand Down Expand Up @@ -41,28 +41,43 @@ def __init__(self, visible_by_contribution: Iterable[UserProfile], visible_by_de
self.visible_by_delegation_count = visible_by_delegation_count


def create_rating_result(question, answer_counters, additional_text_result=None):
    """Factory selecting the RatingResult subclass that matches the available answer data.

    - no counters at all -> plain (unpublished) RatingResult
    - counters with at least one non-zero count -> AnsweredRatingResult
    - counters that are all zero -> PublishedRatingResult (published, but nothing answered)
    """
    if answer_counters is None:
        return RatingResult(question, additional_text_result)
    any_answer_given = any(counter.count for counter in answer_counters)
    result_class = AnsweredRatingResult if any_answer_given else PublishedRatingResult
    return result_class(question, answer_counters, additional_text_result)


class RatingResult:
    """Base class for results of rating questions.

    Subclasses (PublishedRatingResult, AnsweredRatingResult) represent progressively
    stronger states; use the classmethod narrowers below instead of isinstance checks
    at call sites.
    """

    def __init__(self, question, additional_text_result=None):
        assert question.is_rating_question
        # Copy and strip cached relations so the stored question is safe to cache independently.
        self.question = discard_cached_related_objects(copy(question))
        self.additional_text_result = additional_text_result

    @classmethod
    def is_published(cls, rating_result) -> TypeGuard["PublishedRatingResult"]:
        """True iff counts are available on the result (it is a PublishedRatingResult)."""
        return isinstance(rating_result, PublishedRatingResult)

    @classmethod
    def has_answers(cls, rating_result) -> TypeGuard["AnsweredRatingResult"]:
        """True iff at least one answer was given (it is an AnsweredRatingResult)."""
        return isinstance(rating_result, AnsweredRatingResult)

    @property
    def choices(self):
        # Choice metadata is keyed by the question's type.
        return CHOICES[self.question.type]


class PublishedRatingResult(RatingResult):
def __init__(self, question, answer_counters, additional_text_result=None):
super().__init__(question, additional_text_result)
counts = OrderedDict((value, 0) for value in self.choices.values if value != NO_ANSWER)
for answer_counter in answer_counters:
counts[answer_counter.answer] = answer_counter.count
self.counts = tuple(counts.values())

@property
def count_sum(self) -> int | None:
if not self.is_published:
return None
def count_sum(self) -> int:
return sum(self.counts)

@property
Expand All @@ -72,25 +87,15 @@ def minus_balance_count(self) -> float:
return (self.count_sum - portion_left) / 2

@property
def approval_count(self) -> int | None:
def approval_count(self) -> int:
assert self.question.is_yes_no_question
if not self.is_published:
return None
return self.counts[0] if self.question.is_positive_yes_no_question else self.counts[1]

@property
def average(self) -> float | None:
if not self.has_answers:
return None
return sum(grade * count for count, grade in zip(self.counts, self.choices.grades)) / self.count_sum

@property
def has_answers(self) -> bool:
return self.is_published and any(count != 0 for count in self.counts)

class AnsweredRatingResult(PublishedRatingResult):
@property
def is_published(self) -> bool:
return self.counts is not None
def average(self) -> float:
return sum(grade * count for count, grade in zip(self.counts, self.choices.grades)) / self.count_sum


class TextResult:
Expand Down Expand Up @@ -137,7 +142,7 @@ def has_answers(self) -> bool:
return True
if question.is_rating_question:
assert isinstance(question_result, RatingResult)
return question_result.has_answers
return RatingResult.has_answers(question_result)
return False


Expand All @@ -161,7 +166,7 @@ def get_single_result_rating_result(evaluation):
assert 1 <= len(answer_counters) <= 5

question = Question.objects.get(questionnaire__name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
return RatingResult(question, answer_counters)
return create_rating_result(question, answer_counters)


def get_results_cache_key(evaluation):
Expand Down Expand Up @@ -234,7 +239,7 @@ def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool =
answer_counters = racs_per_contribution_question.get((contribution.id, question.id), [])
else:
answer_counters = None
results.append(RatingResult(question, answer_counters, additional_text_result=text_result))
results.append(create_rating_result(question, answer_counters, additional_text_result=text_result))
elif question.is_text_question and evaluation.can_publish_text_results:
assert text_result is not None
results.append(text_result)
Expand Down
2 changes: 1 addition & 1 deletion evap/results/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -366,7 +366,7 @@ def add_warnings(evaluation, evaluation_result):
for rating_result in rating_results:
rating_result.warning = (
questionnaire_result.warning
or rating_result.has_answers
or RatingResult.has_answers(rating_result)
and rating_result.count_sum < questionnaire_warning_thresholds[questionnaire_result.questionnaire]
)

Expand Down

0 comments on commit 71cf5f9

Please sign in to comment.