diff --git a/openassessment/__init__.py b/openassessment/__init__.py index 9eca73b743..23ecb7c46a 100644 --- a/openassessment/__init__.py +++ b/openassessment/__init__.py @@ -2,4 +2,4 @@ Initialization Information for Open Assessment Module """ -__version__ = '6.0.34' +__version__ = '6.1.0' diff --git a/openassessment/assessment/score_type_constants.py b/openassessment/assessment/score_type_constants.py index f9bf13a002..0da1b88b87 100644 --- a/openassessment/assessment/score_type_constants.py +++ b/openassessment/assessment/score_type_constants.py @@ -1,5 +1,25 @@ """ Constant strings used to identify what type of score an Assessment is. Used in the 'score_type' field """ +from django.utils.translation import gettext as _ + PEER_TYPE = "PE" SELF_TYPE = "SE" STAFF_TYPE = "ST" + + +def score_type_to_string(score_type: str) -> str: + """ + Converts the given score type into its string representation. + + Args: + score_type (str): System representation of the score type. + + Returns: + (str) Representation of score_type as needed in Staff Grader Template. + """ + SCORE_TYPE_MAP = { + PEER_TYPE: _("Peer"), + SELF_TYPE: _("Self"), + STAFF_TYPE: _("Staff"), + } + return SCORE_TYPE_MAP.get(score_type, _("Unknown")) diff --git a/openassessment/conf/locale/en/LC_MESSAGES/django.po b/openassessment/conf/locale/en/LC_MESSAGES/django.po index 2b244c3755..892087ea25 100644 --- a/openassessment/conf/locale/en/LC_MESSAGES/django.po +++ b/openassessment/conf/locale/en/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: edx-ora2\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-08 13:43+0000\n" +"POT-Creation-Date: 2024-02-16 19:47-0500\n" "PO-Revision-Date: 2014-06-04 15:41-0400\n" "Last-Translator: Muhammad Ayub khan \n" "Language-Team: openedx-translation \n" @@ -49,99 +49,120 @@ msgstr "" msgid "Example {example_number} is missing an option for \"{criterion_name}\"" msgstr "" -#: data.py:534 +#: assessment/score_type_constants.py:21 xblock/grade_mixin.py:524 +msgid "Peer" +msgstr "" + +#: assessment/score_type_constants.py:22 +msgid "Self" +msgstr "" + +#: assessment/score_type_constants.py:23 +msgid "Staff" +msgstr "" + +#: assessment/score_type_constants.py:25 +msgid "Unknown" +msgstr "" + +#: data.py:584 #, python-brace-format msgid "Criterion {number}: {label}" msgstr "" -#: data.py:536 +#: data.py:586 #, python-brace-format msgid "Points {number}" msgstr "" -#: data.py:537 +#: data.py:587 #, python-brace-format msgid "Median Score {number}" msgstr "" -#: data.py:538 +#: data.py:588 #, python-brace-format msgid "Feedback {number}" msgstr "" -#: data.py:867 +#: data.py:917 msgid "Item ID" msgstr "" -#: data.py:868 +#: data.py:918 msgid "Submission ID" msgstr "" -#: data.py:880 +#: data.py:930 msgid "Anonymized Student ID" msgstr "" -#: data.py:911 +#: data.py:961 msgid "Assessment ID" msgstr "" -#: data.py:912 +#: data.py:962 msgid "Assessment Scored Date" msgstr "" -#: data.py:913 +#: data.py:963 msgid "Assessment Scored Time" msgstr "" -#: data.py:914 +#: data.py:964 msgid "Assessment Type" msgstr "" -#: data.py:915 +#: data.py:965 msgid "Anonymous Scorer Id" msgstr "" -#: data.py:917 +#: data.py:967 #: templates/legacy/staff_area/oa_student_info_assessment_detail.html:59 msgid "Overall Feedback" msgstr "" -#: data.py:918 +#: data.py:968 msgid "Assessment Score Earned" msgstr "" -#: data.py:919 +#: data.py:969 msgid "Assessment Scored At" msgstr "" -#: data.py:920 +#: data.py:970 msgid "Date/Time Final Score Given" msgstr "" -#: data.py:921 +#: data.py:971 msgid 
"Final Score Earned" msgstr "" -#: data.py:922 +#: data.py:972 msgid "Final Score Possible" msgstr "" -#: data.py:923 +#: data.py:973 msgid "Feedback Statements Selected" msgstr "" -#: data.py:924 +#: data.py:974 msgid "Feedback on Assessment" msgstr "" -#: data.py:926 +#: data.py:976 msgid "Response Files" msgstr "" -#: data.py:1317 +#: data.py:1367 msgid "No description provided." msgstr "" +#: data.py:1625 templates/legacy/edit/oa_edit_criterion.html:54 +#: xblock/studio_mixin.py:57 +msgid "None" +msgstr "" + #: templates/legacy/edit/oa_edit.html:28 msgid "Save" msgstr "" @@ -343,10 +364,6 @@ msgstr "" msgid "Feedback for This Criterion" msgstr "" -#: templates/legacy/edit/oa_edit_criterion.html:54 xblock/studio_mixin.py:57 -msgid "None" -msgstr "" - #: templates/legacy/edit/oa_edit_criterion.html:55 xblock/studio_mixin.py:56 msgid "Optional" msgstr "" @@ -1075,8 +1092,8 @@ msgstr "" msgid "" "\n" " This is a team assignment for team-set \"%(teamset_name)s\".\n" -" You are currently not on a team in team-set \"%(teamset_name)s" -"\".\n" +" You are currently not on a team in team-set " +"\"%(teamset_name)s\".\n" " You must be on a team in team-set \"%(teamset_name)s\" to access " "this team assignment.\n" " " @@ -1176,8 +1193,8 @@ msgid "" "\n" " \n" +"%(time_until)s)\" data-timezone=\"%(user_timezone)s\" data-" +"language=\"%(user_language)s\">\n" " " msgstr "" @@ -1188,8 +1205,8 @@ msgid "" "\n" " \n" +"%(time_until)s)\" data-timezone=\"%(user_timezone)s\" data-" +"language=\"%(user_language)s\">\n" " " msgstr "" @@ -1313,10 +1330,10 @@ msgstr "" #, python-format, python-brace-format msgid "" "\n" -" \n" +" \n" " " msgstr "" @@ -1325,10 +1342,10 @@ msgstr "" #, python-format, python-brace-format msgid "" "\n" -" \n" +" \n" " " msgstr "" @@ -1553,8 +1570,9 @@ msgstr "" #: templates/legacy/response/oa_response.html:314 msgid "" "\n" -" Learn more about team assignments here: (link)\n" " " msgstr "" @@ -1690,10 +1708,10 @@ msgstr "" #, python-format, python-brace-format msgid "" "\n" -" \n" +" \n" " " msgstr "" @@ -1702,10 +1720,10 @@ msgstr "" #, python-format, python-brace-format msgid "" "\n" -" \n" +" \n" " " msgstr "" @@ -2122,8 +2140,8 @@ msgid "" "\n" " \n" +"%(time_until)s)\" data-timezone=\"%(user_timezone)s\" data-" +"language=\"%(user_language)s\">\n" " " msgstr "" @@ -2260,10 +2278,6 @@ msgstr "" msgid "Waiting for peer reviews" msgstr "" -#: xblock/grade_mixin.py:524 -msgid "Peer" -msgstr "" - #: xblock/grade_mixin.py:655 msgid "The grade for this problem is determined by your Staff Grade." 
msgstr "" diff --git a/openassessment/conf/locale/en/LC_MESSAGES/djangojs.po b/openassessment/conf/locale/en/LC_MESSAGES/djangojs.po index bde9be3427..307f845b31 100644 --- a/openassessment/conf/locale/en/LC_MESSAGES/djangojs.po +++ b/openassessment/conf/locale/en/LC_MESSAGES/djangojs.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: edx-ora2\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-08 13:43+0000\n" +"POT-Creation-Date: 2024-02-16 19:47-0500\n" "PO-Revision-Date: 2014-06-04 15:41-0400\n" "Last-Translator: Muhammad Ayub khan \n" "Language-Team: openedx-translation \n" @@ -145,66 +145,90 @@ msgid "Demo the new Grading Experience" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:97 msgid "Unit Name" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:98 msgid "Units" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:105 msgid "Assessment" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:106 msgid "Assessments" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:113 +#: xblock/static/js/src/lms/oa_course_items_listing.js:114 msgid "Total Responses" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:121 +#: xblock/static/js/src/lms/oa_course_items_listing.js:122 msgid "Training" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:129 +#: xblock/static/js/src/lms/oa_course_items_listing.js:130 msgid "Peer" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:137 +#: xblock/static/js/src/lms/oa_course_items_listing.js:138 msgid "Self" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:145 +#: xblock/static/js/src/lms/oa_course_items_listing.js:146 msgid "Waiting" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:153 +#: xblock/static/js/src/lms/oa_course_items_listing.js:154 msgid "Staff" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:161 +#: xblock/static/js/src/lms/oa_course_items_listing.js:162 msgid "Final Grade Received" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:169 msgid "Staff Grader" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:200 msgid "List of Open Assessments is unavailable" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:302 +#: xblock/static/js/src/lms/oa_course_items_listing.js:353 msgid "Please wait" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:326 msgid "Block view is unavailable" msgstr 
"" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:319 +#: xblock/static/js/src/lms/oa_course_items_listing.js:338 msgid "Back to Full List" msgstr "" @@ -374,6 +398,7 @@ msgid "Error getting the number of ungraded responses" msgstr "" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js:377 +#: xblock/static/js/src/lms/oa_staff_area.js:538 msgid "" "If you leave this page without submitting your staff assessment, you will " "lose any work you have done." diff --git a/openassessment/conf/locale/eo/LC_MESSAGES/django.mo b/openassessment/conf/locale/eo/LC_MESSAGES/django.mo index 3dfc689288..9680a247a5 100644 Binary files a/openassessment/conf/locale/eo/LC_MESSAGES/django.mo and b/openassessment/conf/locale/eo/LC_MESSAGES/django.mo differ diff --git a/openassessment/conf/locale/eo/LC_MESSAGES/django.po b/openassessment/conf/locale/eo/LC_MESSAGES/django.po index 1a1ecb9c1e..68a7ce5241 100644 --- a/openassessment/conf/locale/eo/LC_MESSAGES/django.po +++ b/openassessment/conf/locale/eo/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: edx-ora2\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-08 13:43+0000\n" +"POT-Creation-Date: 2024-02-16 19:47-0500\n" "PO-Revision-Date: 2014-06-04 15:41-0400\n" "Last-Translator: Muhammad Ayub khan \n" "Language-Team: openedx-translation \n" @@ -67,6 +67,22 @@ msgstr "" "Éxämplé {example_number} ïs mïssïng än öptïön för \"{criterion_name}\" " "Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, ¢σηѕє¢тєтυя #" +#: assessment/score_type_constants.py xblock/grade_mixin.py +msgid "Peer" +msgstr "Péér Ⱡ'σяєм ι#" + +#: assessment/score_type_constants.py +msgid "Self" +msgstr "Sélf Ⱡ'σяєм ι#" + +#: assessment/score_type_constants.py +msgid "Staff" +msgstr "Stäff Ⱡ'σяєм ιρѕ#" + +#: assessment/score_type_constants.py +msgid "Unknown" +msgstr "Ûnknöwn Ⱡ'σяєм ιρѕυм #" + #: data.py #, python-brace-format msgid "Criterion {number}: {label}" @@ -159,6 +175,10 @@ msgstr "Réspönsé Fïlés Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт#" msgid "No description provided." msgstr "Nö désçrïptïön prövïdéd. Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, ¢ση#" +#: data.py templates/legacy/edit/oa_edit_criterion.html xblock/studio_mixin.py +msgid "None" +msgstr "Nöné Ⱡ'σяєм ι#" + #: templates/legacy/edit/oa_edit.html msgid "Save" msgstr "Sävé Ⱡ'σяєм ι#" @@ -409,10 +429,6 @@ msgstr "Àdd Öptïön Ⱡ'σяєм ιρѕυм ∂σłσ#" msgid "Feedback for This Criterion" msgstr "Féédßäçk för Thïs Çrïtérïön Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, ¢σηѕє#" -#: templates/legacy/edit/oa_edit_criterion.html xblock/studio_mixin.py -msgid "None" -msgstr "Nöné Ⱡ'σяєм ι#" - #: templates/legacy/edit/oa_edit_criterion.html xblock/studio_mixin.py msgid "Optional" msgstr "Öptïönäl Ⱡ'σяєм ιρѕυм ∂#" @@ -2844,10 +2860,6 @@ msgstr "Ýöür Çömménts Ⱡ'σяєм ιρѕυм ∂σłσя ѕι#" msgid "Waiting for peer reviews" msgstr "Wäïtïng för péér révïéws Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, ¢ση#" -#: xblock/grade_mixin.py -msgid "Peer" -msgstr "Péér Ⱡ'σяєм ι#" - #: xblock/grade_mixin.py msgid "The grade for this problem is determined by your Staff Grade." 
msgstr "" diff --git a/openassessment/conf/locale/eo/LC_MESSAGES/djangojs.po b/openassessment/conf/locale/eo/LC_MESSAGES/djangojs.po index f732378f1a..ae0a65906f 100644 --- a/openassessment/conf/locale/eo/LC_MESSAGES/djangojs.po +++ b/openassessment/conf/locale/eo/LC_MESSAGES/djangojs.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: edx-ora2\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-08 13:43+0000\n" +"POT-Creation-Date: 2024-02-16 19:47-0500\n" "PO-Revision-Date: 2014-06-04 15:41-0400\n" "Last-Translator: Muhammad Ayub khan \n" "Language-Team: openedx-translation \n" @@ -167,68 +167,92 @@ msgid "Demo the new Grading Experience" msgstr "Démö thé néw Grädïng Éxpérïénçé Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, ¢σηѕє¢т#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Unit Name" msgstr "Ûnït Nämé Ⱡ'σяєм ιρѕυм ∂σł#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Units" msgstr "Ûnïts Ⱡ'σяєм ιρѕ#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Assessment" msgstr "Àsséssmént Ⱡ'σяєм ιρѕυм ∂σłσ#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Assessments" msgstr "Àsséssménts Ⱡ'σяєм ιρѕυм ∂σłσя #" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Total Responses" msgstr "Tötäl Réspönsés Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт α#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Training" msgstr "Träïnïng Ⱡ'σяєм ιρѕυм ∂#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Peer" msgstr "Péér Ⱡ'σяєм ι#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Self" msgstr "Sélf Ⱡ'σяєм ι#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Waiting" msgstr "Wäïtïng Ⱡ'σяєм ιρѕυм #" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Staff" msgstr "Stäff Ⱡ'σяєм ιρѕ#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Final Grade Received" msgstr "Fïnäl Grädé Réçéïvéd Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, #" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Staff Grader" msgstr "Stäff Grädér Ⱡ'σяєм ιρѕυм ∂σłσя ѕ#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "List of Open Assessments is unavailable" msgstr "" "Lïst öf Öpén Àsséssménts ïs ünäväïläßlé Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, " "¢σηѕє¢тєтυя#" #: 
xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Please wait" msgstr "Pléäsé wäït Ⱡ'σяєм ιρѕυм ∂σłσя #" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Block view is unavailable" msgstr "Blöçk vïéw ïs ünäväïläßlé Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмєт, ¢σηѕ#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_course_items_listing.js msgid "Back to Full List" msgstr "Bäçk tö Füll Lïst Ⱡ'σяєм ιρѕυм ∂σłσя ѕιт αмє#" @@ -437,6 +461,7 @@ msgstr "" "¢σηѕє¢тєтυя α#" #: xblock/static/dist/openassessment-lms.370a944c6f75b9557efa.js +#: xblock/static/js/src/lms/oa_staff_area.js msgid "" "If you leave this page without submitting your staff assessment, you will " "lose any work you have done." diff --git a/openassessment/data.py b/openassessment/data.py index 2659893ab7..2f116f5467 100644 --- a/openassessment/data.py +++ b/openassessment/data.py @@ -11,16 +11,19 @@ import os from urllib.parse import urljoin from zipfile import ZipFile +from typing import List, Set from django.conf import settings from django.contrib.auth import get_user_model -from django.db.models import CharField, F, OuterRef, Subquery +from django.db.models import CharField, F, OuterRef, Subquery, QuerySet from django.db.models.functions import Coalesce from django.utils.translation import gettext as _ import requests +from submissions.models import Submission from submissions import api as sub_api from submissions.errors import SubmissionNotFoundError +from openassessment.assessment.score_type_constants import score_type_to_string from openassessment.fileupload.exceptions import FileUploadInternalError from openassessment.runtime_imports.classes import import_block_structure_transformers, import_external_id from openassessment.runtime_imports.functions import get_course_blocks, modulestore @@ -29,6 +32,7 @@ from openassessment.fileupload.api import get_download_url from openassessment.workflow.models import AssessmentWorkflow, TeamAssessmentWorkflow + logger = logging.getLogger(__name__) @@ -97,6 +101,52 @@ def map_anonymized_ids_to_usernames(anonymized_ids): return anonymous_id_to_username_mapping +def map_anonymized_ids_to_user_data(anonymized_ids: Set[str]) -> dict: + """ + Map anonymized user IDs to user data. + + Retrieves user data such as email, username, and fullname associated + with the provided anonymized user IDs. + + Args: + anonymized_ids (Set[str]): Set of anonymized user ids. + + Returns: + dict: A dictionary mapping anonymized user IDs to user data. + Each key is an anonymized user ID, and its corresponding + value is a dictionary containing: + + - email (str): The email address of the user. + - username (str): The username of the user. + - fullname (str): The full name of the user. 
+
+    Example:
+        {
+            "<anonymized_id>": {
+                "email": "john@doe.com",
+                "username": "johndoe",
+                "fullname": "John Doe"
+            }
+        }
+    """
+    User = get_user_model()
+
+    users = _use_read_replica(
+        User.objects.filter(anonymoususerid__anonymous_user_id__in=anonymized_ids)
+        .select_related("profile")
+        .annotate(anonymous_id=F("anonymoususerid__anonymous_user_id"))
+    ).values("username", "email", "profile__name", "anonymous_id")
+
+    anonymous_id_to_user_info_mapping = {
+        user["anonymous_id"]: {
+            "username": user["username"],
+            "email": user["email"],
+            "fullname": user["profile__name"]
+        } for user in users
+    }
+    return anonymous_id_to_user_info_mapping
+
+
 class CsvWriter:
     """
     Dump openassessment data to CSV files.
@@ -1556,3 +1606,110 @@ def get_file_uploads(self, missing_blank=False, generate_urls=False):
                 files.append(file_upload)
         self.file_uploads = files
         return self.file_uploads
+
+
+def parts_summary(assessment: Assessment) -> List[dict]:
+    """
+    Retrieves a summary of the parts from a given assessment object.
+
+    Args:
+        assessment (Assessment): Assessment object.
+
+    Returns:
+        List[dict]: A list containing assessment parts summary data dictionaries.
+    """
+    return [
+        {
+            "criterion_name": part.criterion.name,
+            "score_earned": part.points_earned,
+            "score_type": part.option.name if part.option else _("None"),
+        }
+        for part in assessment.parts.all()
+    ]
+
+
+def generate_assessment_data(assessments: QuerySet[Assessment]) -> List[dict]:
+    """
+    Creates the list of assessment data dictionaries.
+
+    Args:
+        assessments (QuerySet[Assessment]): Assessment objects queryset.
+
+    Returns:
+        List[dict]: A list containing assessment data dictionaries.
+    """
+    # Fetch the user data we need in a single query
+    user_data_mapping = map_anonymized_ids_to_user_data(
+        {assessment.scorer_id for assessment in assessments}
+    )
+
+    # Prefetch the related data needed to generate this report
+    assessments = assessments.prefetch_related("parts").prefetch_related("rubric")
+
+    assessment_data_list = []
+    for assessment in assessments:
+
+        scorer = user_data_mapping.get(assessment.scorer_id, {})
+
+        assessment_data_list.append({
+            "assessment_id": str(assessment.pk),
+            "scorer_name": scorer.get("fullname") or "",
+            "scorer_username": scorer.get("username") or "",
+            "scorer_email": scorer.get("email") or "",
+            "assessment_date": str(assessment.scored_at),
+            "assessment_scores": parts_summary(assessment),
+            "problem_step": score_type_to_string(assessment.score_type),
+            "feedback": assessment.feedback or ""
+        })
+    return assessment_data_list
+
+
+def generate_assessment_from_data(submission_uuid: str) -> List[dict]:
+    """
+    Generates a list of assessments received by a user based
+    on the submission UUID in an ORA assignment.
+
+    Args:
+        submission_uuid (str): The UUID of the submission.
+
+    Returns:
+        List[dict]: A list containing assessment data dictionaries.
+    """
+    assessments = _use_read_replica(
+        Assessment.objects.filter(submission_uuid=submission_uuid)
+    )
+    return generate_assessment_data(assessments)
+
+
+def generate_assessment_to_data(item_id: str, submission_uuid: str) -> List[dict]:
+    """
+    Generates a list of assessments given by a user based
+    on the item ID and submission UUID in an ORA assignment.
+
+    Args:
+        item_id (str): The ID of the item (block ID).
+        submission_uuid (str): The UUID of the submission.
+
+    Returns:
+        List[dict]: A list containing assessment data dictionaries.
+ """ + scorer_submission = sub_api.get_submission_and_student(submission_uuid) + if not scorer_submission: + return [] + + scorer_id = scorer_submission["student_item"]["student_id"] + + submissions = _use_read_replica( + Submission.objects.filter(student_item__item_id=item_id).values("uuid") + ) + + if not submissions: + return [] + + submission_uuids = [sub["uuid"] for sub in submissions] + + assessments_made_by_student = _use_read_replica( + Assessment.objects.filter(scorer_id=scorer_id, submission_uuid__in=submission_uuids) + ) + + return generate_assessment_data(assessments_made_by_student) diff --git a/openassessment/staffgrader/serializers/submission_list.py b/openassessment/staffgrader/serializers/submission_list.py index 37c7944eb0..3a5afa3233 100644 --- a/openassessment/staffgrader/serializers/submission_list.py +++ b/openassessment/staffgrader/serializers/submission_list.py @@ -30,7 +30,9 @@ class Meta: 'gradedBy', 'username', 'teamName', - 'score' + 'score', + "email", + "fullname", ] read_only_fields = fields @@ -40,17 +42,23 @@ class Meta: CONTEXT_ANON_ID_TO_USERNAME = 'anonymous_id_to_username' CONTEXT_SUB_TO_ASSESSMENT = 'submission_uuid_to_assessment' CONTEXT_SUB_TO_ANON_ID = 'submission_uuid_to_student_id' + CONTEXT_ANON_ID_TO_EMAIL = "anonymous_id_to_email" + CONTEXT_ANON_ID_TO_FULLNAME = "anonymous_id_to_fullname" def _verify_required_context(self, context): """Verify that required individual or team context is present for serialization""" context_keys = set(context.keys()) # Required context for individual submissions - required_context = set([ - self.CONTEXT_ANON_ID_TO_USERNAME, - self.CONTEXT_SUB_TO_ASSESSMENT, - self.CONTEXT_SUB_TO_ANON_ID - ]) + required_context = set( + [ + self.CONTEXT_ANON_ID_TO_USERNAME, + self.CONTEXT_SUB_TO_ASSESSMENT, + self.CONTEXT_SUB_TO_ANON_ID, + self.CONTEXT_ANON_ID_TO_EMAIL, + self.CONTEXT_ANON_ID_TO_FULLNAME, + ] + ) missing_context = required_context - context_keys if missing_context: @@ -70,6 +78,8 @@ def __init__(self, *args, **kwargs): username = serializers.SerializerMethodField() teamName = serializers.SerializerMethodField() score = serializers.SerializerMethodField() + email = serializers.SerializerMethodField() + fullname = serializers.SerializerMethodField() def _get_username_from_context(self, anonymous_user_id): try: @@ -85,6 +95,22 @@ def _get_anonymous_id_from_context(self, submission_uuid): f"No submitter anonymous user id found for submission uuid {submission_uuid}" ) from e + def _get_email_from_context(self, anonymous_user_id): + try: + return self.context[self.CONTEXT_ANON_ID_TO_EMAIL][anonymous_user_id] + except KeyError as e: + raise MissingContextException( + f"Email not found for anonymous user id {anonymous_user_id}" + ) from e + + def _get_fullname_from_context(self, anonymous_user_id): + try: + return self.context[self.CONTEXT_ANON_ID_TO_FULLNAME][anonymous_user_id] + except KeyError as e: + raise MissingContextException( + f"fullname not found for anonymous user id {anonymous_user_id}" + ) from e + def get_dateGraded(self, workflow): return str(workflow.grading_completed_at) @@ -99,6 +125,16 @@ def get_username(self, workflow): self._get_anonymous_id_from_context(workflow.identifying_uuid) ) + def get_email(self, workflow): + return self._get_email_from_context( + self._get_anonymous_id_from_context(workflow.identifying_uuid) + ) + + def get_fullname(self, workflow): + return self._get_fullname_from_context( + self._get_anonymous_id_from_context(workflow.identifying_uuid) + ) + def get_teamName(self, 
        # For individual submissions, this is intentionally empty
        return None
@@ -123,12 +159,16 @@ class TeamSubmissionListSerializer(SubmissionListSerializer):
     CONTEXT_SUB_TO_ASSESSMENT = 'submission_uuid_to_assessment'
     CONTEXT_SUB_TO_TEAM_ID = 'team_submission_uuid_to_team_id'
     CONTEXT_TEAM_ID_TO_TEAM_NAME = 'team_id_to_team_name'
+    CONTEXT_ANON_ID_TO_EMAIL = 'anonymous_id_to_email'
+    CONTEXT_ANON_ID_TO_FULLNAME = 'anonymous_id_to_fullname'

     REQUIRED_CONTEXT_KEYS = [
         CONTEXT_ANON_ID_TO_USERNAME,
         CONTEXT_SUB_TO_ASSESSMENT,
         CONTEXT_SUB_TO_TEAM_ID,
         CONTEXT_TEAM_ID_TO_TEAM_NAME,
+        CONTEXT_ANON_ID_TO_EMAIL,
+        CONTEXT_ANON_ID_TO_FULLNAME,
     ]

     def _verify_required_context(self, context):
@@ -160,6 +200,14 @@ def get_username(self, workflow):  # pylint: disable=unused-argument
         # For team submissions, this is intentionally empty
         return None

+    def get_email(self, workflow):  # pylint: disable=unused-argument
+        # For team submissions, this is intentionally empty
+        return None
+
+    def get_fullname(self, workflow):  # pylint: disable=unused-argument
+        # For team submissions, this is intentionally empty
+        return None
+
     def get_teamName(self, workflow):
         return self._get_team_name_from_context(
             self._get_team_id_from_context(workflow.identifying_uuid)
diff --git a/openassessment/staffgrader/staff_grader_mixin.py b/openassessment/staffgrader/staff_grader_mixin.py
index f581914d28..75b90fe2d0 100644
--- a/openassessment/staffgrader/staff_grader_mixin.py
+++ b/openassessment/staffgrader/staff_grader_mixin.py
@@ -2,6 +2,7 @@
 API endpoints for enhanced staff grader
 """
 from functools import wraps
+from typing import List
 import logging

 from django.db.models import Case, OuterRef, Prefetch, Subquery, Value, When
@@ -15,7 +16,13 @@
 from openassessment.assessment.errors.staff import StaffAssessmentError
 from openassessment.assessment.models.base import Assessment, AssessmentPart
 from openassessment.assessment.models.staff import StaffWorkflow, TeamStaffWorkflow
-from openassessment.data import map_anonymized_ids_to_usernames, OraSubmissionAnswerFactory, VersionNotFoundException
+from openassessment.data import (
+    OraSubmissionAnswerFactory,
+    VersionNotFoundException,
+    map_anonymized_ids_to_user_data,
+    generate_assessment_from_data,
+    generate_assessment_to_data
+)
 from openassessment.staffgrader.errors.submission_lock import SubmissionLockContestedError
 from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock
 from openassessment.staffgrader.serializers import (
@@ -194,6 +201,39 @@ def list_staff_workflows(self, data, suffix=''):  # pylint: disable=unused-argum
                 log.exception("Failed to serialize workflow %d: %s", staff_workflow.id, str(e), exc_info=True)
         return result

+    @XBlock.json_handler
+    @require_course_staff("STUDENT_GRADE")
+    def list_assessments_to(self, data: dict, suffix="") -> List[dict]:  # pylint: disable=unused-argument
+        """
+        List the assessments given by a user (according to
+        the submission_uuid) in an ORA assignment.
+
+        Args:
+            data (dict): Contains the necessary information to fetch the assessments.
+                - item_id (str): The ID of the xblock/item.
+                - submission_uuid (str): The UUID of the submission.
+
+        Returns:
+            List[dict]: A list of assessment data dictionaries.
+        """
+        return generate_assessment_to_data(data["item_id"], data["submission_uuid"])
+
+    @XBlock.json_handler
+    @require_course_staff("STUDENT_GRADE")
+    def list_assessments_from(self, data: dict, suffix="") -> List[dict]:  # pylint: disable=unused-argument
+        """
+        List the assessments received by a user (according to
+        the submission_uuid) in an ORA assignment.
+
+        Args:
+            data (dict): Contains the necessary information to fetch the assessments.
+                - submission_uuid (str): The UUID of the submission.
+
+        Returns:
+            List[dict]: A list of assessment data dictionaries.
+        """
+        return generate_assessment_from_data(data["submission_uuid"])
+
     def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assignment=False):
         """
         Fetch additional required data and models to serialize the response
@@ -206,6 +246,9 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign
             workflow_scorer_ids.add(workflow.scorer_id)
         course_id = self.get_student_item_dict()['course_id']

+        context = {}
+        all_anonymous_ids = set(workflow_scorer_ids)
+
         # Fetch user identifier mappings
         if is_team_assignment:
             # Look up the team IDs for submissions so we can later map to team names
@@ -214,39 +257,39 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign
             # Look up names for teams
             topic_id = self.selected_teamset_id
             team_id_to_team_name = self.teams_service.get_team_names(course_id, topic_id)
-
-            # Do bulk lookup for scorer anonymous ids (submitting team name is a separate lookup)
-            anonymous_id_to_username = map_anonymized_ids_to_usernames(set(workflow_scorer_ids))
-
-            context = {
+            context.update({
                 'team_submission_uuid_to_team_id': team_submission_uuid_to_team_id,
                 'team_id_to_team_name': team_id_to_team_name,
-            }
+            })
         else:
             # When we look up usernames we want to include all connected learner student ids
             submission_uuid_to_student_id = get_student_ids_by_submission_uuid(
                 course_id,
                 submission_uuids,
             )
+            context['submission_uuid_to_student_id'] = submission_uuid_to_student_id
+            all_anonymous_ids |= set(submission_uuid_to_student_id.values())

-            # Do bulk lookup for all anonymous ids (submitters and scoreres).
This is used for the - # `gradedBy` and `username` fields - anonymous_id_to_username = map_anonymized_ids_to_usernames( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) + anonymous_id_to_user_data = map_anonymized_ids_to_user_data(all_anonymous_ids) - context = { - 'submission_uuid_to_student_id': submission_uuid_to_student_id, - } + anonymous_id_to_username, anonymous_id_to_email, anonymous_id_to_fullname = {}, {}, {} + for anonymous_id, user_data in anonymous_id_to_user_data.items(): + anonymous_id_to_username[anonymous_id] = user_data["username"] + anonymous_id_to_email[anonymous_id] = user_data["email"] + anonymous_id_to_fullname[anonymous_id] = user_data["fullname"] # Do a bulk fetch of the assessments linked to the workflows, including all connected # Rubric, Criteria, and Option models submission_uuid_to_assessment = self.bulk_deep_fetch_assessments(staff_workflows) - context.update({ - 'anonymous_id_to_username': anonymous_id_to_username, - 'submission_uuid_to_assessment': submission_uuid_to_assessment, - }) + context.update( + { + "anonymous_id_to_username": anonymous_id_to_username, + "anonymous_id_to_email": anonymous_id_to_email, + "anonymous_id_to_fullname": anonymous_id_to_fullname, + "submission_uuid_to_assessment": submission_uuid_to_assessment, + } + ) return context diff --git a/openassessment/staffgrader/tests/test_list_staff_workflows.py b/openassessment/staffgrader/tests/test_list_staff_workflows.py index 7d2dff05e9..cef2949023 100644 --- a/openassessment/staffgrader/tests/test_list_staff_workflows.py +++ b/openassessment/staffgrader/tests/test_list_staff_workflows.py @@ -49,7 +49,7 @@ TEST_START_DATE = SUBMITTED_DATE + timedelta(days=2) POINTS_POSSIBLE = 6 -TestUser = namedtuple("TestUser", ['username', 'student_id', 'submission']) +TestUser = namedtuple("TestUser", ['username', 'email', 'fullname', 'student_id', 'submission']) TestTeam = namedtuple("TestTeam", ['team_name', 'team_id', 'member_ids', 'team_submission']) MockAnnotatedStaffWorkflow = namedtuple("MockAnnotatedStaffWorkflow", EXPECTED_ANNOTATED_WORKFLOW_FIELDS) @@ -89,12 +89,23 @@ def setUpTestData(cls): test_user.student_id: test_user.username for test_user in cls.students + cls.course_staff } + cls.student_id_to_user_data_map = { + test_user.student_id: { + 'username': test_user.username, + 'email': test_user.email, + 'fullname': test_user.fullname, + } + for test_user in cls.students + cls.course_staff + } # These are just values that are going to be used several times, so also calculate them and store them now cls.submission_uuids = {student.submission['uuid'] for student in cls.students} @classmethod def _create_test_user(cls, identifier, user_type, create_submission=True): - """ Create a TestUser, a namedtuple with a student_id, username, and potentially a submission """ + """ + Create a TestUser, a namedtuple with a student_id, username, email, + fullname and potentially a submission + """ student_id = f"SWLV_{user_type}_{identifier}_student_id" if create_submission: student_item = cls._student_item(student_id) @@ -104,6 +115,8 @@ def _create_test_user(cls, identifier, user_type, create_submission=True): submission = None return TestUser( username=f"SWLV_{user_type}_{identifier}_username", + email=f"SWLV_{user_type}_{identifier}_email", + fullname=f"SWLV_{user_type}_{identifier}_fullname", student_id=student_id, submission=submission, ) @@ -154,6 +167,19 @@ def _mock_map_anonymized_ids_to_usernames(self): ) as patched_map: yield patched_map + @contextmanager + def 
_mock_map_anonymized_ids_to_user_data(self): + """ + Context manager that patches map_anonymized_ids_to_user_data and + returns a mapping from student IDs to a dictionary containing + username, email, and fullname. + """ + with patch( + 'openassessment.staffgrader.staff_grader_mixin.map_anonymized_ids_to_user_data', + return_value=self.student_id_to_user_data_map + ) as patched_map: + yield patched_map + def submit_staff_assessment(self, xblock, student, grader, option, option_2=None): """ Helper method to submit a staff assessment @@ -228,6 +254,8 @@ def add_expected_response_dict( 'gradingStatus': 'ungraded' if not date_graded else 'graded', 'lockStatus': lock_status, 'username': student.username if not team else None, + 'email': student.email if not team else None, + 'fullname': student.fullname if not team else None, 'teamName': team.team_name if team else None, 'score': score, } @@ -292,7 +320,7 @@ class StaffWorkflowListViewIntegrationTests(TestStaffWorkflowListViewBase): def test_no_grades_or_locks(self, xblock): """ Test for the result of calling the view for an ORA with no grades or locks""" self.set_staff_user(xblock) - with self._mock_map_anonymized_ids_to_usernames(): + with self._mock_map_anonymized_ids_to_user_data(): response = self.request(xblock, 'list_staff_workflows', json.dumps({}), response_format='json') expected_response = {} for student in self.students: @@ -307,7 +335,7 @@ def test_graded(self, xblock): self.setup_completed_assessments(xblock, grading_config) self.set_staff_user(xblock) - with self._mock_map_anonymized_ids_to_usernames(): + with self._mock_map_anonymized_ids_to_user_data(): response = self.request(xblock, 'list_staff_workflows', json.dumps({}), response_format='json') expected = {} @@ -326,7 +354,7 @@ def test_locked(self, xblock): self.setup_active_locks(lock_config) self.set_staff_user(xblock) - with self._mock_map_anonymized_ids_to_usernames(): + with self._mock_map_anonymized_ids_to_user_data(): response = self.request(xblock, 'list_staff_workflows', json.dumps({}), response_format='json') expected = {} @@ -423,7 +451,7 @@ def test_teams(self, xblock, mock_get_team_ids_by_submission): mock_get_team_ids_by_submission.return_value = self.team_ids_by_submission_id # pylint: disable=unused-argument, protected-access xblock.runtime._services['teams'] = Mock(get_team_names=lambda a, b: self.team_names_by_team_id) - with self._mock_map_anonymized_ids_to_usernames(): + with self._mock_map_anonymized_ids_to_user_data(): response = self.request(xblock, 'list_staff_workflows', "{}", response_format='response') response_body = json.loads(response.body.decode('utf-8')) @@ -601,20 +629,20 @@ def test_bulk_fetch_annotated_staff_workflows(self, xblock, set_up_grades, set_u def test_get_list_workflows_serializer_context(self, xblock): """ Unit test for _get_list_workflows_serializer_context """ self.set_staff_user(xblock) - # Set up the mock return_value for bulk_deep_fetch_assessments. 
- # submissions 0 and 3 are the only ones assessed + mock_staff_workflows = [ Mock(scorer_id=self.course_staff[1].student_id), Mock(assessment=None, scorer_id=None), Mock(assessment=None, scorer_id=None), Mock(scorer_id=self.course_staff[2].student_id), ] + with self._mock_get_student_ids_by_submission_uuid() as mock_get_student_ids: - # with self._mock_get_team_ids_by_team_submission_uuid() as mock_get_team_ids: - with self._mock_map_anonymized_ids_to_usernames() as mock_map_ids: + with self._mock_map_anonymized_ids_to_user_data() as mock_map_data: with patch.object(xblock, 'bulk_deep_fetch_assessments') as mock_bulk_fetch_assessments: - # pylint: disable=protected-access - context = xblock._get_list_workflows_serializer_context(mock_staff_workflows) + context = xblock._get_list_workflows_serializer_context( # pylint: disable=protected-access + mock_staff_workflows + ) mock_get_student_ids.assert_called_once_with( self.course_id, @@ -627,12 +655,14 @@ def test_get_list_workflows_serializer_context(self, xblock): expected_anonymous_id_lookups.update( {self.course_staff[1].student_id, self.course_staff[2].student_id} ) - mock_map_ids.assert_called_once_with(expected_anonymous_id_lookups) + mock_map_data.assert_called_once_with(expected_anonymous_id_lookups) mock_bulk_fetch_assessments.assert_called_once_with(mock_staff_workflows) expected_context = { 'submission_uuid_to_student_id': mock_get_student_ids.return_value, - 'anonymous_id_to_username': mock_map_ids.return_value, + 'anonymous_id_to_username': {k: v["username"] for k, v in mock_map_data.return_value.items()}, + 'anonymous_id_to_email': {k: v["email"] for k, v in mock_map_data.return_value.items()}, + 'anonymous_id_to_fullname': {k: v["fullname"] for k, v in mock_map_data.return_value.items()}, 'submission_uuid_to_assessment': mock_bulk_fetch_assessments.return_value, } diff --git a/openassessment/staffgrader/tests/test_serializers.py b/openassessment/staffgrader/tests/test_serializers.py index 0f7a9b0296..6a62320bcd 100644 --- a/openassessment/staffgrader/tests/test_serializers.py +++ b/openassessment/staffgrader/tests/test_serializers.py @@ -117,6 +117,16 @@ def mock_get_username(self): with patch.object(SubmissionListSerializer, 'get_username', return_value='get_username'): yield + @contextmanager + def mock_get_email(self): + with patch.object(SubmissionListSerializer, 'get_email', return_value='get_email'): + yield + + @contextmanager + def mock_get_fullname(self): + with patch.object(SubmissionListSerializer, 'get_fullname', return_value='get_fullname'): + yield + @contextmanager def mock_get_teamName(self): with patch.object(SubmissionListSerializer, 'get_teamName', return_value='get_teamName'): @@ -133,12 +143,25 @@ def mock_verify_required_context(self): yield @contextmanager - def mock_serializer_methods(self, gradedBy=False, username=False, teamName=False, score=False, verify=False): + def mock_serializer_methods( + self, + gradedBy=False, + username=False, + email=False, + fullname=False, + teamName=False, + score=False, + verify=False, + ): with ExitStack() as stack: if gradedBy: stack.enter_context(self.mock_get_gradedBy()) if username: stack.enter_context(self.mock_get_username()) + if email: + stack.enter_context(self.mock_get_email()) + if fullname: + stack.enter_context(self.mock_get_fullname()) if teamName: stack.enter_context(self.mock_get_teamName()) if score: @@ -149,7 +172,9 @@ def mock_serializer_methods(self, gradedBy=False, username=False, teamName=False def test_serializer(self): mock_workflow = Mock() - 
with self.mock_serializer_methods(gradedBy=True, username=True, teamName=True, score=True, verify=True): + with self.mock_serializer_methods( + gradedBy=True, username=True, email=True, fullname=True, teamName=True, score=True, verify=True + ): result = SubmissionListSerializer(mock_workflow).data self.assertDictEqual( result, @@ -161,6 +186,8 @@ def test_serializer(self): 'lockStatus': str(mock_workflow.lock_status), 'gradedBy': 'get_gradedBy', 'username': 'get_username', + 'email': 'get_email', + 'fullname': 'get_fullname', 'teamName': 'get_teamName', 'score': 'get_score', } @@ -175,7 +202,7 @@ def test_get_gradedBy(self, has_scorer_id): else: mock_workflow.scorer_id = None - with self.mock_serializer_methods(username=True, score=True, verify=True): + with self.mock_serializer_methods(username=True, email=True, fullname=True, score=True, verify=True): result = SubmissionListSerializer( mock_workflow, context={ @@ -197,7 +224,7 @@ def test_get_score(self, has_assessment): if has_assessment: mock_submission_uuid_to_assessment[mock_workflow.identifying_uuid] = mock_assessment - with self.mock_serializer_methods(gradedBy=True, username=True, verify=True): + with self.mock_serializer_methods(gradedBy=True, username=True, email=True, fullname=True, verify=True): with patch( 'openassessment.staffgrader.serializers.submission_list.SubmissionListScoreSerializer' ) as mock_score_serializer: @@ -219,7 +246,7 @@ def test_get_username(self): # mock_workflow.identifying_uuid = str(mock_workflow.identifying_uuid) student_id, username = 'test_student_id', 'test_username' - with self.mock_serializer_methods(gradedBy=True, score=True, verify=True): + with self.mock_serializer_methods(gradedBy=True, email=True, fullname=True, score=True, verify=True): result = SubmissionListSerializer( mock_workflow, context={ @@ -230,11 +257,45 @@ def test_get_username(self): self.assertEqual(result['username'], username) + def test_get_email(self): + mock_workflow = Mock() + # mock_workflow.identifying_uuid = str(mock_workflow.identifying_uuid) + student_id, email = 'test_student_id', 'test_email' + + with self.mock_serializer_methods(gradedBy=True, username=True, fullname=True, score=True, verify=True): + result = SubmissionListSerializer( + mock_workflow, + context={ + 'submission_uuid_to_student_id': {mock_workflow.identifying_uuid: student_id}, + 'anonymous_id_to_email': {student_id: email} + } + ).data + + self.assertEqual(result['email'], email) + + def test_get_fullname(self): + mock_workflow = Mock() + # mock_workflow.identifying_uuid = str(mock_workflow.identifying_uuid) + student_id, fullname = 'test_student_id', 'test_fullname' + + with self.mock_serializer_methods(gradedBy=True, username=True, email=True, score=True, verify=True): + result = SubmissionListSerializer( + mock_workflow, + context={ + 'submission_uuid_to_student_id': {mock_workflow.identifying_uuid: student_id}, + 'anonymous_id_to_fullname': {student_id: fullname} + } + ).data + + self.assertEqual(result['fullname'], fullname) + def test_get_teamName(self): mock_workflow = Mock() student_id, username = 'test_student_id', 'test_username' - with self.mock_serializer_methods(gradedBy=True, username=True, score=True, verify=True): + with self.mock_serializer_methods( + gradedBy=True, username=True, email=True, fullname=True, score=True, verify=True + ): result = SubmissionListSerializer( mock_workflow, context={ @@ -265,6 +326,16 @@ def test_integration(self): f'student_id_{i}': f'username_{i}' for i in range(3) } + # Simple mapping of student_id_n 
to email_n + anonymous_id_to_email = { + f'student_id_{i}': f'email_{i}' + for i in range(3) + } + # Simple mapping of student_id_n to fullname_n + anonymous_id_to_fullname = { + f'student_id_{i}': f'fullname_{i}' + for i in range(3) + } # also include usernames for the scorers of the first two workflows anonymous_id_to_username[workflows[0].scorer_id] = 'staff_username_1' anonymous_id_to_username[workflows[1].scorer_id] = 'staff_username_2' @@ -280,6 +351,8 @@ def test_integration(self): context={ 'submission_uuid_to_student_id': submission_uuid_to_student_id, 'anonymous_id_to_username': anonymous_id_to_username, + 'anonymous_id_to_email': anonymous_id_to_email, + 'anonymous_id_to_fullname': anonymous_id_to_fullname, 'submission_uuid_to_assessment': submission_uuid_to_assessment, }, many=True @@ -299,6 +372,8 @@ def test_integration(self): 'pointsEarned': 10, 'pointsPossible': 20, }, + 'email': 'email_0', + 'fullname': 'fullname_0', }), OrderedDict({ 'submissionUuid': str(workflows[1].submission_uuid), @@ -313,6 +388,8 @@ def test_integration(self): 'pointsEarned': 7, 'pointsPossible': 20, }, + 'email': 'email_1', + 'fullname': 'fullname_1', }), OrderedDict({ 'submissionUuid': str(workflows[2].submission_uuid), @@ -324,6 +401,8 @@ def test_integration(self): 'username': 'username_2', 'teamName': None, 'score': {}, + 'email': 'email_2', + 'fullname': 'fullname_2', }) ] @@ -346,6 +425,16 @@ def mock_get_username(self): with patch.object(TeamSubmissionListSerializer, 'get_username', return_value='get_username'): yield + @contextmanager + def mock_get_email(self): + with patch.object(TeamSubmissionListSerializer, 'get_email', return_value='get_email'): + yield + + @contextmanager + def mock_get_fullname(self): + with patch.object(TeamSubmissionListSerializer, 'get_fullname', return_value='get_fullname'): + yield + @contextmanager def mock_get_teamName(self): with patch.object(TeamSubmissionListSerializer, 'get_teamName', return_value='get_teamName'): @@ -362,12 +451,18 @@ def mock_verify_required_context(self): yield @contextmanager - def mock_serializer_methods(self, gradedBy=False, username=False, teamName=False, score=False, verify=False): + def mock_serializer_methods( + self, gradedBy=False, username=False, email=False, fullname=False, teamName=False, score=False, verify=False + ): with ExitStack() as stack: if gradedBy: stack.enter_context(self.mock_get_gradedBy()) if username: stack.enter_context(self.mock_get_username()) + if email: + stack.enter_context(self.mock_get_email()) + if fullname: + stack.enter_context(self.mock_get_fullname()) if teamName: stack.enter_context(self.mock_get_teamName()) if score: @@ -392,7 +487,9 @@ def test_missing_context(self, key_to_remove): def test_serializer(self): """Test connections between serializer fields and underlying functions""" mock_workflow = Mock() - with self.mock_serializer_methods(gradedBy=True, username=True, teamName=True, score=True, verify=True): + with self.mock_serializer_methods( + gradedBy=True, username=True, email=True, fullname=True, teamName=True, score=True, verify=True + ): result = TeamSubmissionListSerializer(mock_workflow).data self.assertDictEqual( result, @@ -404,6 +501,8 @@ def test_serializer(self): 'lockStatus': str(mock_workflow.lock_status), 'gradedBy': 'get_gradedBy', 'username': 'get_username', + 'email': 'get_email', + 'fullname': 'get_fullname', 'teamName': 'get_teamName', 'score': 'get_score', } @@ -425,6 +524,38 @@ def test_get_username(self): # Username should be null for team submissions 
self.assertEqual(result['username'], None) + def test_get_email(self): + mock_workflow = Mock() + student_id, email = 'test_student_id', 'test_email' + + with self.mock_serializer_methods(teamName=True, gradedBy=True, score=True, verify=True): + result = TeamSubmissionListSerializer( + mock_workflow, + context={ + 'submission_uuid_to_student_id': {mock_workflow.identifying_uuid: student_id}, + 'anonymous_id_to_email': {student_id: email} + } + ).data + + # Email should be null for team submissions + self.assertEqual(result['email'], None) + + def test_get_fullname(self): + mock_workflow = Mock() + student_id, fullname = 'test_student_id', 'test_fullname' + + with self.mock_serializer_methods(teamName=True, gradedBy=True, score=True, verify=True): + result = TeamSubmissionListSerializer( + mock_workflow, + context={ + 'submission_uuid_to_student_id': {mock_workflow.identifying_uuid: student_id}, + 'anonymous_id_to_fullname': {student_id: fullname} + } + ).data + + # Fullname should be null for team submissions + self.assertEqual(result['fullname'], None) + def test_get_teamName(self): mock_workflow = Mock() team_id, team_name = 'test_team_id', 'test_team_name' @@ -471,6 +602,9 @@ def test_integration(self): # Anonymous id to username used only for scorer ids anonymous_id_to_username = {} + anonymous_id_to_email = {} + anonymous_id_to_fullname = {} + anonymous_id_to_username[workflows[0].scorer_id] = 'staff_username_1' anonymous_id_to_username[workflows[1].scorer_id] = 'staff_username_2' @@ -484,6 +618,8 @@ def test_integration(self): workflows, context={ 'anonymous_id_to_username': anonymous_id_to_username, + 'anonymous_id_to_email': anonymous_id_to_email, + 'anonymous_id_to_fullname': anonymous_id_to_fullname, 'submission_uuid_to_assessment': submission_uuid_to_assessment, 'team_submission_uuid_to_team_id': team_submission_uuid_to_team_id, 'team_id_to_team_name': team_id_to_team_name, @@ -505,6 +641,8 @@ def test_integration(self): 'pointsEarned': 10, 'pointsPossible': 20, }, + 'email': None, + 'fullname': None, }), OrderedDict({ 'submissionUuid': str(workflows[1].team_submission_uuid), @@ -519,6 +657,8 @@ def test_integration(self): 'pointsEarned': 7, 'pointsPossible': 20, }, + 'email': None, + 'fullname': None, }), OrderedDict({ 'submissionUuid': str(workflows[2].team_submission_uuid), @@ -530,6 +670,8 @@ def test_integration(self): 'username': None, 'teamName': 'Team name 2', 'score': {}, + 'email': None, + 'fullname': None, }) ] diff --git a/openassessment/staffgrader/tests/test_staff_grader_mixin.py b/openassessment/staffgrader/tests/test_staff_grader_mixin.py index 10a112f9ae..9addc4486a 100644 --- a/openassessment/staffgrader/tests/test_staff_grader_mixin.py +++ b/openassessment/staffgrader/tests/test_staff_grader_mixin.py @@ -2,17 +2,22 @@ Tests for Staff Grader mixin """ import copy -from datetime import timedelta import json +from datetime import timedelta +from http import HTTPStatus from unittest.mock import Mock, patch from uuid import uuid4 from freezegun import freeze_time -from openassessment.assessment.errors.staff import StaffAssessmentError +from openassessment.assessment.errors.staff import StaffAssessmentError from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock from openassessment.tests.factories import UserFactory -from openassessment.xblock.test.base import XBlockHandlerTestCase, scenario, STAFF_GOOD_ASSESSMENT +from openassessment.xblock.test.base import ( + STAFF_GOOD_ASSESSMENT, + XBlockHandlerTestCase, + scenario, +) 
@freeze_time("1969-07-20T22:56:00-04:00") @@ -231,6 +236,68 @@ def test_batch_delete_submission_locks_empty(self, xblock): self.assertEqual(response.status_code, 200) self.assertIsNone(response_body) + @patch("openassessment.staffgrader.staff_grader_mixin.generate_assessment_from_data") + @scenario("data/basic_scenario.xml", user_id="staff") + def test_list_assessments_from(self, xblock, assessment_from_data_mock: Mock): + """ List assessments returns received assessments """ + xblock.xmodule_runtime = Mock(user_is_staff=True, anonymous_student_id=self.staff_user_id) + assessments = { + "assessments": [ + {"id_assessment": "1"}, + {"id_assessment": "2"} + ] + } + submission_uuid = "test_submission_uuid" + assessment_from_data_mock.return_value = assessments + request_data = { + "submission_uuid": submission_uuid + } + + response = self.request( + xblock, + "list_assessments_from", + json.dumps(request_data), + response_format="response", + ) + response_body = json.loads(response.body.decode('utf-8')) + + assessment_from_data_mock.assert_called_once_with(submission_uuid) + self.assertEqual(response.status_code, HTTPStatus.OK) + self.assertIsInstance(response_body, dict) + self.assertEqual(response_body, assessments) + + @patch("openassessment.staffgrader.staff_grader_mixin.generate_assessment_to_data") + @scenario("data/basic_scenario.xml", user_id="staff") + def test_list_assessments_to(self, xblock, assessment_to_data_mock: Mock): + """ List assessments returns given assessments """ + xblock.xmodule_runtime = Mock(user_is_staff=True, anonymous_student_id=self.staff_user_id) + assessments = { + "assessments": [ + {"id_assessment": "1"}, + {"id_assessment": "2"} + ] + } + submission_uuid = "test_submission_uuid" + item_id = "test_item_id" + assessment_to_data_mock.return_value = assessments + request_data = { + "item_id": item_id, + "submission_uuid": submission_uuid + } + + response = self.request( + xblock, + "list_assessments_to", + json.dumps(request_data), + response_format="response", + ) + response_body = json.loads(response.body.decode('utf-8')) + + assessment_to_data_mock.assert_called_once_with(item_id, submission_uuid) + self.assertEqual(response.status_code, HTTPStatus.OK) + self.assertIsInstance(response_body, dict) + self.assertEqual(response_body, assessments) + @scenario('data/basic_scenario.xml', user_id="staff") def test_batch_delete_submission_locks_bad_param(self, xblock): """ Batch delete fails if submission_uuids is not a list """ diff --git a/openassessment/tests/test_data.py b/openassessment/tests/test_data.py index 35748f16d3..0b7508d7d7 100644 --- a/openassessment/tests/test_data.py +++ b/openassessment/tests/test_data.py @@ -9,7 +9,8 @@ import json import os.path import zipfile -from unittest.mock import call, Mock, patch +from typing import List +from unittest.mock import call, Mock, patch, MagicMock import ddt from freezegun import freeze_time @@ -22,7 +23,8 @@ from openassessment.data import ( CsvWriter, OraAggregateData, OraDownloadData, SubmissionFileUpload, OraSubmissionAnswerFactory, VersionNotFoundException, ZippedListSubmissionAnswer, OraSubmissionAnswer, ZIPPED_LIST_SUBMISSION_VERSIONS, - TextOnlySubmissionAnswer, FileMissingException, map_anonymized_ids_to_usernames + TextOnlySubmissionAnswer, FileMissingException, map_anonymized_ids_to_usernames, map_anonymized_ids_to_user_data, + generate_assessment_to_data, generate_assessment_from_data, generate_assessment_data, parts_summary, ) from openassessment.test_utils import TransactionCacheResetTest from 
openassessment.tests.factories import * # pylint: disable=wildcard-import @@ -35,22 +37,42 @@ STUDENT_USERNAME = "Student Username" +STUDENT_EMAIL = "Student Email" + +STUDENT_FULL_NAME = "Student Full Name" + PRE_FILE_SIZE_STUDENT_ID = "Pre_FileSize_Student" PRE_FILE_SIZE_STUDENT_USERNAME = 'Pre_FileSize_Student_Username' +PRE_FILE_SIZE_STUDENT_EMAIL = 'Pre_FileSize_Student_Email' + +PRE_FILE_SIZE_STUDENT_FULL_NAME = 'Pre_FileSize_Student_Full_Name' + PRE_FILE_NAME_STUDENT_ID = "Pre_FileName_Student" PRE_FILE_NAME_STUDENT_USERNAME = 'Pre_FileName_Student_Username' +PRE_FILE_NAME_STUDENT_EMAIL = 'Pre_FileName_Student_Email' + +PRE_FILE_NAME_STUDENT_FULL_NAME = 'Pre_FileName_Student_Full_Name' + SCORER_ID = "Scorer" SCORER_USERNAME = "Scorer Username" +SCORER_EMAIL = "Scorer Email" + +SCORER_FULL_NAME = "Scorer Full Name" + TEST_SCORER_ID = "Test Scorer" TEST_SCORER_USERNAME = "Test Scorer Username" +TEST_SCORER_EMAIL = "Test Scorer Email" + +TEST_SCORER_FULL_NAME = "Test Scorer Full Name" + USERNAME_MAPPING = { STUDENT_ID: STUDENT_USERNAME, SCORER_ID: SCORER_USERNAME, @@ -59,6 +81,22 @@ PRE_FILE_NAME_STUDENT_ID: PRE_FILE_NAME_STUDENT_USERNAME, } +USER_DATA_MAPPING = { + STUDENT_ID: {"username": STUDENT_USERNAME, "email": STUDENT_EMAIL, "fullname": STUDENT_FULL_NAME}, + SCORER_ID: {"username": SCORER_USERNAME, "email": SCORER_EMAIL, "fullname": SCORER_FULL_NAME}, + TEST_SCORER_ID: {"username": TEST_SCORER_USERNAME, "email": TEST_SCORER_EMAIL, "fullname": TEST_SCORER_FULL_NAME}, + PRE_FILE_SIZE_STUDENT_ID: { + "username": PRE_FILE_SIZE_STUDENT_USERNAME, + "email": PRE_FILE_SIZE_STUDENT_EMAIL, + "fullname": PRE_FILE_SIZE_STUDENT_FULL_NAME, + }, + PRE_FILE_NAME_STUDENT_ID: { + "username": PRE_FILE_NAME_STUDENT_USERNAME, + "email": PRE_FILE_NAME_STUDENT_EMAIL, + "fullname": PRE_FILE_NAME_STUDENT_FULL_NAME, + }, +} + ITEM_ID = "item" ITEM_DISPLAY_NAME = "Open Response Assessment" @@ -383,6 +421,55 @@ def test_map_anonymized_ids_to_usernames(self): self.assertEqual(mapping, USERNAME_MAPPING) + def test_map_anonymized_ids_to_user_data(self): + with patch('openassessment.data.get_user_model') as get_user_model_mock: + get_user_model_mock.return_value.objects.filter.return_value \ + .select_related.return_value.annotate.return_value.values.return_value = [ + { + 'anonymous_id': STUDENT_ID, + 'username': STUDENT_USERNAME, + 'email': STUDENT_EMAIL, + 'profile__name': STUDENT_FULL_NAME, + }, + { + 'anonymous_id': PRE_FILE_SIZE_STUDENT_ID, + 'username': PRE_FILE_SIZE_STUDENT_USERNAME, + 'email': PRE_FILE_SIZE_STUDENT_EMAIL, + 'profile__name': PRE_FILE_SIZE_STUDENT_FULL_NAME, + }, + { + 'anonymous_id': PRE_FILE_NAME_STUDENT_ID, + 'username': PRE_FILE_NAME_STUDENT_USERNAME, + 'email': PRE_FILE_NAME_STUDENT_EMAIL, + 'profile__name': PRE_FILE_NAME_STUDENT_FULL_NAME, + }, + { + 'anonymous_id': SCORER_ID, + 'username': SCORER_USERNAME, + 'email': SCORER_EMAIL, + 'profile__name': SCORER_FULL_NAME, + }, + { + 'anonymous_id': TEST_SCORER_ID, + 'username': TEST_SCORER_USERNAME, + 'email': TEST_SCORER_EMAIL, + 'profile__name': TEST_SCORER_FULL_NAME, + }, + ] + + # pylint: disable=protected-access + mapping = map_anonymized_ids_to_user_data( + [ + STUDENT_ID, + PRE_FILE_SIZE_STUDENT_ID, + PRE_FILE_NAME_STUDENT_ID, + SCORER_ID, + TEST_SCORER_ID, + ] + ) + + self.assertEqual(mapping, USER_DATA_MAPPING) + def test_map_students_and_scorers_ids_to_usernames(self): test_submission_information = [ ( @@ -1880,3 +1967,271 @@ def test_get_file_uploads_misaligned_fields(self): self.assertEqual(file_upload.name, 
@@ -1880,3 +1967,271 @@ def test_get_file_uploads_misaligned_fields(self):
             self.assertEqual(file_upload.name, submission_test_file_names[i])
             self.assertEqual(file_upload.description, submission_test_file_descriptions[i])
             self.assertEqual(file_upload.size, submission_test_file_sizes[i])
+
+
+class ListAssessmentsTest(TestCase):
+    """
+    Unit tests for the functions behind the `list_assessments_from` and `list_assessments_to` handlers
+    """
+
+    patch_submission_api = patch("openassessment.data.sub_api.get_submission_and_student")
+    patch_submissions = patch("openassessment.data.Submission.objects.filter")
+    patch_assessments = patch("openassessment.data.Assessment.objects.filter")
+    patch_users = patch("openassessment.data.get_user_model")
+
+    def setUp(self) -> None:
+        self.submission_uuid = "test_submission_uuid"
+        self.item_id = "test_item_id"
+        self.student_id = "test_student_id"
+
+    def mock_get_submission_and_student(self) -> dict:
+        """Mock the return value of `sub_api.get_submission_and_student`"""
+        return {"student_item": {"student_id": self.student_id}}
+
+    @staticmethod
+    def mock_submissions() -> List[dict]:
+        """Mock the return value of `Submission.objects.filter`"""
+        return [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
+
+    @staticmethod
+    def mock_parts() -> List[Mock]:
+        """Mock the return value of `parts.all`"""
+        part1 = Mock()
+        part1.criterion.name = "Criterion 1"
+        part1.points_earned = 10
+        part1.option.name = "Good"
+
+        part2 = Mock()
+        part2.criterion.name = "Criterion 2"
+        part2.points_earned = 8
+        part2.option.name = "Excellent"
+        return [part1, part2]
+
+    def mock_assessments(self) -> List[Mock]:
+        """Mock the return value of `Assessment.objects.filter`"""
+        first_mock = Mock(
+            pk="assessment_id1",
+            scorer_id="anonymous_id1",
+            scored_at="2022-01-01",
+            score_type="PE",
+            feedback="Good job!",
+            parts=Mock(all=Mock(return_value=self.mock_parts())),
+        )
+        second_mock = Mock(
+            pk="assessment_id2",
+            scorer_id="anonymous_id2",
+            scored_at="2022-01-02",
+            score_type="SE",
+            feedback="",
+            parts=Mock(all=Mock(return_value=self.mock_parts())),
+        )
+        return [first_mock, second_mock]
+
+    @staticmethod
+    def mock_users() -> List[dict]:
+        """Mock the return value of `get_user_model().objects.filter().select_related().annotate().values`"""
+        return [
+            {
+                "username": "username1",
+                "email": "email1",
+                "profile__name": "profile_name1",
+                "anonymous_id": "anonymous_id1"
+            },
+            {
+                "username": "username2",
+                "email": "email2",
+                "profile__name": "profile_name2",
+                "anonymous_id": "anonymous_id2"
+            },
+        ]
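Note for reviewers: in the tests below, the fake queryset is a MagicMock with two behaviours stubbed, because the setup implies the code under test both chains prefetch_related twice and iterates the queryset directly. In isolation the pattern is (attribute names come from the fixtures above, not from data.py):

    from unittest.mock import MagicMock, Mock

    fake_assessments = [Mock(pk="assessment_id1"), Mock(pk="assessment_id2")]

    queryset = MagicMock()
    # Pins the result of a two-step chain such as
    # Assessment.objects.filter(...).prefetch_related(...).prefetch_related(...)
    queryset.prefetch_related().prefetch_related.return_value = fake_assessments
    # Makes `for assessment in queryset: ...` yield the fakes as well;
    # MagicMock wraps the assigned list in iter() on each iteration.
    queryset.__iter__.return_value = fake_assessments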
+    @patch_users
+    @patch_assessments
+    @patch_submissions
+    @patch_submission_api
+    def test_generate_assessment_to_data(
+        self,
+        mock_submission_api: Mock,
+        mock_submissions: Mock,
+        mock_assessments: Mock,
+        mock_users: Mock,
+    ):
+        """Test that `generate_assessment_to_data` returns the expected data"""
+        mock_submission_api.return_value = self.mock_get_submission_and_student()
+        mock_submissions.return_value.values.return_value = self.mock_submissions()
+        assessments = MagicMock()
+        assessments.prefetch_related().prefetch_related.return_value = self.mock_assessments()
+        assessments.__iter__.return_value = self.mock_assessments()
+        mock_assessments.return_value = assessments
+        mock_users.return_value.objects.filter().select_related().annotate().values.return_value = (
+            self.mock_users()
+        )
+
+        results = generate_assessment_to_data(self.item_id, self.submission_uuid)
+
+        mock_submission_api.assert_called_once_with(self.submission_uuid)
+        mock_submissions.assert_called_once()
+        mock_assessments.assert_called_once()
+        mock_users.assert_called_once()
+        self.assertIsInstance(results, list)
+        self.assertEqual(len(results), 2)
+        for result in results:
+            self.assertIsInstance(result, dict)
+            self.assertEqual(
+                set(result.keys()),
+                {
+                    "assessment_id",
+                    "scorer_name",
+                    "scorer_username",
+                    "scorer_email",
+                    "assessment_date",
+                    "assessment_scores",
+                    "problem_step",
+                    "feedback",
+                }
+            )
+
+    @patch_submission_api
+    def test_generate_assessment_to_data_no_scorer_submission(
+        self, mock_submission_api: Mock
+    ):
+        """
+        Test that `generate_assessment_to_data` returns an empty list when
+        there is no scorer submission
+        """
+        mock_submission_api.return_value = None
+
+        result = generate_assessment_to_data(self.item_id, self.submission_uuid)
+
+        mock_submission_api.assert_called_once_with(self.submission_uuid)
+        self.assertEqual(result, [])
+
+    @patch_submissions
+    @patch_submission_api
+    def test_generate_assessment_to_data_no_submissions(
+        self, mock_submission_api: Mock, mock_submissions: Mock
+    ):
+        """
+        Test that `generate_assessment_to_data` returns an empty list when
+        there are no submissions
+        """
+        mock_submission_api.return_value = self.mock_get_submission_and_student()
+        mock_submissions.return_value.values.return_value = None
+
+        result = generate_assessment_to_data(self.item_id, self.submission_uuid)
+
+        mock_submission_api.assert_called_once_with(self.submission_uuid)
+        mock_submissions.assert_called_once()
+        self.assertEqual(result, [])
+
+    @patch_users
+    @patch_assessments
+    def test_generate_assessment_from_data(
+        self, mock_assessments: Mock, mock_users: Mock
+    ):
+        """Test that `generate_assessment_from_data` returns the expected data"""
+        assessments = MagicMock()
+        assessments.prefetch_related().prefetch_related.return_value = self.mock_assessments()
+        assessments.__iter__.return_value = self.mock_assessments()
+        mock_assessments.return_value = assessments
+        mock_users.return_value.objects.filter().select_related().annotate().values.return_value = (
+            self.mock_users()
+        )
+
+        results = generate_assessment_from_data(self.submission_uuid)
+
+        mock_assessments.assert_called_once()
+        mock_users.assert_called_once()
+        self.assertIsInstance(results, list)
+        self.assertEqual(len(results), 2)
+        for result in results:
+            self.assertIsInstance(result, dict)
+            self.assertEqual(
+                set(result.keys()),
+                {
+                    "assessment_id",
+                    "scorer_name",
+                    "scorer_username",
+                    "scorer_email",
+                    "assessment_date",
+                    "assessment_scores",
+                    "problem_step",
+                    "feedback",
+                }
+            )
+
+    @patch_users
+    @patch_assessments
+    def test_generate_assessment_from_data_no_assessments(
+        self, mock_assessments: Mock, mock_users: Mock
+    ):
+        """
+        Test that `generate_assessment_from_data` returns an empty list when
+        there are no assessments
+        """
+        assessments = MagicMock()
+        assessments.prefetch_related().prefetch_related.return_value = []
+        assessments.__iter__.return_value = []
+        mock_assessments.return_value = assessments
+        mock_users.return_value.objects.filter().select_related().annotate().values.return_value = []
+
+        result = generate_assessment_from_data(self.submission_uuid)
+
+        self.assertEqual(result, [])
+
+    @patch_users
+    def test_generate_assessment_data(self, mock_users: Mock):
+        """Test that `generate_assessment_data` returns the expected data"""
+        mock_users.return_value.objects.filter().select_related().annotate().values.return_value = (
+            self.mock_users()
+        )
+        assessments = MagicMock()
+        assessments.prefetch_related().prefetch_related.return_value = self.mock_assessments()
+        assessments.__iter__.return_value = self.mock_assessments()
+
+        results = generate_assessment_data(assessments)
+
+        self.assertIsInstance(results, list)
+        for result in results:
+            self.assertIsInstance(result, dict)
+            self.assertEqual(
+                set(result.keys()),
+                {
+                    "assessment_id",
+                    "scorer_name",
+                    "scorer_username",
+                    "scorer_email",
+                    "assessment_date",
+                    "assessment_scores",
+                    "problem_step",
+                    "feedback",
+                }
+            )
+
+    def test_parts_summary_with_multiple_parts(self):
+        """Test that `parts_summary` returns the expected data"""
+        assessment = self.mock_assessments()[0]
+        expected_result = [
+            {
+                "criterion_name": "Criterion 1",
+                "score_earned": 10,
+                "score_type": "Good",
+            },
+            {
+                "criterion_name": "Criterion 2",
+                "score_earned": 8,
+                "score_type": "Excellent",
+            },
+        ]
+
+        result = parts_summary(assessment)
+
+        self.assertEqual(result, expected_result)
+
+    def test_parts_summary_empty(self):
+        """Test that `parts_summary` returns an empty list when there are no parts"""
+        assessment = Mock()
+        assessment.parts.all.return_value = []
+
+        result = parts_summary(assessment)
+
+        self.assertEqual(result, [])
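Note for reviewers: taken together, the key-set assertions and the parts_summary tests above pin down the shape of the data layer's output. A sketch of what parts_summary and a single generate_assessment_data entry plausibly look like, inferred from these tests alone; the field wiring, the helper name _assessment_entry, and the use of score_type_to_string for problem_step are assumptions, not code from data.py:

    # Inferred from the assertions above; not copied from data.py.
    from openassessment.assessment.score_type_constants import score_type_to_string


    def parts_summary(assessment):
        # One dict per rubric part, as asserted in test_parts_summary_with_multiple_parts.
        return [
            {
                "criterion_name": part.criterion.name,
                "score_earned": part.points_earned,
                "score_type": part.option.name,
            }
            for part in assessment.parts.all()
        ]


    def _assessment_entry(assessment, scorer):  # hypothetical helper
        # `scorer` would be one row of the annotated user values() query.
        return {
            "assessment_id": assessment.pk,
            "scorer_name": scorer["profile__name"],
            "scorer_username": scorer["username"],
            "scorer_email": scorer["email"],
            "assessment_date": assessment.scored_at,
            "assessment_scores": parts_summary(assessment),
            "problem_step": score_type_to_string(assessment.score_type),  # e.g. "PE" -> "Peer"
            "feedback": assessment.feedback,
        }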
diff --git a/package.json b/package.json
index bf9fd5f930..42dff5557b 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "edx-ora2",
-  "version": "6.0.33",
+  "version": "6.1.0",
   "repository": "https://github.com/openedx/edx-ora2.git",
   "dependencies": {
     "@edx/frontend-build": "8.0.6",