
Commit

Return the existing comment alongside the grade in "read_result"
marcospri committed Aug 24, 2023
1 parent 9c09fe3 commit ecc48c0
Showing 7 changed files with 60 additions and 28 deletions.
15 changes: 9 additions & 6 deletions lms/services/lti_grading/_v11.py
@@ -4,7 +4,7 @@
 
 from lms.services.exceptions import ExternalRequestError, StudentNotInCourse
 from lms.services.http import HTTPService
-from lms.services.lti_grading.interface import LTIGradingService
+from lms.services.lti_grading.interface import GradingResult, LTIGradingService
 from lms.services.oauth1 import OAuth1Service
@@ -18,9 +18,10 @@ def __init__(
         self.http_service = http_service
         self.oauth1_service = oauth1_service
 
-    def read_result(self, grading_id):
+    def read_result(self, grading_id) -> GradingResult:
+        result = GradingResult(score=None, comment=None)
         try:
-            result = self._send_request(
+            response = self._send_request(
                 {
                     "readResultRequest": {
                         "resultRecord": {"sourcedGUID": {"sourcedId": grading_id}}
@@ -34,11 +35,13 @@ def read_result(self, grading_id):
             raise
 
         try:
-            return float(
-                result["readResultResponse"]["result"]["resultScore"]["textString"]
+            result.score = float(
+                response["readResultResponse"]["result"]["resultScore"]["textString"]
             )
         except (TypeError, KeyError, ValueError):
-            return None
+            pass
+
+        return result
 
     def record_result(self, grading_id, score=None, pre_record_hook=None, comment=None):
         request = {"resultRecord": {"sourcedGUID": {"sourcedId": grading_id}}}
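Note: for context, a minimal sketch of the parsed Basic Outcomes readResult payload that the code above indexes into. The key names come from the diff; the score value and the exact dict shape returned by _send_request() are assumptions for illustration. The LTI 1.1 response carries no comment, so GradingResult.comment stays None on this path.

# Illustrative only: assumed shape of the parsed POX readResult body.
response = {
    "readResultResponse": {
        "result": {"resultScore": {"textString": "0.95"}}
    }
}

# Same extraction the service performs; a missing or non-numeric textString
# raises and leaves result.score as None.
score = float(response["readResultResponse"]["result"]["resultScore"]["textString"])
assert score == 0.95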
15 changes: 9 additions & 6 deletions lms/services/lti_grading/_v13.py
@@ -4,7 +4,7 @@
 from urllib.parse import urlparse
 
 from lms.services.exceptions import ExternalRequestError, StudentNotInCourse
-from lms.services.lti_grading.interface import LTIGradingService
+from lms.services.lti_grading.interface import GradingResult, LTIGradingService
 from lms.services.ltia_http import LTIAHTTPService
 
 LOG = logging.getLogger(__name__)
@@ -28,7 +28,8 @@ def __init__(
         super().__init__(line_item_url, line_item_container_url)
         self._ltia_service = ltia_service
 
-    def read_result(self, grading_id):
+    def read_result(self, grading_id) -> GradingResult:
+        result = GradingResult(score=None, comment=None)
         try:
             response = self._ltia_service.request(
                 "GET",
@@ -39,17 +40,19 @@ def read_result(self, grading_id):
             )
         except ExternalRequestError as err:
             if err.status_code == 404:
-                return None
+                return result
             raise
 
         results = response.json()
         if not results:
-            return None
+            return result
 
         try:
-            return results[-1]["resultScore"] / results[-1]["resultMaximum"]
+            result.score = results[-1]["resultScore"] / results[-1]["resultMaximum"]
         except (TypeError, ZeroDivisionError, KeyError, IndexError):
-            return None
+            pass
+        result.comment = results[-1].get("comment")
+        return result
 
     def get_score_maximum(self, resource_link_id) -> Optional[float]:
         return self._read_grading_configuration(resource_link_id).get("scoreMaximum")
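Note: a short sketch of the AGS result container handled above. The field names (resultScore, resultMaximum, comment) are the ones the code reads; the values are invented. Only the last entry in the list is used, and .get() tolerates a result with no comment.

# Illustrative payload, standing in for what response.json() returns above.
results = [
    {"resultScore": 5, "resultMaximum": 10, "comment": "First attempt"},
    {"resultScore": 8, "resultMaximum": 10, "comment": "Good work"},
]

score = results[-1]["resultScore"] / results[-1]["resultMaximum"]  # 0.8
comment = results[-1].get("comment")  # "Good work"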
12 changes: 11 additions & 1 deletion lms/services/lti_grading/interface.py
@@ -1,3 +1,13 @@
+from dataclasses import dataclass
+from typing import Optional
+
+
+@dataclass
+class GradingResult:
+    score: Optional[float]
+    comment: Optional[str]
+
+
 class LTIGradingService:  # pragma: no cover
     """
     Service for sending grades back to the LMS.
@@ -47,7 +57,7 @@ def get_score_maximum(self, resource_link_id):  # pylint:disable=unused-argument
         """
         return None
 
-    def read_result(self, grading_id):
+    def read_result(self, grading_id) -> GradingResult:
         """
         Return the last-submitted score for a given submission.
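Note: a self-contained sketch of the new return contract. read_result() used to return a bare Optional[float]; it now always returns a GradingResult, so callers branch on its fields instead of on None. The describe() helper is hypothetical, purely to illustrate.

from dataclasses import dataclass
from typing import Optional


@dataclass
class GradingResult:
    score: Optional[float]
    comment: Optional[str]


def describe(result: GradingResult) -> str:
    # Hypothetical caller code, not part of the commit.
    if result.score is None:
        return "No grade recorded yet"
    if result.comment:
        return f"Score {result.score:.2f} ({result.comment})"
    return f"Score {result.score:.2f}"


print(describe(GradingResult(score=None, comment=None)))        # No grade recorded yet
print(describe(GradingResult(score=0.8, comment="Good work")))  # Score 0.80 (Good work)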
4 changes: 2 additions & 2 deletions lms/views/api/grading.py
@@ -65,11 +65,11 @@ def record_result(self):
     def read_result(self):
         """Proxy request for current result (grade/score) to LTI Result API."""
 
-        current_score = self.lti_grading_service.read_result(
+        result = self.lti_grading_service.read_result(
            self.parsed_params["lis_result_sourcedid"]
         )
 
-        return {"currentScore": current_score}
+        return {"currentScore": result.score, "comment": result.comment}
 
     @view_config(
         route_name="lti_api.submissions.record", schema=APIRecordSpeedgraderSchema
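Note: for reference, a sketch of the JSON the proxy endpoint now returns; values are invented. The extra "comment" key is the user-visible change, and when nothing has been graded both fields serialise to null.

import json

# Graded submission with an instructor comment attached to the result.
print(json.dumps({"currentScore": 0.8, "comment": "Good work"}))
# Nothing graded yet: GradingResult(score=None, comment=None) becomes nulls.
print(json.dumps({"currentScore": None, "comment": None}))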
9 changes: 5 additions & 4 deletions tests/unit/lms/services/lti_grading/_v11_test.py
@@ -14,29 +14,30 @@ class TestLTI11GradingService:
     def test_read_result(self, svc, respond_with, http_service):
         respond_with(GradingResponse(score=0.95))
 
-        score = svc.read_result(sentinel.grading_id)
+        result = svc.read_result(sentinel.grading_id)
 
         assert self.sent_pox_body(http_service) == {
             "readResultRequest": {
                 "resultRecord": {"sourcedGUID": {"sourcedId": "sentinel.grading_id"}}
             }
         }
-        assert score == 0.95
+        assert result.score == 0.95
+        assert result.comment is None
 
     @pytest.mark.parametrize("score", [None, "", "not-a-float"])
     def test_read_result_returns_none_if_score_not_a_float(
         self, svc, respond_with, score
     ):
         respond_with(GradingResponse(score=score))
 
-        assert svc.read_result(sentinel.grading_id) is None
+        assert svc.read_result(sentinel.grading_id).score is None
 
     def test_read_result_returns_none_if_no_score(self, svc, respond_with):
         response = GradingResponse()
         response.result_response["result"]["resultScore"].pop("textString")
         respond_with(response)
 
-        assert svc.read_result(sentinel.grading_id) is None
+        assert svc.read_result(sentinel.grading_id).score is None
 
     def test_methods_raises_StudentNotInCourse(self, svc, respond_with):
         response = GradingResponse(
21 changes: 15 additions & 6 deletions tests/unit/lms/services/lti_grading/_v13_test.py
@@ -15,7 +15,7 @@ def test_read_lti_result(self, svc, response, ltia_http_service):
         ltia_http_service.request.return_value.json.return_value = response
         svc.line_item_url = "https://lms.com/lineitems?param=1"
 
-        score = svc.read_result(sentinel.user_id)
+        result = svc.read_result(sentinel.user_id)
 
         ltia_http_service.request.assert_called_once_with(
             "GET",
@@ -24,16 +24,19 @@ def test_read_lti_result(self, svc, response, ltia_http_service):
             params={"user_id": sentinel.user_id},
             headers={"Accept": "application/vnd.ims.lis.v2.resultcontainer+json"},
         )
-        assert score == response[-1]["resultScore"] / response[-1]["resultMaximum"]
+        assert (
+            result.score == response[-1]["resultScore"] / response[-1]["resultMaximum"]
+        )
 
     def test_read_lti_result_empty(self, svc, ltia_http_service):
         ltia_http_service.request.side_effect = ExternalRequestError(
             response=Mock(status_code=404)
         )
 
-        score = svc.read_result(sentinel.user_id)
+        result = svc.read_result(sentinel.user_id)
 
-        assert not score
+        assert not result.score
+        assert not result.comment
 
     def test_read_lti_result_raises(self, svc, ltia_http_service):
         ltia_http_service.request.side_effect = ExternalRequestError(
@@ -46,7 +49,10 @@ def test_read_empty_lti_result(self, svc, ltia_http_service):
     def test_read_empty_lti_result(self, svc, ltia_http_service):
         ltia_http_service.request.return_value.json.return_value = []
 
-        assert not svc.read_result(sentinel.user_id)
+        result = svc.read_result(sentinel.user_id)
+
+        assert not result.score
+        assert not result.comment
 
     @pytest.mark.parametrize(
         "bad_response",
@@ -60,7 +66,10 @@ def test_read_empty_lti_result(self, svc, ltia_http_service):
     def test_read_bad_response_lti_result(self, svc, ltia_http_service, bad_response):
         ltia_http_service.request.return_value.json.return_value = bad_response
 
-        assert not svc.read_result(sentinel.user_id)
+        result = svc.read_result(sentinel.user_id)
+
+        assert not result.score
+        assert not result.comment
 
     def test_get_score_maximum(self, svc, ltia_http_service):
         ltia_http_service.request.return_value.json.return_value = [
12 changes: 9 additions & 3 deletions tests/unit/lms/views/api/grading_test.py
@@ -5,6 +5,7 @@
 import pytest
 from h_matchers import Any
 
+from lms.services.lti_grading.interface import GradingResult
 from lms.views.api.grading import CanvasPreRecordHook, GradingViews
 
 pytestmark = pytest.mark.usefixtures("lti_grading_service")
@@ -159,12 +160,17 @@ def test_it_proxies_to_read_result(self, pyramid_request, lti_grading_service):
             "modelstudent-assignment1"
         )
 
-    def test_it_returns_current_score(self, pyramid_request, lti_grading_service):
-        lti_grading_service.read_result.return_value = 0.5
+    def test_it_returns_current_result(self, pyramid_request, lti_grading_service):
+        lti_grading_service.read_result.return_value = GradingResult(
+            score=sentinel.score, comment=sentinel.comment
+        )
 
         current_score = GradingViews(pyramid_request).read_result()
 
-        assert current_score == {"currentScore": 0.5}
+        assert current_score == {
+            "currentScore": sentinel.score,
+            "comment": sentinel.comment,
+        }
 
     @pytest.fixture
     def pyramid_request(self, pyramid_request):
