test_util.py
import json
import random

from django.contrib.auth.models import User, Group, Permission
from django.test.client import Client
from django.conf import settings
from django.utils import timezone

from controller.models import Submission, Grader, SubmissionState, GraderStatus
from peer_grading.models import CalibrationHistory, CalibrationRecord
from ml_grading import ml_model_creation

MAX_SCORE = 3

RUBRIC_XML = """
<rubric>
    <category>
        <description>One</description>
        <option>0</option>
        <option>1</option>
    </category>
    <category>
        <description>Two</description>
        <option>0</option>
        <option>1</option>
    </category>
</rubric>
"""

def create_user():
    """
    Create the shared 'test' staff/superuser account and add it to the
    submitters group, if it does not already exist.
    """
    if User.objects.filter(username='test').count() == 0:
        user = User.objects.create_user('test', '[email protected]', 'CambridgeMA')
        user.is_staff = True
        user.is_superuser = True
        submitters, created = Group.objects.get_or_create(name=settings.SUBMITTERS_GROUP)
        view_submission = Permission.objects.get(codename=settings.EDIT_SUBMISSIONS_PERMISSION)
        submitters.permissions.add(view_submission)
        user.groups.add(submitters)
        user.save()

def delete_all():
    """Remove all submissions, graders, and calibration data from the test database."""
    for sub in Submission.objects.all():
        sub.delete()
    for grade in Grader.objects.all():
        grade.delete()
    for cal_hist in CalibrationHistory.objects.all():
        cal_hist.delete()
    for cal_record in CalibrationRecord.objects.all():
        cal_record.delete()

def get_sub(grader_type, student_id, location, preferred_grader_type="ML", course_id="course_id",
            rubric=RUBRIC_XML,
            student_response="This is a response that will hopefully pass basic sanity checks."):
    """Build (but do not save) a Submission in the waiting_to_be_graded state."""
    # Pick the grader settings file that matches the preferred grader type.
    prefix = "ml"
    if preferred_grader_type == "PE":
        prefix = "peer"

    test_sub = Submission(
        prompt="prompt",
        student_id=student_id,
        problem_id="id",
        state=SubmissionState.waiting_to_be_graded,
        student_response=student_response,
        student_submission_time=timezone.now(),
        xqueue_submission_id="id",
        xqueue_submission_key="key",
        xqueue_queue_name="MITx-6.002x",
        location=location,
        course_id=course_id,
        max_score=MAX_SCORE,
        next_grader_type=grader_type,
        previous_grader_type=grader_type,
        grader_settings=prefix + "_grading.conf",
        preferred_grader_type=preferred_grader_type,
        rubric=rubric,
    )
    return test_sub

def get_grader(grader_type, status_code=GraderStatus.success, score=None):
    """Build (but do not save) a Grader; the score defaults to a random value in [0, MAX_SCORE]."""
    if score is None:
        score = random.randint(0, MAX_SCORE)
    test_grader = Grader(
        score=score,
        feedback="",
        status_code=status_code,
        grader_id="1",
        grader_type=grader_type,
        confidence=1,
        is_calibration=False,
    )
    return test_grader

def get_student_info(student_id):
    """Return a JSON-encoded student info dictionary for the given anonymous student id."""
    student_info = {
        'submission_time': timezone.now().strftime("%Y%m%d%H%M%S"),
        'anonymous_student_id': student_id,
    }
    return json.dumps(student_info)

def get_xqueue_header():
    """Return a JSON-encoded xqueue header for use in test requests."""
    xqueue_header = {
        'submission_id': 1,
        'submission_key': 1,
        'queue_name': "MITx-6.002x",
    }
    return json.dumps(xqueue_header)
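
# Illustrative sketch (not part of the original utilities): get_student_info and
# get_xqueue_header are typically combined into an xqueue-style POST body when a
# test exercises the controller's submission endpoint. The payload field names
# below ('xqueue_header', 'xqueue_body', 'student_info', 'student_response',
# 'max_score') are assumptions based on the standard xqueue message format and
# are not taken from this file.
def get_example_xqueue_post_data(student_id="1", student_response="Test response."):
    xqueue_body = {
        'student_info': get_student_info(student_id),
        'student_response': student_response,
        'max_score': MAX_SCORE,
    }
    return {
        'xqueue_header': get_xqueue_header(),
        'xqueue_body': json.dumps(xqueue_body),
    }
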
def create_ml_model(student_id, location):
    """Create enough instructor-graded submissions at a location for ML grading, then train a model."""
    # Create enough instructor graded submissions that ML will work
    for i in xrange(0, settings.MIN_TO_USE_ML):
        sub = get_sub("IN", student_id, location, "ML")
        sub.state = SubmissionState.finished
        sub.save()
        grade = get_grader("IN")
        grade.submission = sub
        grade.save()

    # Create ML Model
    ml_model_creation.handle_single_location(location)
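

# Minimal usage sketch (illustrative only, not part of the original module): a
# hypothetical Django test case showing how the helpers above are typically
# wired together. The class name, location string, and assertion are assumptions
# made for demonstration; only the helper calls themselves come from this file.
from django.test import TestCase


class ExampleSubmissionGradingTest(TestCase):
    def setUp(self):
        # Start from a clean slate and make sure the shared test user exists.
        delete_all()
        create_user()

    def test_grader_attaches_to_submission(self):
        # Build and persist a submission, then attach an instructor grade to it.
        sub = get_sub("IN", "student_1", "example_location", "ML")
        sub.save()
        grader = get_grader("IN")
        grader.submission = sub
        grader.save()
        # The grade should now be retrievable through its submission foreign key.
        self.assertEqual(Grader.objects.filter(submission=sub).count(), 1)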