Get into Autograding Format #117

Closed
5 changes: 3 additions & 2 deletions Dockerfile
@@ -6,10 +6,11 @@ RUN pip install -r /requirements.txt
 
 RUN apk update && apk upgrade\
 && apk --no-cache add curl bash\
-&& apk cache clean
+&& apk cache clean\
+&& apt-get install jq -y \
 
 COPY . /opt/test-runner
 
 WORKDIR /opt/test-runner
 
-ENTRYPOINT ["sh", "/opt/test-runner/bin/run.sh" ]
\ No newline at end of file
+ENTRYPOINT ["sh", "/opt/test-runner/bin/run.sh" ]
10 changes: 10 additions & 0 deletions action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
name: "GitHub Classroom Python Grader"
author: "GitHub"
description: "A plugin for GitHub Classroom's Autograder using pytest to ensure student executables output the correct values on tests."
outputs:
result:
description: "Runner output"
runs:
using: docker
image: Dockerfile
entrypoint: "/opt/test-runner/bin/run.sh"
13 changes: 1 addition & 12 deletions bin/run.py
@@ -9,13 +9,6 @@
 import runner.utils
 
 
-def _slug(arg):
-    try:
-        return runner.utils.slug(arg)
-    except ValueError as err:
-        raise ArgumentTypeError(str(err))
-
-
 def _directory(arg):
     try:
         return runner.utils.directory(arg)
@@ -29,10 +22,6 @@ def main():
     """
     parser = ArgumentParser(description="Run the tests of a Python exercise.")
 
-    parser.add_argument(
-        "slug", metavar="SLUG", type=_slug, help="name of the exercise to process",
-    )
-
     parser.add_argument(
         "input",
         metavar="IN",
@@ -50,7 +39,7 @@ def main():
     parser.add_argument("pytest_args", nargs=REMAINDER)
 
     args = parser.parse_args()
-    runner.run(args.slug, args.input, args.output, args.pytest_args)
+    runner.run(args.input, args.output, args.pytest_args)
 
 
 if __name__ == "__main__":
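
With the slug argument removed, the runner needs only an input directory, an output directory, and optional pytest arguments. A minimal sketch of driving it directly from Python, assuming the paths below (which are illustrative; `runner.utils.directory` is the same validator the CLI uses):

    # Hypothetical direct invocation of the runner after this change.
    import runner
    import runner.utils

    indir = runner.utils.directory("./")                     # exercise directory
    outdir = runner.utils.directory("./autograding_output/") # must be writable
    runner.run(indir, outdir, [])                            # writes results.json into outdir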
11 changes: 8 additions & 3 deletions bin/run.sh
@@ -1,5 +1,10 @@
-#! /usr/bin/env sh
+#! /bin/sh
 
-root="$( dirname "$( cd "$( dirname "$0" )" >/dev/null 2>&1 && pwd )" )"
+root="/opt/test-runner"
 export PYTHONPATH="$root:$PYTHONPATH"
-/usr/bin/env python3 bin/run.py "$@"
+
+mkdir autograding_output
+
+python3 /opt/test-runner/bin/run.py ./ ./autograding_output/
+
+echo "result=$(jq -c . autograding_output/results.json | jq -sRr @base64)" >> "$GITHUB_OUTPUT"
80 changes: 24 additions & 56 deletions runner/__init__.py
@@ -11,11 +11,10 @@
 
 import pytest
 
-from .data import Slug, Directory, Hierarchy, Results, Test
+from .data import Directory, Hierarchy, Results, Test
 from .sort import TestOrder
 
 
-
 class ResultsReporter:
     def __init__(self):
         self.results = Results()
@@ -38,6 +37,7 @@ def pytest_collection_modifyitems(self, session, config, items):
             for mark in item.iter_markers(name='task'):
                 self.tests[name] = Test(name=name, task_id=mark.kwargs['taskno'])
 
+
         def _sort_by_lineno(item):
             test_id = Hierarchy(item.nodeid)
             source = Path(item.fspath)
@@ -50,60 +50,32 @@ def pytest_runtest_logreport(self, report):
         Process a test setup / call / teardown report.
         """
 
-        name = ".".join(report.nodeid.split("::")[1:])
-        if report.head_line:
-            name = report.head_line.split(" (")[0]
-
-
+        #Add variation name to test output.
+        name = report.head_line if report.head_line else ".".join(report.nodeid.split("::")[1:])
         if name not in self.tests:
-            self.tests[name] = Test(name)
+            # Extract filename and line number
+            filename = report.location[0]
+            line_no = report.location[1]
+            # Initialize Test with filename and line number
+            self.tests[name] = Test(name, filename=filename, line_no=line_no)
 
         state = self.tests[name]
 
+        # Store duration
+        state.duration = report.duration
+
         # ignore successful setup and teardown stages
         if report.passed and report.when != "call":
            return
 
-        #Update tests that have already failed with capstdout and return.
+        # Update tests that have already failed with capstdout and return.
         if not state.is_passing():
-
-            #Check if a report is a concept exercise subtest parent.
-            if report.capstdout:
-
-                #split up the captured stdout by subtest result.
-                captures = [item for item in report.capstdout.split('\nu')]
-                if captures[0].startswith('u'):
-                    captures[0] = captures[0][1:]
-
-                parsed_captures = []
-
-                # Insert spacers for subtests and stdout entries in correct position.
-                for item in captures:
-                    empties = len(item) - len(item.lstrip('u'))
-                    if empties > 0:
-                        for number in range(1, empties+1):
-                            parsed_captures.append(' ')
-                        parsed_captures.append(item.lstrip('u'))
-                    else: parsed_captures.append(item)
-
-                # Generate variation numbers for each subtest output section.
-                variants = (f'[variation #{number}]: {item}' for
-                            item, number in zip(parsed_captures, range(1, len(parsed_captures)+1)))
-
-                # Go through the variations and match them to self.tests.
-                # Insert matched variation output into test output field.
-                for item in variants:
-                    for name in self.tests:
-                        if item.split(":")[0] in name and report.nodeid.split("::")[2] in name:
-                            self.tests[name].output = item.split("]: ")[1]
-            else:
-                state.output = report.capstdout
+            if report.capstdout.rstrip('FFFFFFFF ').rstrip('uuuuu'):
+                state.output = report.capstdout.rstrip('FFFFFFFF ').rstrip('uuuuu')
            return
 
-        else:
-            if report.capstdout:
-                state.output = report.capstdout
+        # Record captured relevant stdout content for passed tests.
+        if report.capstdout:
+            state.output = report.capstdout
 
         # Handle details of test failure
         if report.failed:
@@ -140,6 +112,7 @@ def pytest_runtest_logreport(self, report):
             )
             self.tests[parent_test_name].test_code = state.test_code
 
+
    def pytest_sessionfinish(self, session, exitstatus):
        """
        Processes the results into a report.
@@ -217,20 +190,16 @@ def _sanitize_args(args: List[str]) -> List[str]:
     return clean
 
 
-def run(slug: Slug, indir: Directory, outdir: Directory, args: List[str]) -> None:
+def run(indir: Directory, outdir: Directory, args: List[str]) -> None:
     """
     Run the tests for the given exercise and produce a results.json.
     """
     test_files = []
-    config_file = indir.joinpath(".meta").joinpath("config.json")
-
-    if config_file.is_file():
-        config_data = json.loads(config_file.read_text())
-        for filename in config_data.get('files', {}).get('test', []):
-            test_files.append(indir.joinpath(filename))
-
-    if not test_files:
-        test_files.append(indir.joinpath(slug.replace("-", "_") + "_test.py"))
+    for root, dirs, files in os.walk(indir):
+        for file in files:
+            if file.endswith("_test.py"):
+                test_files.append(Path(root) / file)
 
     out_file = outdir.joinpath("results.json")
 
@@ -240,9 +209,8 @@ def run(indir: Directory, outdir: Directory, args: List[str]) -> None:
 
     # dump the report
     out_file.write_text(reporter.results.as_json())
-
     # remove cache directories
     for cache_dir in ['.pytest_cache', '__pycache__']:
         dirpath = indir / cache_dir
-        if dirpath.is_dir() and dirpath.owner() == out_file.owner():
+        if dirpath.is_dir():
             shutil.rmtree(dirpath)
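
The reporter now takes `filename` and `line_no` from pytest's `report.location`, a `(filesystempath, lineno, domaininfo)` triple whose line number is zero-based, and records `report.duration` for each phase. A standalone sketch of a hook reading the same fields; drop it in a `conftest.py` to observe the values (the `print` is illustrative only):

    # Sketch: the TestReport fields the reporter above consumes.
    def pytest_runtest_logreport(report):
        filename, line_no, _domain = report.location  # line_no is zero-based
        if report.when == "call":
            print(filename, line_no, report.duration, report.passed)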
10 changes: 7 additions & 3 deletions runner/data.py
@@ -51,6 +51,9 @@ class Test:
     message: Message = None
     test_code: str = ""
     task_id: int = 0
+    filename: str = ""
+    line_no: int = 0
+    duration: float = 0.0
 
     # for an explanation of why both of these are necessary see
     # https://florimond.dev/blog/articles/2018/10/reconciling-dataclasses-and-properties-in-python/
@@ -79,7 +82,6 @@ def output(self, captured: Output) -> None:
             return
 
         captured = captured.strip()
-
         truncate_msg = " [Output was truncated. Please limit to 500 chars]"
         if len(captured) > 500:
             captured = captured[: 500 - len(truncate_msg)] + truncate_msg
@@ -142,7 +144,9 @@ def error(self, message: Message = None) -> None:
 def _factory(items):
     result = {}
     for key, value in items:
-        if key == "_output" or key in {"message", "output", "subtest"} and value in (None, "", " "):
+        if key == "_output" or key in {"message", "output", "subtest"} and value is None:
             continue
+        elif key == "_output" or key in {"message", "output", "subtest"} and "\u001b[31mF\u001b[0m" in value:
+            continue
 
         if isinstance(value, Status):
@@ -163,5 +167,5 @@ def as_json(self):
         for item in results["tests"]:
             item["name"] = sub(trim_name, '\\1 > ', item["name"]).replace('_', ' ')
 
-        results["tests"] = sorted(results["tests"], key= lambda item: item["task_id"])
+        results["tests"] = sorted(results["tests"], key=lambda item: item["task_id"])
         return dumps(results, indent=2)
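
`_factory` has the shape of a `dict_factory` for `dataclasses.asdict`: it receives the dataclass's `(key, value)` pairs and skips entries that should not be serialized. A minimal standalone sketch of that pattern, assuming that wiring (the `Example` dataclass and `_drop_empty` helper are illustrative, not part of this PR):

    # Sketch of the dict_factory filtering pattern (illustrative names).
    from dataclasses import dataclass, asdict
    from typing import Optional

    def _drop_empty(items):
        # Keep only keys whose value is not None, mirroring the idea above.
        return {key: value for key, value in items if value is not None}

    @dataclass
    class Example:
        name: str
        output: Optional[str] = None

    print(asdict(Example("t1"), dict_factory=_drop_empty))  # -> {'name': 't1'}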
18 changes: 17 additions & 1 deletion test/example-with-config-multiple-files/results.json
@@ -3,60 +3,76 @@
   "status": "pass",
   "tests": [
     {
+      "filename": "test/example-with-config-multiple-files/example_first_test.py",
+      "line_no": 7,
       "name": "ExampleFirst > hello",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config-multiple-files/example_second_test.py",
+      "line_no": 7,
       "name": "ExampleSecond > hello",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config-multiple-files/example_first_test.py",
+      "line_no": 10,
       "name": "ExampleFirst > abc",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config-multiple-files/example_second_test.py",
+      "line_no": 10,
       "name": "ExampleSecond > abc",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config-multiple-files/example_first_test.py",
+      "line_no": 15,
       "name": "ExampleFirstOther > dummy",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config-multiple-files/example_second_test.py",
+      "line_no": 15,
       "name": "ExampleSecondOther > dummy",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config-multiple-files/example_first_test.py",
+      "line_no": 18,
       "name": "ExampleFirstOther > hello",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config-multiple-files/example_second_test.py",
+      "line_no": 18,
       "name": "ExampleSecondOther > hello",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     }
   ]
-}
\ No newline at end of file
+}
10 changes: 9 additions & 1 deletion test/example-with-config/results.json
@@ -3,32 +3,40 @@
   "status": "pass",
   "tests": [
     {
+      "filename": "test/example-with-config/example_with_config_test.py",
+      "line_no": 7,
       "name": "ExampleWithConfig > hello",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config/example_with_config_test.py",
+      "line_no": 10,
       "name": "ExampleWithConfig > abc",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config/example_with_config_test.py",
+      "line_no": 15,
       "name": "ExampleWithConfigOther > dummy",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     },
     {
+      "filename": "test/example-with-config/example_with_config_test.py",
+      "line_no": 18,
       "name": "ExampleWithConfigOther > hello",
       "status": "pass",
       "test_code": "self.assertEqual(hello(), \"Hello, World!\")",
       "task_id": 0,
       "output": "User output is captured!"
     }
   ]
-}
\ No newline at end of file
+}