Skip to content

Commit

Permalink
Added max score to output of get_student_grade_summary_data.
Browse files Browse the repository at this point in the history
This will put the actual score and actual max score in scope for
the return_csv function, so actual scores can be dumped.

The ultimate goal is to provide this data in the CSV dump that is
passed to Stellar via pylmod.

This is PR openedx#10395 on edX, and issue 95 on mitocw's edx fork.

https://github.com/edx/edx-platform/pull/10395

#95
  • Loading branch information
ShawnMilo authored and amir-qayyum-khan committed Mar 1, 2016
1 parent 1d08bf3 commit 23d6e0b
Show file tree
Hide file tree
Showing 3 changed files with 227 additions and 7 deletions.
30 changes: 29 additions & 1 deletion AUTHORS
Original file line number Diff line number Diff line change
Expand Up @@ -225,4 +225,32 @@ Alessandro Verdura <[email protected]>
Sven Marnach <[email protected]>
Richard Moch <[email protected]>
Albert Liang <[email protected]>

Pan Luo <[email protected]>
Tyler Nickerson <[email protected]>

This comment has been minimized.

Copy link
@noisecapella

noisecapella Mar 10, 2016

Why are all of these authors here? They weren't in the original commit: edx/edx-platform@5d88300

This comment has been minimized.

Copy link
@bdero

bdero Mar 11, 2016

Most likely this was one of the two direct conflict resolution choices written to the file by git, since there have probably been a lot of authors added since Cypress.

Vedran Karačić <[email protected]>
William Ono <[email protected]>
Dongwook Yoon <[email protected]>
Awais Qureshi <[email protected]>
Eric Fischer <[email protected]>
Brian Beggs <[email protected]>
Bill DeRusha <[email protected]>
Kevin Falcone <[email protected]>
Mirjam Škarica <[email protected]>
Saleem Latif <[email protected]>
Julien Paillé <[email protected]>
Michael Frey <[email protected]>
Hasnain Naveed <[email protected]>
J. Cliff Dyer <[email protected]>
Jamie Folsom <[email protected]>
George Schneeloch <[email protected]>
Dustin Gadal <[email protected]>
Ibrahim Ahmed <[email protected]>
Robert Raposa <[email protected]>
Giovanni Di Milia <[email protected]>
Peter Wilkins <[email protected]>
Justin Abrahms <[email protected]>
Arbab Nazar <[email protected]>
Douglas Hall <[email protected]>
Awais Jibran <[email protected]>
Muhammad Rehan <[email protected]>
Shawn Milochik <[email protected]>
175 changes: 173 additions & 2 deletions lms/djangoapps/instructor/tests/test_legacy_raw_download_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,23 @@ def test_download_raw_grades_dump(self):
'''
self.assertEqual(body, expected_csv, msg)

def test_grade_summary_data(self):
def get_expected_grade_data(
self, get_grades=True, get_raw_scores=False,
use_offline=False, get_score_max=False):
"""
Test grade summary data report generation
Return expected results from the get_student_grade_summary_data call
with any options selected.
Note that the kwargs accepted by get_expected_grade_data (and their
default values) must be identical to those in
get_student_grade_summary_data for this function to be accurate.
If kwargs are added or removed, or the functionality triggered by
them changes, this function must be updated to match.
If get_score_max is True, instead of a single score between 0 and 1,
the actual score and total possible are returned. For example, if the
student got one out of two possible points, the values (1, 2) will be
returned instead of 0.5.
"""
self.answer_question()

Expand Down Expand Up @@ -104,6 +118,163 @@ def test_grade_summary_data(self):
]
}

# The first five columns contain the student ID, username,
# full name, and e-mail addresses.
non_grade_columns = 5
# If the following 'if' is triggered, the
# get_student_grade_summary_data function will not return any
# grade data. Only the "non_grade_columns."
# So strip out the headers beyond the "non_grade_columns," and
# strip out all the grades in the 'data' key.
if not get_grades or use_offline:
expected_data["header"] = expected_data["header"][:non_grade_columns]
# This iterates over the lists of grades in the 'data' key
# of the return dictionary and strips out everything after
# the non_grade_columns.
for index, rec in enumerate(expected_data["data"]):
expected_data["data"][index] = rec[:non_grade_columns]
# Wipe out all data in the 'assignments' key if use_offline
# is True; no assignment data is returned.
if use_offline:
expected_data['assignments'] = []
# If get_grades is False, get_student_grade_summary_data doesn't
# even return an 'assignments' key, so delete it.
if get_grades is False:
del expected_data['assignments']
# If get_raw_scores is true, get_student_grade_summary_data returns
# the raw score per assignment. For example, the "0.3333333333333333"
# in the data above is for getting one out of three possible
# answers correct. Getting raw scores means the actual score (1) is
# return instead of: 1.0/3.0
# For some reason, this also causes it to not to return any assignments
# without attempts, so most of the headers are removed.
elif get_raw_scores:
expected_data["data"] = [
[
1, u'u1', u'username', u'[email protected]',
'', None, None, None
],
[
2, u'u2', u'username', u'[email protected]',
'', 0.0, 1.0, 0.0
],
]
expected_data["assignments"] = [u'p3', u'p2', u'p1']
expected_data["header"] = [
u'ID', u'Username', u'Full Name', u'edX email',
u'External email', u'p3', u'p2', u'p1'
]
# Strip out the single-value float scores and replace them
# with two-tuples of actual and possible scores (see docstring).
if get_score_max:
expected_data["data"][-1][-3:] = (0.0, 1), (1.0, 1.0), (0.0, 1)

return expected_data

def test_grade_summary_data_defaults(self):
    """
    Test grade summary data report generation with all default kwargs.

    This test compares the output of get_student_grade_summary_data
    with a dictionary of expected values. The purpose of this test is
    to ensure that future changes to the get_student_grade_summary_data
    function (for example, mitocw/edx-platform #95) do not alter its
    default behavior.
    """
    request = DummyRequest()
    self.answer_question()
    data = get_student_grade_summary_data(request, self.course)
    expected_data = self.get_expected_grade_data()
    self.compare_data(data, expected_data)

def test_grade_summary_data_raw_scores(self):
    """
    Test grade summary data report generation with get_raw_scores True.
    """
    self.answer_question()
    request = DummyRequest()
    actual = get_student_grade_summary_data(
        request, self.course, get_raw_scores=True,
    )
    expected = self.get_expected_grade_data(get_raw_scores=True)
    self.compare_data(actual, expected)

def test_grade_summary_data_no_grades(self):
    """
    Test grade summary data report generation with
    get_grades set to False.
    """
    request = DummyRequest()
    self.answer_question()

    actual = get_student_grade_summary_data(
        request, self.course, get_grades=False
    )
    expected = self.get_expected_grade_data(get_grades=False)
    # When get_grades is False, get_expected_grade_data does not
    # include an "assignments" key at all.
    self.assertNotIn("assignments", expected)
    self.compare_data(actual, expected)

def test_grade_summary_data_use_offline(self):
    """
    Test grade summary data report generation with use_offline True.
    """
    self.answer_question()
    request = DummyRequest()
    actual = get_student_grade_summary_data(
        request, self.course, use_offline=True)
    expected = self.get_expected_grade_data(use_offline=True)
    self.compare_data(actual, expected)

def test_grade_summary_data_use_offline_and_raw_scores(self):
    """
    Test grade summary data report generation with use_offline
    and get_raw_scores both True.
    """
    self.answer_question()
    request = DummyRequest()
    # The same kwargs drive both the real call and the expectation.
    kwargs = {"use_offline": True, "get_raw_scores": True}
    actual = get_student_grade_summary_data(
        request, self.course, **kwargs
    )
    expected = self.get_expected_grade_data(**kwargs)
    self.compare_data(actual, expected)

def test_grade_summary_data_get_score_max(self):
    """
    Test grade summary data report generation with get_score_max set
    to True (also requires get_raw_scores to be True).
    """
    self.answer_question()
    request = DummyRequest()
    # The same kwargs drive both the real call and the expectation.
    kwargs = {
        "use_offline": True,
        "get_raw_scores": True,
        "get_score_max": True,
    }
    actual = get_student_grade_summary_data(
        request, self.course, **kwargs
    )
    expected = self.get_expected_grade_data(**kwargs)
    self.compare_data(actual, expected)

def compare_data(self, data, expected_data):
"""
Compare the output of the get_student_grade_summary_data
function to the expected_data data.
"""

# Currently, all kwargs to get_student_grade_summary_data
# return a dictionary with the same keys, except for
# get_grades=False, which results in no 'assignments' key.
# This is explicitly checked for above in
# test_grade_summary_data_no_grades. This is a backup which
# will catch future changes.
self.assertListEqual(
expected_data.keys(),
data.keys(),
)

# Ensure the student info and assignment names are as expected.
for key in ['assignments', 'header']:
self.assertListEqual(expected_data[key], data[key])

Expand Down
29 changes: 25 additions & 4 deletions lms/djangoapps/instructor/views/legacy.py
Original file line number Diff line number Diff line change
Expand Up @@ -674,17 +674,21 @@ def __init__(self):
self.grades = {}
self._current_row = {}

def _add_grade_to_row(self, component, score):
def _add_grade_to_row(self, component, score, possible=None):
"""Creates component if needed, and assigns score
Args:
component (str): Course component being graded
score (float): Score of student on component
possible (float): Max possible score for the component
Returns:
None
"""
component_index = self.components.setdefault(component, len(self.components))
if possible is not None:
# send a tuple instead of a single value
score = (score, possible)
self._current_row[component_index] = score

@contextmanager
Expand Down Expand Up @@ -724,7 +728,10 @@ def get_graded_components(self):
return self.components.keys()


def get_student_grade_summary_data(request, course, get_grades=True, get_raw_scores=False, use_offline=False):
def get_student_grade_summary_data(
request, course, get_grades=True, get_raw_scores=False,
use_offline=False, get_score_max=False
):
"""
Return data arrays with student identity and grades for specified course.
Expand All @@ -740,6 +747,11 @@ def get_student_grade_summary_data(request, course, get_grades=True, get_raw_sco
data = list (one per student) of lists of data corresponding to the fields
If get_raw_scores=True, then instead of grade summaries, the raw grades for all graded modules are returned.
If get_score_max is True, two values will be returned for each grade -- the
total number of points earned and the total number of points possible. For
example, if two points are possible and one is earned, (1, 2) will be
returned instead of 0.5 (the default).
"""
course_key = course.id
enrolled_students = User.objects.filter(
Expand All @@ -766,9 +778,18 @@ def get_student_grade_summary_data(request, course, get_grades=True, get_raw_sco
log.debug(u'student=%s, gradeset=%s', student, gradeset)
with gtab.add_row(student.id) as add_grade:
if get_raw_scores:
# TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned']
# The following code calls add_grade, which is an alias
# for the add_row method on the GradeTable class. This adds
# a grade for each assignment. Depending on whether
# get_score_max is True, it will return either a single
# value as a float between 0 and 1, or a two-tuple
# containing the earned score and possible score for
# the assignment (see docstring).
for score in gradeset['raw_scores']:
add_grade(score.section, getattr(score, 'earned', score[0]))
if get_score_max is True:
add_grade(score.section, score.earned, score.possible)
else:
add_grade(score.section, score.earned)
else:
category_cnts = Counter()
progress_summary = grades._progress_summary(student, request, course)
Expand Down

0 comments on commit 23d6e0b

Please sign in to comment.