diff --git a/AUTHORS b/AUTHORS index 20372698165b..444e7da37378 100644 --- a/AUTHORS +++ b/AUTHORS @@ -225,4 +225,32 @@ Alessandro Verdura Sven Marnach Richard Moch Albert Liang - +Pan Luo +Tyler Nickerson +Vedran Karačić +William Ono +Dongwook Yoon +Awais Qureshi +Eric Fischer +Brian Beggs +Bill DeRusha +Kevin Falcone +Mirjam Škarica +Saleem Latif +Julien Paillé +Michael Frey +Hasnain Naveed +J. Cliff Dyer +Jamie Folsom +George Schneeloch +Dustin Gadal +Ibrahim Ahmed +Robert Raposa +Giovanni Di Milia +Peter Wilkins +Justin Abrahms +Arbab Nazar +Douglas Hall +Awais Jibran +Muhammad Rehan +Shawn Milochik diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py index 5a5066c34433..ecf7f52de7b4 100644 --- a/common/lib/xmodule/xmodule/course_module.py +++ b/common/lib/xmodule/xmodule/course_module.py @@ -20,6 +20,7 @@ from xmodule.mixin import LicenseMixin import json +from xblock.core import XBlock from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float from .fields import Date from django.utils.timezone import UTC @@ -1315,11 +1316,15 @@ def grading_context(self): except UndefinedContext: module = self + def possibly_scored(usage_key): + """Can this XBlock type can have a score or children?""" + return usage_key.block_type in self.block_types_affecting_grading + all_descriptors = [] graded_sections = {} def yield_descriptor_descendents(module_descriptor): - for child in module_descriptor.get_children(): + for child in module_descriptor.get_children(usage_key_filter=possibly_scored): yield child for module_descriptor in yield_descriptor_descendents(child): yield module_descriptor @@ -1345,6 +1350,15 @@ def yield_descriptor_descendents(module_descriptor): return {'graded_sections': graded_sections, 'all_descriptors': all_descriptors, } + @lazy + def block_types_affecting_grading(self): + """Return all block types that could impact grading (i.e. scored, or having children).""" + return frozenset( + cat for (cat, xblock_class) in XBlock.load_classes() if ( + getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False) + ) + ) + @staticmethod def make_id(org, course, url_name): return '/'.join([org, course, url_name]) diff --git a/common/lib/xmodule/xmodule/graders.py b/common/lib/xmodule/xmodule/graders.py index 8d73738b2802..f0ea5d632ca5 100644 --- a/common/lib/xmodule/xmodule/graders.py +++ b/common/lib/xmodule/xmodule/graders.py @@ -13,7 +13,7 @@ Score = namedtuple("Score", "earned possible graded section module_id") -def aggregate_scores(scores, section_name="summary"): +def aggregate_scores(scores, section_name="summary", section_location=None): """ scores: A list of Score objects returns: A tuple (all_total, graded_total). 
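Returning to the grading-context change in course_module.py above: `block_types_affecting_grading` whitelists the block types that can either carry a score or contain children, and `possibly_scored` passes that whitelist to `get_children()` as a `usage_key_filter`, so blocks such as videos are never loaded while building the grading context. Below is a minimal, self-contained sketch of the same pruning idea; `FakeKey`, `FakeDescriptor`, and the hard-coded block-type set are hypothetical stand-ins for illustration only (the real set is derived from `XBlock.load_classes()`).

from collections import namedtuple

FakeKey = namedtuple("FakeKey", "block_type")

class FakeDescriptor(object):
    """Hypothetical stand-in for an XBlock descriptor (illustration only)."""
    def __init__(self, block_type, children=()):
        self.location = FakeKey(block_type)
        self._children = list(children)

    def get_children(self, usage_key_filter=None):
        # Only return children whose usage key passes the filter.
        return [
            child for child in self._children
            if usage_key_filter is None or usage_key_filter(child.location)
        ]

# Assumed whitelist -- the real one is computed from XBlock.load_classes().
BLOCK_TYPES_AFFECTING_GRADING = frozenset(
    ["course", "chapter", "sequential", "vertical", "problem"]
)

def possibly_scored(usage_key):
    """Can this block type have a score or children?"""
    return usage_key.block_type in BLOCK_TYPES_AFFECTING_GRADING

vertical = FakeDescriptor("vertical", [FakeDescriptor("problem"), FakeDescriptor("video")])
children = vertical.get_children(usage_key_filter=possibly_scored)
print([child.location.block_type for child in children])  # prints ['problem']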
@@ -32,7 +32,7 @@ def aggregate_scores(scores, section_name="summary"): total_possible, False, section_name, - None + section_location ) #selecting only graded things graded_total = Score( @@ -40,7 +40,7 @@ def aggregate_scores(scores, section_name="summary"): total_possible_graded, True, section_name, - None + section_location ) return all_total, graded_total diff --git a/lms/djangoapps/ccx/tests/test_field_override_performance.py b/lms/djangoapps/ccx/tests/test_field_override_performance.py index afb013e3c16a..e012523c2858 100644 --- a/lms/djangoapps/ccx/tests/test_field_override_performance.py +++ b/lms/djangoapps/ccx/tests/test_field_override_performance.py @@ -29,7 +29,11 @@ @attr('shard_1') @mock.patch.dict( - 'django.conf.settings.FEATURES', {'ENABLE_XBLOCK_VIEW_ENDPOINT': True} + 'django.conf.settings.FEATURES', + { + 'ENABLE_XBLOCK_VIEW_ENDPOINT': True, + 'ENABLE_MAX_SCORE_CACHE': False, + } ) @ddt.ddt class FieldOverridePerformanceTestCase(ProceduralCourseTestMixin, @@ -173,18 +177,18 @@ class TestFieldOverrideMongoPerformance(FieldOverridePerformanceTestCase): TEST_DATA = { # (providers, course_width, enable_ccx): # of sql queries, # of mongo queries, # of xblocks - ('no_overrides', 1, True): (27, 7, 14), - ('no_overrides', 2, True): (135, 7, 85), - ('no_overrides', 3, True): (595, 7, 336), - ('ccx', 1, True): (27, 7, 14), - ('ccx', 2, True): (135, 7, 85), - ('ccx', 3, True): (595, 7, 336), - ('no_overrides', 1, False): (27, 7, 14), - ('no_overrides', 2, False): (135, 7, 85), - ('no_overrides', 3, False): (595, 7, 336), - ('ccx', 1, False): (27, 7, 14), - ('ccx', 2, False): (135, 7, 85), - ('ccx', 3, False): (595, 7, 336), + ('no_overrides', 1, True): (23, 7, 14), + ('no_overrides', 2, True): (68, 7, 85), + ('no_overrides', 3, True): (263, 7, 336), + ('ccx', 1, True): (23, 7, 14), + ('ccx', 2, True): (68, 7, 85), + ('ccx', 3, True): (263, 7, 336), + ('no_overrides', 1, False): (23, 7, 14), + ('no_overrides', 2, False): (68, 7, 85), + ('no_overrides', 3, False): (263, 7, 336), + ('ccx', 1, False): (23, 7, 14), + ('ccx', 2, False): (68, 7, 85), + ('ccx', 3, False): (263, 7, 336), } @@ -196,16 +200,16 @@ class TestFieldOverrideSplitPerformance(FieldOverridePerformanceTestCase): __test__ = True TEST_DATA = { - ('no_overrides', 1, True): (27, 4, 9), - ('no_overrides', 2, True): (135, 19, 54), - ('no_overrides', 3, True): (595, 84, 215), - ('ccx', 1, True): (27, 4, 9), - ('ccx', 2, True): (135, 19, 54), - ('ccx', 3, True): (595, 84, 215), - ('no_overrides', 1, False): (27, 4, 9), - ('no_overrides', 2, False): (135, 19, 54), - ('no_overrides', 3, False): (595, 84, 215), - ('ccx', 1, False): (27, 4, 9), - ('ccx', 2, False): (135, 19, 54), - ('ccx', 3, False): (595, 84, 215), + ('no_overrides', 1, True): (23, 4, 9), + ('no_overrides', 2, True): (68, 19, 54), + ('no_overrides', 3, True): (263, 84, 215), + ('ccx', 1, True): (23, 4, 9), + ('ccx', 2, True): (68, 19, 54), + ('ccx', 3, True): (263, 84, 215), + ('no_overrides', 1, False): (23, 4, 9), + ('no_overrides', 2, False): (68, 19, 54), + ('no_overrides', 3, False): (263, 84, 215), + ('ccx', 1, False): (23, 4, 9), + ('ccx', 2, False): (68, 19, 54), + ('ccx', 3, False): (263, 84, 215), } diff --git a/lms/djangoapps/courseware/grades.py b/lms/djangoapps/courseware/grades.py index 1e82ecd118db..e1bd31af2147 100644 --- a/lms/djangoapps/courseware/grades.py +++ b/lms/djangoapps/courseware/grades.py @@ -1,6 +1,7 @@ # Compute grades using real division, with no integer truncation from __future__ import division from collections import 
defaultdict +from functools import partial import json import random import logging @@ -9,11 +10,12 @@ from django.conf import settings from django.db import transaction from django.test.client import RequestFactory +from django.core.cache import cache import dogstats_wrapper as dog_stats_api from courseware import courses -from courseware.model_data import FieldDataCache +from courseware.model_data import FieldDataCache, ScoresClient from student.models import anonymous_id_for_user from util.module_utils import yield_dynamic_descriptor_descendants from xmodule import graders @@ -25,12 +27,176 @@ from submissions import api as sub_api # installed from the edx-submissions repository from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey - from openedx.core.djangoapps.signals.signals import GRADES_UPDATED + log = logging.getLogger("edx.courseware") +class MaxScoresCache(object): + """ + A cache for unweighted max scores for problems. + + The key assumption here is that any problem that has not yet recorded a + score for a user is worth the same number of points. An XBlock is free to + score one student at 2/5 and another at 1/3. But a problem that has never + issued a score -- say a problem two students have only seen mentioned in + their progress pages and never interacted with -- should be worth the same + number of points for everyone. + """ + def __init__(self, cache_prefix): + self.cache_prefix = cache_prefix + self._max_scores_cache = {} + self._max_scores_updates = {} + + @classmethod + def create_for_course(cls, course): + """ + Given a CourseDescriptor, return a correctly configured `MaxScoresCache` + + This method will base the `MaxScoresCache` cache prefix value on the + last time something was published to the live version of the course. + This is so that we don't have to worry about stale cached values for + max scores -- any time a content change occurs, we change our cache + keys. + """ + return cls(u"{}.{}".format(course.id, course.subtree_edited_on.isoformat())) + + def fetch_from_remote(self, locations): + """ + Populate the local cache with values from django's cache + """ + remote_dict = cache.get_many([self._remote_cache_key(loc) for loc in locations]) + self._max_scores_cache = { + self._local_cache_key(remote_key): value + for remote_key, value in remote_dict.items() + if value is not None + } + + def push_to_remote(self): + """ + Update the remote cache + """ + if self._max_scores_updates: + cache.set_many( + { + self._remote_cache_key(key): value + for key, value in self._max_scores_updates.items() + }, + 60 * 60 * 24 # 1 day + ) + + def _remote_cache_key(self, location): + """Convert a location to a remote cache key (add our prefixing).""" + return u"grades.MaxScores.{}___{}".format(self.cache_prefix, unicode(location)) + + def _local_cache_key(self, remote_key): + """Convert a remote cache key to a local cache key (i.e. 
location str).""" + return remote_key.split(u"___", 1)[1] + + def num_cached_from_remote(self): + """How many items did we pull down from the remote cache?""" + return len(self._max_scores_cache) + + def num_cached_updates(self): + """How many local updates are we waiting to push to the remote cache?""" + return len(self._max_scores_updates) + + def set(self, location, max_score): + """ + Adds a max score to the max_score_cache + """ + loc_str = unicode(location) + if self._max_scores_cache.get(loc_str) != max_score: + self._max_scores_updates[loc_str] = max_score + + def get(self, location): + """ + Retrieve a max score from the cache + """ + loc_str = unicode(location) + max_score = self._max_scores_updates.get(loc_str) + if max_score is None: + max_score = self._max_scores_cache.get(loc_str) + + return max_score + + +class ProgressSummary(object): + """ + Wrapper class for the computation of a user's scores across a course. + + Attributes + chapters: a summary of all sections with problems in the course. It is + organized as an array of chapters, each containing an array of sections, + each containing an array of scores. This contains information for graded + and ungraded problems, and is good for displaying a course summary with + due dates, etc. + + weighted_scores: a dictionary mapping module locations to weighted Score + objects. + + locations_to_children: a dictionary mapping module locations to their + direct descendants. + """ + def __init__(self, chapters, weighted_scores, locations_to_children): + self.chapters = chapters + self.weighted_scores = weighted_scores + self.locations_to_children = locations_to_children + + def score_for_module(self, location): + """ + Calculate the aggregate weighted score for any location in the course. + This method returns a tuple containing (earned_score, possible_score). + + If the location is of 'problem' type, this method will return the + possible and earned scores for that problem. If the location refers to a + composite module (a vertical or section ) the scores will be the sums of + all scored problems that are children of the chosen location. + """ + if location in self.weighted_scores: + score = self.weighted_scores[location] + return score.earned, score.possible + children = self.locations_to_children[location] + earned = 0.0 + possible = 0.0 + for child in children: + child_earned, child_possible = self.score_for_module(child) + earned += child_earned + possible += child_possible + return earned, possible + + +def descriptor_affects_grading(block_types_affecting_grading, descriptor): + """ + Returns True if the descriptor could have any impact on grading, else False. + + Something might be a scored item if it is capable of storing a score + (has_score=True). We also have to include anything that can have children, + since those children might have scores. We can avoid things like Videos, + which have state but cannot ever impact someone's grade. + """ + return descriptor.location.block_type in block_types_affecting_grading + + +def field_data_cache_for_grading(course, user): + """ + Given a CourseDescriptor and User, create the FieldDataCache for grading. + + This will generate a FieldDataCache that only loads state for those things + that might possibly affect the grading process, and will ignore things like + Videos. 
+ """ + descriptor_filter = partial(descriptor_affects_grading, course.block_types_affecting_grading) + return FieldDataCache.cache_for_descriptor_descendents( + course.id, + user, + course, + depth=None, + descriptor_filter=descriptor_filter + ) + + def answer_distributions(course_key): """ Given a course_key, return answer distributions in the form of a dictionary @@ -123,14 +289,14 @@ def url_and_display_name(usage_key): @transaction.commit_manually -def grade(student, request, course, keep_raw_scores=False): +def grade(student, request, course, keep_raw_scores=False, field_data_cache=None, scores_client=None): """ Wraps "_grade" with the manual_transaction context manager just in case there are unanticipated errors. Send a signal to update the minimum grade requirement status. """ with manual_transaction(): - grade_summary = _grade(student, request, course, keep_raw_scores) + grade_summary = _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client) responses = GRADES_UPDATED.send_robust( sender=None, username=student.username, @@ -145,7 +311,7 @@ def grade(student, request, course, keep_raw_scores=False): return grade_summary -def _grade(student, request, course, keep_raw_scores): +def _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client): """ Unwrapped version of "grade" @@ -166,15 +332,25 @@ def _grade(student, request, course, keep_raw_scores): More information on the format is in the docstring for CourseGrader. """ - grading_context = course.grading_context - raw_scores = [] + if field_data_cache is None: + with manual_transaction(): + field_data_cache = field_data_cache_for_grading(course, student) + if scores_client is None: + scores_client = ScoresClient.from_field_data_cache(field_data_cache) # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs # scores that were registered with the submissions API, which for the moment # means only openassessment (edx-ora2) - submissions_scores = sub_api.get_scores( - course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id) - ) + submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)) + max_scores_cache = MaxScoresCache.create_for_course(course) + # For the moment, we have to get scorable_locations from field_data_cache + # and not from scores_client, because scores_client is ignorant of things + # in the submissions API. As a further refactoring step, submissions should + # be hidden behind the ScoresClient. + max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations) + + grading_context = course.grading_context + raw_scores = [] totaled_scores = {} # This next complicated loop is just to collect the totaled_scores, which is @@ -202,13 +378,10 @@ def _grade(student, request, course, keep_raw_scores): ) if not should_grade_section: - with manual_transaction(): - should_grade_section = StudentModule.objects.filter( - student=student, - module_state_key__in=[ - descriptor.location for descriptor in section['xmoduledescriptors'] - ] - ).exists() + should_grade_section = any( + descriptor.location in scores_client + for descriptor in section['xmoduledescriptors'] + ) # If we haven't seen a single problem in the section, we don't have # to grade it at all! We can assume 0% @@ -219,23 +392,24 @@ def create_module(descriptor): '''creates an XModule instance given a descriptor''' # TODO: We need the request to pass into here. 
If we could forego that, our arguments # would be simpler - with manual_transaction(): - field_data_cache = FieldDataCache([descriptor], course.id, student) return get_module_for_descriptor( student, request, descriptor, field_data_cache, course.id, course=course ) - for module_descriptor in yield_dynamic_descriptor_descendants( - section_descriptor, student.id, create_module - ): - + descendants = yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module) + for module_descriptor in descendants: (correct, total) = get_score( - course.id, student, module_descriptor, create_module, scores_cache=submissions_scores + student, + module_descriptor, + create_module, + scores_client, + submissions_scores, + max_scores_cache, ) if correct is None and total is None: continue - if settings.GENERATE_PROFILE_SCORES: # for debugging! + if settings.GENERATE_PROFILE_SCORES: # for debugging! if total > 1: correct = random.randrange(max(total - 2, 1), total + 1) else: @@ -256,11 +430,13 @@ def create_module(descriptor): ) ) - _, graded_total = graders.aggregate_scores(scores, section_name) + __, graded_total = graders.aggregate_scores( + scores, section_name, section_location=section_descriptor.location + ) if keep_raw_scores: raw_scores += scores else: - graded_total = Score(0.0, 1.0, True, section_name, None) + graded_total = Score(0.0, 1.0, True, section_name, section_descriptor.location) #Add the graded total to totaled_scores if graded_total.possible > 0: @@ -283,11 +459,14 @@ def create_module(descriptor): letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent']) grade_summary['grade'] = letter_grade - grade_summary['totaled_scores'] = totaled_scores # make this available, eg for instructor download & debugging + grade_summary['totaled_scores'] = totaled_scores # make this available, eg for instructor download & debugging if keep_raw_scores: # way to get all RAW scores out to instructor # so grader can be double-checked grade_summary['raw_scores'] = raw_scores + + max_scores_cache.push_to_remote() + return grade_summary @@ -314,19 +493,34 @@ def grade_for_percentage(grade_cutoffs, percentage): @transaction.commit_manually -def progress_summary(student, request, course): +def progress_summary(student, request, course, field_data_cache=None, scores_client=None): """ Wraps "_progress_summary" with the manual_transaction context manager just in case there are unanticipated errors. """ with manual_transaction(): - return _progress_summary(student, request, course) + progress = _progress_summary(student, request, course, field_data_cache, scores_client) + if progress: + return progress.chapters + else: + return None + + +@transaction.commit_manually +def get_weighted_scores(student, course, field_data_cache=None, scores_client=None): + """ + Uses the _progress_summary method to return a ProgressSummmary object + containing details of a students weighted scores for the course. + """ + with manual_transaction(): + request = _get_mock_request(student) + return _progress_summary(student, request, course, field_data_cache, scores_client) # TODO: This method is not very good. It was written in the old course style and # then converted over and performance is not good. Once the progress page is redesigned # to not have the progress summary this method should be deleted (so it won't be copied). 
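A usage illustration (not part of this diff) for the `get_weighted_scores()` helper added above: the returned `ProgressSummary` answers score queries for any location in the course, returning a leaf problem's own weighted score and, for composite blocks, the sum over their scored descendants. The `student`, `course`, and `vertical_location` names below are placeholders, and the import assumes a fully configured LMS environment.

from courseware.grades import get_weighted_scores

# student: a User; course: a CourseDescriptor; vertical_location: any UsageKey in the course.
progress = get_weighted_scores(student, course)
earned, possible = progress.score_for_module(vertical_location)
percent = earned / possible if possible else 0.0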
-def _progress_summary(student, request, course): +def _progress_summary(student, request, course, field_data_cache=None, scores_client=None): """ Unwrapped version of "progress_summary". @@ -348,23 +542,30 @@ def _progress_summary(student, request, course): """ with manual_transaction(): - field_data_cache = FieldDataCache.cache_for_descriptor_descendents( - course.id, student, course, depth=None - ) - # TODO: We need the request to pass into here. If we could - # forego that, our arguments would be simpler + if field_data_cache is None: + field_data_cache = field_data_cache_for_grading(course, student) + if scores_client is None: + scores_client = ScoresClient.from_field_data_cache(field_data_cache) + course_module = get_module_for_descriptor( student, request, course, field_data_cache, course.id, course=course ) if not course_module: - # This student must not have access to the course. return None course_module = getattr(course_module, '_x_module', course_module) submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)) + max_scores_cache = MaxScoresCache.create_for_course(course) + # For the moment, we have to get scorable_locations from field_data_cache + # and not from scores_client, because scores_client is ignorant of things + # in the submissions API. As a further refactoring step, submissions should + # be hidden behind the ScoresClient. + max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations) chapters = [] + locations_to_children = defaultdict(list) + locations_to_weighted_scores = {} # Don't include chapters that aren't displayable (e.g. due to error) for chapter_module in course_module.get_display_items(): # Skip if the chapter is hidden @@ -372,7 +573,6 @@ def _progress_summary(student, request, course): continue sections = [] - for section_module in chapter_module.get_display_items(): # Skip if the section is hidden with manual_transaction(): @@ -387,23 +587,29 @@ def _progress_summary(student, request, course): for module_descriptor in yield_dynamic_descriptor_descendants( section_module, student.id, module_creator ): - course_id = course.id + locations_to_children[module_descriptor.parent].append(module_descriptor.location) (correct, total) = get_score( - course_id, student, module_descriptor, module_creator, scores_cache=submissions_scores + student, + module_descriptor, + module_creator, + scores_client, + submissions_scores, + max_scores_cache, ) if correct is None and total is None: continue - scores.append( - Score( - correct, - total, - graded, - module_descriptor.display_name_with_default, - module_descriptor.location - ) + weighted_location_score = Score( + correct, + total, + graded, + module_descriptor.display_name_with_default, + module_descriptor.location ) + scores.append(weighted_location_score) + locations_to_weighted_scores[module_descriptor.location] = weighted_location_score + scores.reverse() section_total, _ = graders.aggregate_scores( scores, section_module.display_name_with_default) @@ -426,10 +632,20 @@ def _progress_summary(student, request, course): 'sections': sections }) - return chapters + max_scores_cache.push_to_remote() + + return ProgressSummary(chapters, locations_to_weighted_scores, locations_to_children) -def get_score(course_id, user, problem_descriptor, module_creator, scores_cache=None): +def weighted_score(raw_correct, raw_total, weight): + """Return a tuple that represents the weighted (correct, total) score.""" + # If there is no weighting, or weighting 
can't be applied, return input. + if weight is None or raw_total == 0: + return (raw_correct, raw_total) + return (float(raw_correct) * weight / raw_total, float(weight)) + + +def get_score(user, problem_descriptor, module_creator, scores_client, submissions_scores_cache, max_scores_cache): """ Return the score for a user on a problem, as a tuple (correct, total). e.g. (5,7) if you got 5 out of 7 points. @@ -439,19 +655,21 @@ def get_score(course_id, user, problem_descriptor, module_creator, scores_cache= user: a Student object problem_descriptor: an XModuleDescriptor + scores_client: an initialized ScoresClient module_creator: a function that takes a descriptor, and returns the corresponding XModule for this user. Can return None if user doesn't have access, or if something else went wrong. - scores_cache: A dict of location names to (earned, possible) point tuples. + submissions_scores_cache: A dict of location names to (earned, possible) point tuples. If an entry is found in this cache, it takes precedence. + max_scores_cache: a MaxScoresCache """ - scores_cache = scores_cache or {} + submissions_scores_cache = submissions_scores_cache or {} if not user.is_authenticated(): return (None, None) location_url = problem_descriptor.location.to_deprecated_string() - if location_url in scores_cache: - return scores_cache[location_url] + if location_url in submissions_scores_cache: + return submissions_scores_cache[location_url] # some problems have state that is updated independently of interaction # with the LMS, so they need to always be scored. (E.g. foldit.) @@ -469,22 +687,27 @@ def get_score(course_id, user, problem_descriptor, module_creator, scores_cache= # These are not problems, and do not have a score return (None, None) - try: - student_module = StudentModule.objects.get( - student=user, - course_id=course_id, - module_state_key=problem_descriptor.location - ) - except StudentModule.DoesNotExist: - student_module = None - - if student_module is not None and student_module.max_grade is not None: - correct = student_module.grade if student_module.grade is not None else 0 - total = student_module.max_grade + # Check the score that comes from the ScoresClient (out of CSM). + # If an entry exists and has a total associated with it, we trust that + # value. This is important for cases where a student might have seen an + # older version of the problem -- they're still graded on what was possible + # when they tried the problem, not what it's worth now. + score = scores_client.get(problem_descriptor.location) + cached_max_score = max_scores_cache.get(problem_descriptor.location) + if score and score.total is not None: + # We have a valid score, just use it. + correct = score.correct if score.correct is not None else 0.0 + total = score.total + elif cached_max_score is not None and settings.FEATURES.get("ENABLE_MAX_SCORE_CACHE"): + # We don't have a valid score entry but we know from our cache what the + # max possible score is, so they've earned 0.0 / cached_max_score + correct = 0.0 + total = cached_max_score else: - # If the problem was not in the cache, or hasn't been graded yet, - # we need to instantiate the problem. - # Otherwise, the max score (cached in student_module) won't be available + # This means we don't have a valid score entry and we don't have a + # cached_max_score on hand. We know they've earned 0.0 points on this, + # but we need to instantiate the module (i.e. load student state) in + # order to find out how much it was worth. 
problem = module_creator(problem_descriptor) if problem is None: return (None, None) @@ -496,17 +719,11 @@ def get_score(course_id, user, problem_descriptor, module_creator, scores_cache= # In which case total might be None if total is None: return (None, None) + else: + # add location to the max score cache + max_scores_cache.set(problem_descriptor.location, total) - # Now we re-weight the problem, if specified - weight = problem_descriptor.weight - if weight is not None: - if total == 0: - log.exception("Cannot reweight a problem with zero total points. Problem: " + str(student_module)) - return (correct, total) - correct = correct * weight / total - total = weight - - return (correct, total) + return weighted_score(correct, total, problem_descriptor.weight) @contextmanager @@ -545,15 +762,10 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False): else: course = course_or_id - # We make a fake request because grading code expects to be able to look at - # the request. We have to attach the correct user to the request before - # grading that student. - request = RequestFactory().get('/') - for student in students: with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]): try: - request.user = student + request = _get_mock_request(student) # Grading calls problem rendering, which calls masquerading, # which checks session vars -- thus the empty session dict below. # It's not pretty, but untangling that is currently beyond the @@ -572,3 +784,14 @@ def iterate_grades_for(course_or_id, students, keep_raw_scores=False): exc.message ) yield student, {}, exc.message + + +def _get_mock_request(student): + """ + Make a fake request because grading code expects to be able to look at + the request. We have to attach the correct user to the request before + grading that student. + """ + request = RequestFactory().get('/') + request.user = student + return request diff --git a/lms/djangoapps/courseware/model_data.py b/lms/djangoapps/courseware/model_data.py index a0e217ede7ce..faa5fdbad3a2 100644 --- a/lms/djangoapps/courseware/model_data.py +++ b/lms/djangoapps/courseware/model_data.py @@ -23,7 +23,7 @@ import json from abc import abstractmethod, ABCMeta -from collections import defaultdict +from collections import defaultdict, namedtuple from .models import ( StudentModule, XModuleUserStateSummaryField, @@ -741,6 +741,7 @@ def __init__(self, descriptors, course_id, user, select_for_update=False, asides self.course_id, ), } + self.scorable_locations = set() self.add_descriptors_to_cache(descriptors) def add_descriptors_to_cache(self, descriptors): @@ -748,6 +749,7 @@ def add_descriptors_to_cache(self, descriptors): Add all `descriptors` to this FieldDataCache. """ if self.user.is_authenticated(): + self.scorable_locations.update(desc.location for desc in descriptors if desc.has_score) for scope, fields in self._fields_to_cache(descriptors).items(): if scope not in self.cache: continue @@ -955,3 +957,63 @@ def last_modified(self, key): def __len__(self): return sum(len(cache) for cache in self.cache.values()) + + +class ScoresClient(object): + """ + Basic client interface for retrieving Score information. + + Eventually, this should read and write scores, but at the moment it only + handles the read side of things. + """ + Score = namedtuple('Score', 'correct total') + + def __init__(self, course_key, user_id): + """Basic constructor. 
from_field_data_cache() is more appopriate for most uses.""" + self.course_key = course_key + self.user_id = user_id + self._locations_to_scores = {} + self._has_fetched = False + + def __contains__(self, location): + """Return True if we have a score for this location.""" + return location in self._locations_to_scores + + def fetch_scores(self, locations): + """Grab score information.""" + scores_qset = StudentModule.objects.filter( + student_id=self.user_id, + course_id=self.course_key, + module_state_key__in=set(locations), + ) + # Locations in StudentModule don't necessarily have course key info + # attached to them (since old mongo identifiers don't include runs). + # So we have to add that info back in before we put it into our lookup. + self._locations_to_scores.update({ + UsageKey.from_string(location).map_into_course(self.course_key): self.Score(correct, total) + for location, correct, total + in scores_qset.values_list('module_state_key', 'grade', 'max_grade') + }) + self._has_fetched = True + + def get(self, location): + """ + Get the score for a given location, if it exists. + + If we don't have a score for that location, return `None`. Note that as + convention, you should be passing in a location with full course run + information. + """ + if not self._has_fetched: + raise ValueError( + "Tried to fetch location {} from ScoresClient before fetch_scores() has run." + .format(location) + ) + return self._locations_to_scores.get(location) + + @classmethod + def from_field_data_cache(cls, fd_cache): + """Create a ScoresClient from a populated FieldDataCache.""" + client = cls(fd_cache.course_id, fd_cache.user.id) + client.fetch_scores(fd_cache.scorable_locations) + return client diff --git a/lms/djangoapps/courseware/models.py b/lms/djangoapps/courseware/models.py index ac7b72520bd3..32a01e9e6ea5 100644 --- a/lms/djangoapps/courseware/models.py +++ b/lms/djangoapps/courseware/models.py @@ -133,7 +133,10 @@ def __repr__(self): return 'StudentModule<%r>' % ({ 'course_id': self.course_id, 'module_type': self.module_type, - 'student': self.student.username, # pylint: disable=no-member + # We use the student_id instead of username to avoid a database hop. + # This can actually matter in cases where we're logging many of + # these (e.g. on a broken progress page). + 'student_id': self.student_id, # pylint: disable=no-member 'module_state_key': self.module_state_key, 'state': str(self.state)[:20], },) diff --git a/lms/djangoapps/courseware/tests/test_grades.py b/lms/djangoapps/courseware/tests/test_grades.py index c6825c99be99..9715cedf2854 100644 --- a/lms/djangoapps/courseware/tests/test_grades.py +++ b/lms/djangoapps/courseware/tests/test_grades.py @@ -2,13 +2,18 @@ Test grade calculation. 
""" from django.http import Http404 -from mock import patch +from django.test import TestCase +from django.test.client import RequestFactory + +from mock import patch, MagicMock from nose.plugins.attrib import attr from opaque_keys.edx.locations import SlashSeparatedCourseKey +from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator -from courseware.grades import grade, iterate_grades_for +from courseware.grades import field_data_cache_for_grading, grade, iterate_grades_for, MaxScoresCache, ProgressSummary from student.tests.factories import UserFactory -from xmodule.modulestore.tests.factories import CourseFactory +from student.models import CourseEnrollment +from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase @@ -121,3 +126,195 @@ def _gradesets_and_errors_for(self, course_id, students): students_to_errors[student] = err_msg return students_to_gradesets, students_to_errors + + +class TestMaxScoresCache(ModuleStoreTestCase): + """ + Tests for the MaxScoresCache + """ + def setUp(self): + super(TestMaxScoresCache, self).setUp() + self.student = UserFactory.create() + self.course = CourseFactory.create() + self.problems = [] + for _ in xrange(3): + self.problems.append( + ItemFactory.create(category='problem', parent=self.course) + ) + + CourseEnrollment.enroll(self.student, self.course.id) + self.request = RequestFactory().get('/') + self.locations = [problem.location for problem in self.problems] + + def test_max_scores_cache(self): + """ + Tests the behavior fo the MaxScoresCache + """ + max_scores_cache = MaxScoresCache("test_max_scores_cache") + self.assertEqual(max_scores_cache.num_cached_from_remote(), 0) + self.assertEqual(max_scores_cache.num_cached_updates(), 0) + + # add score to cache + max_scores_cache.set(self.locations[0], 1) + self.assertEqual(max_scores_cache.num_cached_updates(), 1) + + # push to remote cache + max_scores_cache.push_to_remote() + + # create a new cache with the same params, fetch from remote cache + max_scores_cache = MaxScoresCache("test_max_scores_cache") + max_scores_cache.fetch_from_remote(self.locations) + + # see cache is populated + self.assertEqual(max_scores_cache.num_cached_from_remote(), 1) + + +class TestFieldDataCacheScorableLocations(ModuleStoreTestCase): + """ + Make sure we can filter the locations we pull back student state for via + the FieldDataCache. 
+ """ + def setUp(self): + super(TestFieldDataCacheScorableLocations, self).setUp() + self.student = UserFactory.create() + self.course = CourseFactory.create() + chapter = ItemFactory.create(category='chapter', parent=self.course) + sequential = ItemFactory.create(category='sequential', parent=chapter) + vertical = ItemFactory.create(category='vertical', parent=sequential) + ItemFactory.create(category='video', parent=vertical) + ItemFactory.create(category='html', parent=vertical) + ItemFactory.create(category='discussion', parent=vertical) + ItemFactory.create(category='problem', parent=vertical) + + CourseEnrollment.enroll(self.student, self.course.id) + + def test_field_data_cache_scorable_locations(self): + """Only scorable locations should be in FieldDataCache.scorable_locations.""" + fd_cache = field_data_cache_for_grading(self.course, self.student) + block_types = set(loc.block_type for loc in fd_cache.scorable_locations) + self.assertNotIn('video', block_types) + self.assertNotIn('html', block_types) + self.assertNotIn('discussion', block_types) + self.assertIn('problem', block_types) + + +class TestProgressSummary(TestCase): + """ + Test the method that calculates the score for a given block based on the + cumulative scores of its children. This test class uses a hard-coded block + hierarchy with scores as follows: + a + +--------+--------+ + b c + +--------------+-----------+ | + d e f g + +-----+ +-----+-----+ | | + h i j k l m n + (2/5) (3/5) (0/1) - (1/3) - (3/10) + + """ + def setUp(self): + super(TestProgressSummary, self).setUp() + self.course_key = CourseLocator( + org='some_org', + course='some_course', + run='some_run' + ) + self.loc_a = self.create_location('chapter', 'a') + self.loc_b = self.create_location('section', 'b') + self.loc_c = self.create_location('section', 'c') + self.loc_d = self.create_location('vertical', 'd') + self.loc_e = self.create_location('vertical', 'e') + self.loc_f = self.create_location('vertical', 'f') + self.loc_g = self.create_location('vertical', 'g') + self.loc_h = self.create_location('problem', 'h') + self.loc_i = self.create_location('problem', 'i') + self.loc_j = self.create_location('problem', 'j') + self.loc_k = self.create_location('html', 'k') + self.loc_l = self.create_location('problem', 'l') + self.loc_m = self.create_location('html', 'm') + self.loc_n = self.create_location('problem', 'n') + + weighted_scores = { + self.loc_h: self.create_score(2, 5), + self.loc_i: self.create_score(3, 5), + self.loc_j: self.create_score(0, 1), + self.loc_l: self.create_score(1, 3), + self.loc_n: self.create_score(3, 10), + } + locations_to_scored_children = { + self.loc_a: [self.loc_h, self.loc_i, self.loc_j, self.loc_l, self.loc_n], + self.loc_b: [self.loc_h, self.loc_i, self.loc_j, self.loc_l], + self.loc_c: [self.loc_n], + self.loc_d: [self.loc_h, self.loc_i], + self.loc_e: [self.loc_j, self.loc_l], + self.loc_f: [], + self.loc_g: [self.loc_n], + self.loc_k: [], + self.loc_m: [], + } + self.progress_summary = ProgressSummary( + None, weighted_scores, locations_to_scored_children + ) + + def create_score(self, earned, possible): + """ + Create a new mock Score object with specified earned and possible values + """ + score = MagicMock() + score.possible = possible + score.earned = earned + return score + + def create_location(self, block_type, block_id): + """ + Create a new BlockUsageLocation with the given type and ID. 
+ """ + return BlockUsageLocator( + course_key=self.course_key, block_type=block_type, block_id=block_id + ) + + def test_score_chapter(self): + earned, possible = self.progress_summary.score_for_module(self.loc_a) + self.assertEqual(earned, 9) + self.assertEqual(possible, 24) + + def test_score_section_many_leaves(self): + earned, possible = self.progress_summary.score_for_module(self.loc_b) + self.assertEqual(earned, 6) + self.assertEqual(possible, 14) + + def test_score_section_one_leaf(self): + earned, possible = self.progress_summary.score_for_module(self.loc_c) + self.assertEqual(earned, 3) + self.assertEqual(possible, 10) + + def test_score_vertical_two_leaves(self): + earned, possible = self.progress_summary.score_for_module(self.loc_d) + self.assertEqual(earned, 5) + self.assertEqual(possible, 10) + + def test_score_vertical_two_leaves_one_unscored(self): + earned, possible = self.progress_summary.score_for_module(self.loc_e) + self.assertEqual(earned, 1) + self.assertEqual(possible, 4) + + def test_score_vertical_no_score(self): + earned, possible = self.progress_summary.score_for_module(self.loc_f) + self.assertEqual(earned, 0) + self.assertEqual(possible, 0) + + def test_score_vertical_one_leaf(self): + earned, possible = self.progress_summary.score_for_module(self.loc_g) + self.assertEqual(earned, 3) + self.assertEqual(possible, 10) + + def test_score_leaf(self): + earned, possible = self.progress_summary.score_for_module(self.loc_h) + self.assertEqual(earned, 2) + self.assertEqual(possible, 5) + + def test_score_leaf_no_score(self): + earned, possible = self.progress_summary.score_for_module(self.loc_m) + self.assertEqual(earned, 0) + self.assertEqual(possible, 0) diff --git a/lms/djangoapps/courseware/tests/test_model_data.py b/lms/djangoapps/courseware/tests/test_model_data.py index 0f87c49de357..a123309f85b1 100644 --- a/lms/djangoapps/courseware/tests/test_model_data.py +++ b/lms/djangoapps/courseware/tests/test_model_data.py @@ -6,8 +6,7 @@ from nose.plugins.attrib import attr from functools import partial -from courseware.model_data import DjangoKeyValueStore -from courseware.model_data import InvalidScopeError, FieldDataCache +from courseware.model_data import DjangoKeyValueStore, FieldDataCache, InvalidScopeError from courseware.models import StudentModule from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField diff --git a/lms/djangoapps/courseware/tests/test_submitting_problems.py b/lms/djangoapps/courseware/tests/test_submitting_problems.py index 315ee21d2d1f..d00871a99079 100644 --- a/lms/djangoapps/courseware/tests/test_submitting_problems.py +++ b/lms/djangoapps/courseware/tests/test_submitting_problems.py @@ -109,6 +109,15 @@ def submit_question_answer(self, problem_url_name, responses): return resp + def look_at_question(self, problem_url_name): + """ + Create state for a problem, but don't answer it + """ + location = self.problem_location(problem_url_name) + modx_url = self.modx_url(location, "problem_get") + resp = self.client.get(modx_url) + return resp + def reset_question_answer(self, problem_url_name): """ Reset specified problem for current user. @@ -127,6 +136,36 @@ def show_question_answer(self, problem_url_name): resp = self.client.post(modx_url) return resp + +class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, ProblemSubmissionTestMixin): + """ + Check that a course gets graded properly. 
+ """ + + # arbitrary constant + COURSE_SLUG = "100" + COURSE_NAME = "test_course" + + def setUp(self): + + super(TestSubmittingProblems, self).setUp(create_user=False) + # Create course + self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG) + assert self.course, "Couldn't load course %r" % self.COURSE_NAME + + # create a test student + self.student = 'view@test.com' + self.password = 'foo' + self.create_account('u1', self.student, self.password) + self.activate_user(self.student) + self.enroll(self.course) + self.student_user = User.objects.get(email=self.student) + self.factory = RequestFactory() + # Disable the score change signal to prevent other components from being pulled into tests. + signal_patch = patch('courseware.module_render.SCORE_CHANGED.send') + signal_patch.start() + self.addCleanup(signal_patch.stop) + def add_dropdown_to_section(self, section_location, name, num_inputs=2): """ Create and return a dropdown problem. @@ -457,6 +496,33 @@ def test_show_answer_doesnt_write_to_csm(self): current_count = csmh.count() self.assertEqual(current_count, 3) + def test_grade_with_max_score_cache(self): + """ + Tests that the max score cache is populated after a grading run + and that the results of grading runs before and after the cache + warms are the same. + """ + self.basic_setup() + self.submit_question_answer('p1', {'2_1': 'Correct'}) + self.look_at_question('p2') + self.assertTrue( + StudentModule.objects.filter( + module_state_key=self.problem_location('p2') + ).exists() + ) + location_to_cache = unicode(self.problem_location('p2')) + max_scores_cache = grades.MaxScoresCache.create_for_course(self.course) + + # problem isn't in the cache + max_scores_cache.fetch_from_remote([location_to_cache]) + self.assertIsNone(max_scores_cache.get(location_to_cache)) + self.check_grade_percent(0.33) + + # problem is in the cache + max_scores_cache.fetch_from_remote([location_to_cache]) + self.assertIsNotNone(max_scores_cache.get(location_to_cache)) + self.check_grade_percent(0.33) + def test_none_grade(self): """ Check grade is 0 to begin with. @@ -474,6 +540,13 @@ def test_b_grade_exact(self): self.check_grade_percent(0.33) self.assertEqual(self.get_grade_summary()['grade'], 'B') + @patch.dict("django.conf.settings.FEATURES", {"ENABLE_MAX_SCORE_CACHE": False}) + def test_grade_no_max_score_cache(self): + """ + Tests grading when the max score cache is disabled + """ + self.test_b_grade_exact() + def test_b_grade_above(self): """ Check grade between cutoffs. diff --git a/lms/djangoapps/courseware/views.py b/lms/djangoapps/courseware/views.py index d7275c7fd52e..2162104f93aa 100644 --- a/lms/djangoapps/courseware/views.py +++ b/lms/djangoapps/courseware/views.py @@ -44,7 +44,7 @@ is_user_eligible_for_credit, is_credit_course ) -from courseware.model_data import FieldDataCache +from courseware.model_data import FieldDataCache, ScoresClient from .module_render import toc_for_course, get_module_for_descriptor, get_module, get_module_by_usage_id from .entrance_exams import ( course_has_entrance_exam, @@ -1054,10 +1054,15 @@ def _progress(request, course_key, student_id): # The pre-fetching of groups is done to make auth checks not require an # additional DB lookup (this kills the Progress page in particular). 
student = User.objects.prefetch_related("groups").get(id=student.id) - - courseware_summary = grades.progress_summary(student, request, course) + field_data_cache = grades.field_data_cache_for_grading(course, student) + scores_client = ScoresClient.from_field_data_cache(field_data_cache) + courseware_summary = grades.progress_summary( + student, request, course, field_data_cache=field_data_cache, scores_client=scores_client + ) + grade_summary = grades.grade( + student, request, course, field_data_cache=field_data_cache, scores_client=scores_client + ) studio_url = get_studio_url(course, 'settings/grading') - grade_summary = grades.grade(student, request, course) if courseware_summary is None: #This means the student didn't have access to the course (which the instructor requested) diff --git a/lms/djangoapps/instructor/tests/test_enrollment.py b/lms/djangoapps/instructor/tests/test_enrollment.py index 5298a9e8be67..574fdb17ba12 100644 --- a/lms/djangoapps/instructor/tests/test_enrollment.py +++ b/lms/djangoapps/instructor/tests/test_enrollment.py @@ -393,7 +393,10 @@ def test_delete_student_attempts(self): module_state_key=msk ).count(), 0) - def test_delete_submission_scores(self): + # Disable the score change signal to prevent other components from being + # pulled into tests. + @mock.patch('courseware.module_render.SCORE_CHANGED.send') + def test_delete_submission_scores(self, _lti_mock): user = UserFactory() problem_location = self.course_key.make_usage_key('dummy', 'module') diff --git a/lms/djangoapps/instructor/tests/test_legacy_raw_download_csv.py b/lms/djangoapps/instructor/tests/test_legacy_raw_download_csv.py index 5eeb76cee6b9..952eb0c0205a 100644 --- a/lms/djangoapps/instructor/tests/test_legacy_raw_download_csv.py +++ b/lms/djangoapps/instructor/tests/test_legacy_raw_download_csv.py @@ -67,9 +67,23 @@ def test_download_raw_grades_dump(self): ''' self.assertEqual(body, expected_csv, msg) - def test_grade_summary_data(self): + def get_expected_grade_data( + self, get_grades=True, get_raw_scores=False, + use_offline=False, get_score_max=False): """ - Test grade summary data report generation + Return expected results from the get_student_grade_summary_data call + with any options selected. + + Note that the kwargs accepted by get_expected_grade_data (and their + default values) must be identical to those in + get_student_grade_summary_data for this function to be accurate. + If kwargs are added or removed, or the functionality triggered by + them changes, this function must be updated to match. + + If get_score_max is True, instead of a single score between 0 and 1, + the actual score and total possible are returned. For example, if the + student got one out of two possible points, the values (1, 2) will be + returned instead of 0.5. """ self.answer_question() @@ -87,11 +101,11 @@ def test_grade_summary_data(self): ], 'data': [ [ - 1, u'u1', u'username', u'view@test.com', '', 0.0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, u'u1', u'username', u'view@test.com', '', (0.0, 1.0), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ - 2, u'u2', u'username', u'view2@test.com', '', 0.3333333333333333, 0, 0, 0, + 2, u'u2', u'username', u'view2@test.com', '', (1.0, 3.0), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] @@ -104,6 +118,163 @@ def test_grade_summary_data(self): ] } + # The first five columns contain the student ID, username, + # full name, and e-mail addresses. 
+ non_grade_columns = 5 + # If the following 'if' is triggered, the + # get_student_grade_summary_data function will not return any + # grade data. Only the "non_grade_columns." + # So strip out the headers beyond the "non_grade_columns," and + # strip out all the grades in the 'data' key. + if not get_grades or use_offline: + expected_data["header"] = expected_data["header"][:non_grade_columns] + # This iterates over the lists of grades in the 'data' key + # of the return dictionary and strips out everything after + # the non_grade_columns. + for index, rec in enumerate(expected_data["data"]): + expected_data["data"][index] = rec[:non_grade_columns] + # Wipe out all data in the 'assignments' key if use_offline + # is True; no assignment data is returned. + if use_offline: + expected_data['assignments'] = [] + # If get_grades is False, get_student_grade_summary_data doesn't + # even return an 'assignments' key, so delete it. + if get_grades is False: + del expected_data['assignments'] + # If get_raw_scores is true, get_student_grade_summary_data returns + # the raw score per assignment. For example, the "0.3333333333333333" + # in the data above is for getting one out of three possible + # answers correct. Getting raw scores means the actual score (1) is + # return instead of: 1.0/3.0 + # For some reason, this also causes it to not to return any assignments + # without attempts, so most of the headers are removed. + elif get_raw_scores: + expected_data["data"] = [ + [ + 1, u'u1', u'username', u'view@test.com', + '', None, None, None + ], + [ + 2, u'u2', u'username', u'view2@test.com', + '', 0.0, 1.0, 0.0 + ], + ] + expected_data["assignments"] = [u'p3', u'p2', u'p1'] + expected_data["header"] = [ + u'ID', u'Username', u'Full Name', u'edX email', + u'External email', u'p3', u'p2', u'p1' + ] + # Strip out the single-value float scores and replace them + # with two-tuples of actual and possible scores (see docstring). + if get_score_max: + expected_data["data"][-1][-3:] = (0.0, 1), (1.0, 1.0), (0.0, 1) + + return expected_data + + def test_grade_summary_data_defaults(self): + """ + Test grade summary data report generation with all default kwargs. + + This test compares the output of the get_student_grade_summary_data + with a dictionary of exected values. The purpose of this test is + to ensure that future changes to the get_student_grade_summary_data + function (for example, mitocw/edx-platform #95). + """ + request = DummyRequest() + self.answer_question() + data = get_student_grade_summary_data(request, self.course) + expected_data = self.get_expected_grade_data() + self.compare_data(data, expected_data) + + def test_grade_summary_data_raw_scores(self): + """ + Test grade summary data report generation with get_raw_scores True. + """ + request = DummyRequest() + self.answer_question() + data = get_student_grade_summary_data( + request, self.course, get_raw_scores=True, + ) + expected_data = self.get_expected_grade_data(get_raw_scores=True) + self.compare_data(data, expected_data) + + def test_grade_summary_data_no_grades(self): + """ + Test grade summary data report generation with + get_grades set to False. + """ + request = DummyRequest() + self.answer_question() + + data = get_student_grade_summary_data( + request, self.course, get_grades=False + ) + expected_data = self.get_expected_grade_data(get_grades=False) + # if get_grades is False, get_expected_grade_data does not + # add an "assignments" key. 
+ self.assertNotIn("assignments", expected_data) + self.compare_data(data, expected_data) + + def test_grade_summary_data_use_offline(self): + """ + Test grade summary data report generation with use_offline True. + """ + request = DummyRequest() + self.answer_question() + data = get_student_grade_summary_data( + request, self.course, use_offline=True) + expected_data = self.get_expected_grade_data(use_offline=True) + self.compare_data(data, expected_data) + + def test_grade_summary_data_use_offline_and_raw_scores(self): + """ + Test grade summary data report generation with use_offline + and get_raw_scores both True. + """ + request = DummyRequest() + self.answer_question() + data = get_student_grade_summary_data( + request, self.course, use_offline=True, get_raw_scores=True + ) + expected_data = self.get_expected_grade_data( + use_offline=True, get_raw_scores=True + ) + self.compare_data(data, expected_data) + + def test_grade_summary_data_get_score_max(self): + """ + Test grade summary data report generation with get_score_max set + to True (also requires get_raw_scores to be True). + """ + request = DummyRequest() + self.answer_question() + data = get_student_grade_summary_data( + request, self.course, use_offline=True, get_raw_scores=True, + get_score_max=True, + ) + expected_data = self.get_expected_grade_data( + use_offline=True, get_raw_scores=True, get_score_max=True, + ) + self.compare_data(data, expected_data) + + def compare_data(self, data, expected_data): + """ + Compare the output of the get_student_grade_summary_data + function to the expected_data data. + """ + + # Currently, all kwargs to get_student_grade_summary_data + # return a dictionary with the same keys, except for + # get_grades=False, which results in no 'assignments' key. + # This is explicitly checked for above in + # test_grade_summary_data_no_grades. This is a backup which + # will catch future changes. + self.assertListEqual( + expected_data.keys(), + data.keys(), + ) + + # Ensure the student info and assignment names are as expected. 
for key in ['assignments', 'header']: self.assertListEqual(expected_data[key], data[key]) diff --git a/lms/djangoapps/instructor/views/legacy.py b/lms/djangoapps/instructor/views/legacy.py index b5996c7d8dbf..24264f193568 100644 --- a/lms/djangoapps/instructor/views/legacy.py +++ b/lms/djangoapps/instructor/views/legacy.py @@ -12,7 +12,7 @@ import requests import urllib -from collections import defaultdict, OrderedDict +from collections import defaultdict, OrderedDict, Counter from markupsafe import escape from requests.status_codes import codes from StringIO import StringIO @@ -34,7 +34,6 @@ from courseware import grades from courseware.access import has_access from courseware.courses import get_course_with_access, get_cms_course_link -from courseware.models import StudentModule from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR from django_comment_client.utils import has_forum_access from instructor.offline_gradecalc import student_grades, offline_grades_available @@ -235,25 +234,41 @@ def domatch(student): elif action in ['Display grades for assignment', 'Export grades for assignment to remote gradebook', 'Export CSV file of grades for assignment']: + normalize_grades_enable = 1 if request.POST.get('normalize_grades', None) else 0 log.debug(action) datatable = {} aname = request.POST.get('assignment_name', '') if not aname: msg += "{text}".format(text=_("Please enter an assignment name")) else: - allgrades = get_student_grade_summary_data(request, course, get_grades=True, use_offline=use_offline) + allgrades = get_student_grade_summary_data( + request, + course, + get_grades=True, + use_offline=use_offline, + get_score_max=False if normalize_grades_enable == 1 else True + ) + if aname not in allgrades['assignments']: msg += "{text}".format( text=_("Invalid assignment name '{name}'").format(name=aname) ) else: aidx = allgrades['assignments'].index(aname) - datatable = {'header': [_('External email'), aname]} + datatable = {'header': ['External email', aname, 'max_pts', 'normalize']} ddata = [] - for student in allgrades['students']: # do one by one in case there is a student who has only partial grades - try: - ddata.append([student.email, student.grades[aidx]]) - except IndexError: + # do one by one in case there is a student who has only partial grades + for student in allgrades['students']: + if len(student.grades) >= aidx and student.grades[aidx] is not None: + ddata.append( + [ + student.email, + student.grades[aidx][0], + student.grades[aidx][1], + normalize_grades_enable + ], + ) + else: log.debug(u'No grade for assignment %(idx)s (%(name)s) for student %(email)s', { "idx": aidx, "name": aname, @@ -659,17 +674,21 @@ def __init__(self): self.grades = {} self._current_row = {} - def _add_grade_to_row(self, component, score): + def _add_grade_to_row(self, component, score, possible=None): """Creates component if needed, and assigns score Args: component (str): Course component being graded score (float): Score of student on component + possible (float): Max possible score for the component Returns: None """ component_index = self.components.setdefault(component, len(self.components)) + if possible is not None: + # send a tuple instead of a single value + score = (score, possible) self._current_row[component_index] = score @contextmanager @@ -709,7 +728,10 @@ def get_graded_components(self): return self.components.keys() -def get_student_grade_summary_data(request, course, get_grades=True, get_raw_scores=False, use_offline=False): +def get_student_grade_summary_data( + 
request, course, get_grades=True, get_raw_scores=False, + use_offline=False, get_score_max=False +): """ Return data arrays with student identity and grades for specified course. @@ -725,6 +747,11 @@ def get_student_grade_summary_data(request, course, get_grades=True, get_raw_sco data = list (one per student) of lists of data corresponding to the fields If get_raw_scores=True, then instead of grade summaries, the raw grades for all graded modules are returned. + + If get_score_max is True, two values will be returned for each grade -- the + total number of points earned and the total number of points possible. For + example, if two points are possible and one is earned, (1, 2) will be + returned instead of 0.5 (the default). """ course_key = course.id enrolled_students = User.objects.filter( @@ -751,12 +778,35 @@ def get_student_grade_summary_data(request, course, get_grades=True, get_raw_sco log.debug(u'student=%s, gradeset=%s', student, gradeset) with gtab.add_row(student.id) as add_grade: if get_raw_scores: - # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned'] + # The following code calls add_grade, the helper yielded + # by the add_row context manager on the GradeTable class. It adds + # a grade for each assignment. Depending on whether + # get_score_max is True, it stores either a single + # value as a float between 0 and 1, or a two-tuple + # containing the earned score and possible score for + # the assignment (see docstring). for score in gradeset['raw_scores']: - add_grade(score.section, getattr(score, 'earned', score[0])) + if get_score_max is True: + add_grade(score.section, score.earned, score.possible) + else: + add_grade(score.section, score.earned) else: + category_cnts = Counter() + progress_summary = grades._progress_summary(student, request, course) for grade_item in gradeset['section_breakdown']: - add_grade(grade_item['label'], grade_item['percent']) + category = grade_item['category'] + try: + location = gradeset['totaled_scores'][category][category_cnts[category]].module_id + earned, possible = progress_summary.score_for_module(location) + if get_score_max is True: + add_grade(grade_item['label'], earned, possible=possible) + else: + add_grade(grade_item['label'], grade_item['percent'], possible=1) + except (IndexError, KeyError): + # The exercise is in the 'section_breakdown' dict but not in 'totaled_scores', either + # because the student has not attempted it or because it is not gradable.
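# --- Illustrative aside, not part of the patch above -----------------------
# A standalone sketch of what the get_score_max switch changes about the value
# stored for each grade: either a normalized fraction (the old behaviour) or
# an (earned, possible) tuple. The helper name below is invented for
# illustration; only the tuple-vs-fraction rule mirrors the patched code.
def _sketch_pack_grade(earned, possible, get_score_max=False):
    """Return the value a GradeTable-style row would hold for one grade."""
    if get_score_max:
        # Keep both numbers so a caller such as the remote gradebook export
        # can send raw points and let the consumer normalize them.
        return (earned, possible)
    # Default: a single value between 0 and 1.
    return float(earned) / possible if possible else 0.0

assert _sketch_pack_grade(1, 2, get_score_max=True) == (1, 2)
assert _sketch_pack_grade(1, 2) == 0.5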
+ add_grade(grade_item['label'], grade_item['percent'], possible=1) + category_cnts[category] += 1 student.grades = gtab.get_grade(student.id) data.append(datarow) diff --git a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py index d58f4d1865b8..12210c4260a7 100644 --- a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py +++ b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py @@ -1336,7 +1336,7 @@ def test_certificate_generation_for_students(self): current_task = Mock() current_task.update_state = Mock() - with self.assertNumQueries(104): + with self.assertNumQueries(125): with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task: mock_current_task.return_value = current_task with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue: diff --git a/lms/djangoapps/lti_provider/migrations/0004_add_version_to_graded_assignment.py b/lms/djangoapps/lti_provider/migrations/0004_add_version_to_graded_assignment.py new file mode 100644 index 000000000000..ae232a7571b8 --- /dev/null +++ b/lms/djangoapps/lti_provider/migrations/0004_add_version_to_graded_assignment.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# pylint: disable=invalid-name, missing-docstring, unused-argument, unused-import, line-too-long +from south.utils import datetime_utils as datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding field 'GradedAssignment.version_number' + db.add_column('lti_provider_gradedassignment', 'version_number', + self.gf('django.db.models.fields.IntegerField')(default=0), + keep_default=False) + + + def backwards(self, orm): + # Deleting field 'GradedAssignment.version_number' + db.delete_column('lti_provider_gradedassignment', 'version_number') + + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { + 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + }, + 'lti_provider.gradedassignment': { + 'Meta': {'unique_together': "(('outcome_service', 'lis_result_sourcedid'),)", 'object_name': 'GradedAssignment'}, + 'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'lis_result_sourcedid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'outcome_service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.OutcomeService']"}), + 'usage_key': ('xmodule_django.models.UsageKeyField', [], {'max_length': '255', 'db_index': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), + 'version_number': ('django.db.models.fields.IntegerField', [], {'default': '0'}) + }, + 'lti_provider.lticonsumer': { + 'Meta': {'object_name': 'LtiConsumer'}, + 'consumer_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), + 'consumer_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), + 'consumer_secret': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'instance_guid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}) + }, + 'lti_provider.ltiuser': { + 'Meta': {'unique_together': "(('lti_consumer', 'lti_user_id'),)", 'object_name': 'LtiUser'}, + 'edx_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'lti_consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.LtiConsumer']"}), + 'lti_user_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}) + }, + 'lti_provider.outcomeservice': { + 'Meta': {'object_name': 'OutcomeService'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'lis_outcome_service_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), + 'lti_consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.LtiConsumer']"}) + } + } + + complete_apps = ['lti_provider'] diff --git 
a/lms/djangoapps/lti_provider/models.py b/lms/djangoapps/lti_provider/models.py index 61c024a107a4..847f6ee5223e 100644 --- a/lms/djangoapps/lti_provider/models.py +++ b/lms/djangoapps/lti_provider/models.py @@ -112,6 +112,7 @@ class GradedAssignment(models.Model): usage_key = UsageKeyField(max_length=255, db_index=True) outcome_service = models.ForeignKey(OutcomeService) lis_result_sourcedid = models.CharField(max_length=255, db_index=True) + version_number = models.IntegerField(default=0) class Meta(object): """ diff --git a/lms/djangoapps/lti_provider/outcomes.py b/lms/djangoapps/lti_provider/outcomes.py index bcfd37a883ac..e36234bb5780 100644 --- a/lms/djangoapps/lti_provider/outcomes.py +++ b/lms/djangoapps/lti_provider/outcomes.py @@ -7,6 +7,7 @@ from lxml import etree from lxml.builder import ElementMaker import requests +from requests.exceptions import RequestException import requests_oauthlib import uuid @@ -95,6 +96,61 @@ def generate_replace_result_xml(result_sourcedid, score): return etree.tostring(xml, xml_declaration=True, encoding='UTF-8') +def get_assignments_for_problem(problem_descriptor, user_id, course_key): + """ + Trace the parent hierarchy from a given problem to find all blocks that + correspond to graded assignment launches for this user. A problem may + show up multiple times for a given user; the problem could be embedded in + multiple courses (or multiple times in the same course), or the block could + be embedded more than once at different granularities (as an individual + problem and as a problem in a vertical, for example). + + Returns a list of GradedAssignment objects that are associated with the + given descriptor for the current user. + """ + locations = [] + current_descriptor = problem_descriptor + while current_descriptor: + locations.append(current_descriptor.location) + current_descriptor = current_descriptor.get_parent() + assignments = GradedAssignment.objects.filter( + user=user_id, course_key=course_key, usage_key__in=locations + ) + return assignments + + +def send_score_update(assignment, score): + """ + Create and send the XML message to the campus LMS system to update the grade + for a single graded assignment. + """ + xml = generate_replace_result_xml( + assignment.lis_result_sourcedid, score + ) + try: + response = sign_and_send_replace_result(assignment, xml) + except RequestException: + # failed to send result. 'response' is None, so more detail will be + # logged at the end of the method. + response = None + log.exception("Outcome Service: Error when sending result.") + + # If something went wrong, make sure that we have a complete log record. + # That way we can manually fix things up on the campus system later if + # necessary. + if not (response and check_replace_result_response(response)): + log.error( + "Outcome Service: Failed to update score on LTI consumer. " + "User: %s, course: %s, usage: %s, score: %s, status: %s, body: %s", + assignment.user, + assignment.course_key, + assignment.usage_key, + score, + response, + response.text if response else 'Unknown' + ) + + def sign_and_send_replace_result(assignment, xml): """ Take the XML document generated in generate_replace_result_xml, and sign it diff --git a/lms/djangoapps/lti_provider/tasks.py b/lms/djangoapps/lti_provider/tasks.py index cf11489f5d9c..cb1f70e6b231 100644 --- a/lms/djangoapps/lti_provider/tasks.py +++ b/lms/djangoapps/lti_provider/tasks.py @@ -2,15 +2,19 @@ Asynchronous tasks for the LTI provider app. 
""" +from django.conf import settings +from django.contrib.auth.models import User from django.dispatch import receiver import logging -from requests.exceptions import RequestException +from courseware.grades import get_weighted_scores from courseware.models import SCORE_CHANGED from lms import CELERY_APP from lti_provider.models import GradedAssignment -import lti_provider.outcomes +import lti_provider.outcomes as outcomes from lti_provider.views import parse_course_and_usage_keys +from opaque_keys.edx.keys import CourseKey +from xmodule.modulestore.django import modulestore log = logging.getLogger("edx.lti_provider") @@ -28,13 +32,18 @@ def score_changed_handler(sender, **kwargs): # pylint: disable=unused-argument usage_id = kwargs.get('usage_id', None) if None not in (points_earned, points_possible, user_id, course_id, user_id): - send_outcome.delay( - points_possible, - points_earned, - user_id, - course_id, - usage_id - ) + course_key, usage_key = parse_course_and_usage_keys(course_id, usage_id) + assignments = increment_assignment_versions(course_key, usage_key, user_id) + for assignment in assignments: + if assignment.usage_key == usage_key: + send_leaf_outcome.delay( + assignment.id, points_earned, points_possible + ) + else: + send_composite_outcome.apply_async( + (user_id, course_id, assignment.id, assignment.version_number), + countdown=settings.LTI_AGGREGATE_SCORE_PASSBACK_DELAY + ) else: log.error( "Outcome Service: Required signal parameter is None. " @@ -44,55 +53,86 @@ def score_changed_handler(sender, **kwargs): # pylint: disable=unused-argument ) -@CELERY_APP.task -def send_outcome(points_possible, points_earned, user_id, course_id, usage_id): +def increment_assignment_versions(course_key, usage_key, user_id): """ - Calculate the score for a given user in a problem and send it to the - appropriate LTI consumer's outcome service. + Update the version numbers for all assignments that are affected by a score + change event. Returns a list of all affected assignments. """ - course_key, usage_key = parse_course_and_usage_keys(course_id, usage_id) - assignments = GradedAssignment.objects.filter( - user=user_id, course_key=course_key, usage_key=usage_key + problem_descriptor = modulestore().get_item(usage_key) + # Get all assignments involving the current problem for which the campus LMS + # is expecting a grade. There may be many possible graded assignments, if + # a problem has been added several times to a course at different + # granularities (such as the unit or the vertical). + assignments = outcomes.get_assignments_for_problem( + problem_descriptor, user_id, course_key ) - - # Calculate the user's score, on a scale of 0.0 - 1.0. - score = float(points_earned) / float(points_possible) - - # There may be zero or more assignment records. We would expect for there - # to be zero if the user/course/usage combination does not relate to a - # previous graded LTI launch. This can happen if an LTI consumer embeds some - # gradable content in a context that doesn't require a score (maybe by - # including an exercise as a sample that students may complete but don't - # count towards their grade). - # There could be more than one GradedAssignment record if the same content - # is embedded more than once in a single course. This would be a strange - # course design on the consumer's part, but we handle it by sending update - # messages for all launches of the content. 
for assignment in assignments: - xml = lti_provider.outcomes.generate_replace_result_xml( - assignment.lis_result_sourcedid, score + assignment.version_number += 1 + assignment.save() + return assignments + + +@CELERY_APP.task +def send_composite_outcome(user_id, course_id, assignment_id, version): + """ + Calculate and transmit the score for a composite module (such as a + vertical). + + A composite module may contain multiple problems, so we need to + calculate the total points earned and possible for all child problems. This + requires calculating the scores for the whole course, which is an expensive + operation. + + Callers should be aware that the score calculation code accesses the latest + scores from the database. This can lead to a race condition between a view + that updates a user's score and the calculation of the grade. If the Celery + task attempts to read the score from the database before the view exits (and + its transaction is committed), it will see a stale value. Care should be + taken that this task is not triggered until the view exits. + + The GradedAssignment model has a version_number field that is incremented + whenever the score is updated. It is used by this method for two purposes. + First, it allows the task to exit if it detects that it has been superseded + by another task that will transmit the score for the same assignment. + Second, it prevents a race condition where two tasks calculate different + scores for a single assignment, and may potentially update the campus LMS + in the wrong order. + """ + assignment = GradedAssignment.objects.get(id=assignment_id) + if version != assignment.version_number: + log.info( + "Score passback for GradedAssignment %s skipped. More recent score available.", + assignment.id ) - try: - response = lti_provider.outcomes.sign_and_send_replace_result(assignment, xml) - except RequestException: - # failed to send result. 'response' is None, so more detail will be - # logged at the end of the method. - response = None - log.exception("Outcome Service: Error when sending result.") - - # If something went wrong, make sure that we have a complete log record. - # That way we can manually fix things up on the campus system later if - # necessary. - if not (response and lti_provider.outcomes.check_replace_result_response(response)): - log.error( - "Outcome Service: Failed to update score on LTI consumer. " - "User: %s, course: %s, usage: %s, score: %s, possible: %s " - "status: %s, body: %s", - user_id, - course_key, - usage_key, - points_earned, - points_possible, - response, - response.text if response else 'Unknown' - ) + return + course_key = CourseKey.from_string(course_id) + mapped_usage_key = assignment.usage_key.map_into_course(course_key) + user = User.objects.get(id=user_id) + course = modulestore().get_course(course_key, depth=0) + progress_summary = get_weighted_scores(user, course) + earned, possible = progress_summary.score_for_module(mapped_usage_key) + if possible == 0: + weighted_score = 0 + else: + weighted_score = float(earned) / float(possible) + + assignment = GradedAssignment.objects.get(id=assignment_id) + if assignment.version_number == version: + outcomes.send_score_update(assignment, weighted_score) + + +@CELERY_APP.task +def send_leaf_outcome(assignment_id, points_earned, points_possible): + """ + Calculate and transmit the score for a single problem. This method assumes + that the individual problem was the source of a score update, and so it + directly takes the points earned and possible values. 
As such it does not + have to calculate the scores for the course, making this method far faster + than send_outcome_for_composite_assignment. + """ + assignment = GradedAssignment.objects.get(id=assignment_id) + if points_possible == 0: + weighted_score = 0 + else: + weighted_score = float(points_earned) / float(points_possible) + outcomes.send_score_update(assignment, weighted_score) diff --git a/lms/djangoapps/lti_provider/tests/test_outcomes.py b/lms/djangoapps/lti_provider/tests/test_outcomes.py index 3e645885c9e3..e765c8e712ab 100644 --- a/lms/djangoapps/lti_provider/tests/test_outcomes.py +++ b/lms/djangoapps/lti_provider/tests/test_outcomes.py @@ -9,8 +9,19 @@ from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService import lti_provider.outcomes as outcomes -import lti_provider.tasks as tasks from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator +from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase +from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory, check_mongo_calls + + +def create_score(earned, possible): + """ + Create a new mock Score object with specified earned and possible values + """ + score = MagicMock() + score.possible = possible + score.earned = earned + return score class StoreOutcomeParametersTest(TestCase): @@ -177,81 +188,6 @@ def test_sign_and_send_replace_result(self, post_mock): self.assertEqual(response, 'response') -class SendOutcomeTest(TestCase): - """ - Tests for the send_outcome method in tasks.py - """ - - def setUp(self): - super(SendOutcomeTest, self).setUp() - self.course_key = CourseLocator( - org='some_org', - course='some_course', - run='some_run' - ) - self.usage_key = BlockUsageLocator( - course_key=self.course_key, - block_type='problem', - block_id='block_id' - ) - self.user = UserFactory.create() - self.points_possible = 10 - self.points_earned = 3 - self.generate_xml_mock = self.setup_patch( - 'lti_provider.outcomes.generate_replace_result_xml', - 'replace result XML' - ) - self.replace_result_mock = self.setup_patch( - 'lti_provider.outcomes.sign_and_send_replace_result', - 'replace result response' - ) - self.check_result_mock = self.setup_patch( - 'lti_provider.outcomes.check_replace_result_response', - True - ) - consumer = LtiConsumer( - consumer_name='Lti Consumer Name', - consumer_key='consumer_key', - consumer_secret='consumer_secret', - instance_guid='tool_instance_guid' - ) - consumer.save() - outcome = OutcomeService( - lis_outcome_service_url='http://example.com/service_url', - lti_consumer=consumer - ) - outcome.save() - self.assignment = GradedAssignment( - user=self.user, - course_key=self.course_key, - usage_key=self.usage_key, - outcome_service=outcome, - lis_result_sourcedid='sourcedid', - ) - self.assignment.save() - - def setup_patch(self, function_name, return_value): - """ - Patch a method with a given return value, and return the mock - """ - mock = MagicMock(return_value=return_value) - new_patch = patch(function_name, new=mock) - new_patch.start() - self.addCleanup(new_patch.stop) - return mock - - def test_send_outcome(self): - tasks.send_outcome( - self.points_possible, - self.points_earned, - self.user.id, - unicode(self.course_key), - unicode(self.usage_key) - ) - self.generate_xml_mock.assert_called_once_with('sourcedid', 0.3) - self.replace_result_mock.assert_called_once_with(self.assignment, 'replace result XML') - - class XmlHandlingTest(TestCase): """ Tests for the generate_replace_result_xml and check_replace_result_response @@ 
-366,3 +302,125 @@ def test_response_with_failing_status_field(self): major_code='failure' ) self.assertFalse(outcomes.check_replace_result_response(response)) + + +class TestAssignmentsForProblem(ModuleStoreTestCase): + """ + Test cases for the assignments_for_problem method in outcomes.py + """ + def setUp(self): + super(TestAssignmentsForProblem, self).setUp() + self.user = UserFactory.create() + self.user_id = self.user.id + self.outcome_service = self.create_outcome_service('outcomes') + self.course = CourseFactory.create() + with self.store.bulk_operations(self.course.id, emit_signals=False): + self.chapter = ItemFactory.create(parent=self.course, category="chapter") + self.vertical = ItemFactory.create(parent=self.chapter, category="vertical") + self.unit = ItemFactory.create(parent=self.vertical, category="unit") + + def create_outcome_service(self, id_suffix): + """ + Create and save a new OutcomeService model in the test database. The + OutcomeService model requires an LtiConsumer model, so we create one of + those as well. The method takes an ID string that is used to ensure that + unique fields do not conflict. + """ + lti_consumer = LtiConsumer( + consumer_name='lti_consumer_name' + id_suffix, + consumer_key='lti_consumer_key' + id_suffix, + consumer_secret='lti_consumer_secret' + id_suffix, + instance_guid='lti_instance_guid' + id_suffix + ) + lti_consumer.save() + outcome_service = OutcomeService( + lis_outcome_service_url='https://example.com/outcomes/' + id_suffix, + lti_consumer=lti_consumer + ) + outcome_service.save() + return outcome_service + + def create_graded_assignment(self, desc, result_id, outcome_service): + """ + Create and save a new GradedAssignment model in the test database. + """ + assignment = GradedAssignment( + user=self.user, + course_key=self.course.id, + usage_key=desc.location, + outcome_service=outcome_service, + lis_result_sourcedid=result_id, + version_number=0 + ) + assignment.save() + return assignment + + def test_with_no_graded_assignments(self): + with check_mongo_calls(3): + assignments = outcomes.get_assignments_for_problem( + self.unit, self.user_id, self.course.id + ) + self.assertEqual(len(assignments), 0) + + def test_with_graded_unit(self): + self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service) + with check_mongo_calls(3): + assignments = outcomes.get_assignments_for_problem( + self.unit, self.user_id, self.course.id + ) + self.assertEqual(len(assignments), 1) + self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit') + + def test_with_graded_vertical(self): + self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service) + with check_mongo_calls(3): + assignments = outcomes.get_assignments_for_problem( + self.unit, self.user_id, self.course.id + ) + self.assertEqual(len(assignments), 1) + self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_vertical') + + def test_with_graded_unit_and_vertical(self): + self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service) + self.create_graded_assignment(self.vertical, 'graded_vertical', self.outcome_service) + with check_mongo_calls(3): + assignments = outcomes.get_assignments_for_problem( + self.unit, self.user_id, self.course.id + ) + self.assertEqual(len(assignments), 2) + self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit') + self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_vertical') + + def test_with_unit_used_twice(self): + self.create_graded_assignment(self.unit, 
'graded_unit', self.outcome_service) + self.create_graded_assignment(self.unit, 'graded_unit2', self.outcome_service) + with check_mongo_calls(3): + assignments = outcomes.get_assignments_for_problem( + self.unit, self.user_id, self.course.id + ) + self.assertEqual(len(assignments), 2) + self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit') + self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2') + + def test_with_unit_graded_for_different_user(self): + self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service) + other_user = UserFactory.create() + with check_mongo_calls(3): + assignments = outcomes.get_assignments_for_problem( + self.unit, other_user.id, self.course.id + ) + self.assertEqual(len(assignments), 0) + + def test_with_unit_graded_for_multiple_consumers(self): + other_outcome_service = self.create_outcome_service('second_consumer') + self.create_graded_assignment(self.unit, 'graded_unit', self.outcome_service) + self.create_graded_assignment(self.unit, 'graded_unit2', other_outcome_service) + with check_mongo_calls(3): + assignments = outcomes.get_assignments_for_problem( + self.unit, self.user_id, self.course.id + ) + self.assertEqual(len(assignments), 2) + self.assertEqual(assignments[0].lis_result_sourcedid, 'graded_unit') + self.assertEqual(assignments[1].lis_result_sourcedid, 'graded_unit2') + self.assertEqual(assignments[0].outcome_service, self.outcome_service) + self.assertEqual(assignments[1].outcome_service, other_outcome_service) diff --git a/lms/djangoapps/lti_provider/tests/test_tasks.py b/lms/djangoapps/lti_provider/tests/test_tasks.py new file mode 100644 index 000000000000..e181435d3d52 --- /dev/null +++ b/lms/djangoapps/lti_provider/tests/test_tasks.py @@ -0,0 +1,132 @@ +""" +Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py +""" + +import ddt +from django.test import TestCase +from mock import patch, MagicMock +from student.tests.factories import UserFactory + +from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService +import lti_provider.tasks as tasks +from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator + + +class BaseOutcomeTest(TestCase): + """ + Super type for tests of both the leaf and composite outcome celery tasks. 
+ """ + def setUp(self): + super(BaseOutcomeTest, self).setUp() + self.course_key = CourseLocator( + org='some_org', + course='some_course', + run='some_run' + ) + self.usage_key = BlockUsageLocator( + course_key=self.course_key, + block_type='problem', + block_id='block_id' + ) + self.user = UserFactory.create() + self.consumer = LtiConsumer( + consumer_name='Lti Consumer Name', + consumer_key='consumer_key', + consumer_secret='consumer_secret', + instance_guid='tool_instance_guid' + ) + self.consumer.save() + outcome = OutcomeService( + lis_outcome_service_url='http://example.com/service_url', + lti_consumer=self.consumer + ) + outcome.save() + self.assignment = GradedAssignment( + user=self.user, + course_key=self.course_key, + usage_key=self.usage_key, + outcome_service=outcome, + lis_result_sourcedid='sourcedid', + version_number=1, + ) + self.assignment.save() + + self.send_score_update_mock = self.setup_patch( + 'lti_provider.outcomes.send_score_update', None + ) + + def setup_patch(self, function_name, return_value): + """ + Patch a method with a given return value, and return the mock + """ + mock = MagicMock(return_value=return_value) + new_patch = patch(function_name, new=mock) + new_patch.start() + self.addCleanup(new_patch.stop) + return mock + + +@ddt.ddt +class SendLeafOutcomeTest(BaseOutcomeTest): + """ + Tests for the send_leaf_outcome method in tasks.py + """ + @ddt.data( + (2.0, 2.0, 1.0), + (2.0, 0.0, 0.0), + (1, 2, 0.5), + ) + @ddt.unpack + def test_outcome_with_score(self, earned, possible, expected): + tasks.send_leaf_outcome( + self.assignment.id, # pylint: disable=no-member + earned, + possible + ) + self.send_score_update_mock.assert_called_once_with(self.assignment, expected) + + +@ddt.ddt +class SendCompositeOutcomeTest(BaseOutcomeTest): + """ + Tests for the send_composite_outcome method in tasks.py + """ + def setUp(self): + super(SendCompositeOutcomeTest, self).setUp() + self.descriptor = MagicMock() + self.descriptor.location = BlockUsageLocator( + course_key=self.course_key, + block_type='problem', + block_id='problem', + ) + self.weighted_scores = MagicMock() + self.weighted_scores_mock = self.setup_patch( + 'lti_provider.tasks.get_weighted_scores', self.weighted_scores + ) + self.module_store = MagicMock() + self.module_store.get_item = MagicMock(return_value=self.descriptor) + self.check_result_mock = self.setup_patch( + 'lti_provider.tasks.modulestore', + self.module_store + ) + + @ddt.data( + (2.0, 2.0, 1.0), + (2.0, 0.0, 0.0), + (1, 2, 0.5), + ) + @ddt.unpack + def test_outcome_with_score_score(self, earned, possible, expected): + self.weighted_scores.score_for_module = MagicMock(return_value=(earned, possible)) + tasks.send_composite_outcome( + self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member + ) + self.send_score_update_mock.assert_called_once_with(self.assignment, expected) + + def test_outcome_with_outdated_version(self): + self.assignment.version_number = 2 + self.assignment.save() + tasks.send_composite_outcome( + self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member + ) + self.assertEqual(self.weighted_scores_mock.call_count, 0) diff --git a/lms/envs/common.py b/lms/envs/common.py index cffb5a299c55..21112b03e8e1 100644 --- a/lms/envs/common.py +++ b/lms/envs/common.py @@ -40,6 +40,7 @@ from .discussionsettings import * import dealer.git from xmodule.modulestore.modulestore_settings import update_module_store_settings +from xmodule.modulestore.edit_info import 
EditInfoMixin from xmodule.mixin import LicenseMixin from lms.djangoapps.lms_xblock.mixin import LmsBlockMixin @@ -416,6 +417,9 @@ # The block types to disable need to be specified in "x block disable config" in django admin. 'ENABLE_DISABLING_XBLOCK_TYPES': True, + + # Enable the max score cache to speed up grading + 'ENABLE_MAX_SCORE_CACHE': True, } # Ignore static asset files on import which match this pattern @@ -703,7 +707,7 @@ # These are the Mixins that should be added to every XBlock. # This should be moved into an XBlock Runtime/Application object # once the responsibility of XBlock creation is moved out of modulestore - cpennington -XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin) +XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin, EditInfoMixin) # Allow any XBlock in the LMS XBLOCK_SELECT_FUNCTION = prefer_xmodules @@ -2560,3 +2564,31 @@ # not expected to be active; this setting simply allows administrators to # route any messages intended for LTI users to a common domain. LTI_USER_EMAIL_DOMAIN = 'lti.example.com' + +# An aggregate score is one derived from multiple problems (such as the +# cumulative score for a vertical element containing many problems). Sending +# aggregate scores immediately introduces two issues: one is a race condition +# between the view method and the Celery task where the updated score may not +# yet be visible to the database if the view has not yet returned (and committed +# its transaction). The other is that the student is likely to receive a stream +# of notifications as the score is updated with every problem. Waiting a +# reasonable period of time allows the view transaction to end, and allows us to +# collapse multiple score updates into a single message. +# The time value is in seconds. +LTI_AGGREGATE_SCORE_PASSBACK_DELAY = 15 * 60 + +# Number of seconds before JWT tokens expire +JWT_EXPIRATION = 30 +JWT_ISSUER = None + +# Credit notifications settings +NOTIFICATION_EMAIL_CSS = "templates/credit_notifications/credit_notification.css" +NOTIFICATION_EMAIL_EDX_LOGO = "templates/credit_notifications/edx-logo-header.png" + +#### PROCTORING CONFIGURATION DEFAULTS + +PROCTORING_BACKEND_PROVIDER = { + 'class': 'edx_proctoring.backends.null.NullBackendProvider', + 'options': {}, +} +PROCTORING_SETTINGS = {} diff --git a/lms/templates/courseware/legacy_instructor_dashboard.html b/lms/templates/courseware/legacy_instructor_dashboard.html index 7cf15e441683..712442f0b423 100644 --- a/lms/templates/courseware/legacy_instructor_dashboard.html +++ b/lms/templates/courseware/legacy_instructor_dashboard.html @@ -246,7 +246,9 @@

${_("Export grades to remote gradebook")}



-
  • ${_("Assignment name:")} +
  • ${_("Assignment name:")}