+
+ {% trans "Grading strategy for the peer assessment" %}
+
+ {% trans "Mean" %}
+ {% trans "Median (default)" %}
+
+
+ {% trans "Select the preferred grading strategy for the peer assessment. By default, the median across all peer reviews is used to calculate the final grade. If you select the mean, the average of all peer reviews will be used." %}
+
+ {% endif %}
diff --git a/openassessment/xblock/config_mixin.py b/openassessment/xblock/config_mixin.py
index 9512fc9ead..23752c8d57 100644
--- a/openassessment/xblock/config_mixin.py
+++ b/openassessment/xblock/config_mixin.py
@@ -16,6 +16,7 @@
ENHANCED_STAFF_GRADER = 'enhanced_staff_grader'
MFE_VIEWS = 'mfe_views'
SELECTABLE_LEARNER_WAITING_REVIEW = 'selectable_learner_waiting_review'
+ENABLE_PEER_CONFIGURABLE_GRADING = 'peer_configurable_grading'
FEATURE_TOGGLES_BY_FLAG_NAME = {
ALL_FILES_URLS: 'ENABLE_ORA_ALL_FILE_URLS',
@@ -24,7 +25,8 @@
USER_STATE_UPLOAD_DATA: 'ENABLE_ORA_USER_STATE_UPLOAD_DATA',
RUBRIC_REUSE: 'ENABLE_ORA_RUBRIC_REUSE',
ENHANCED_STAFF_GRADER: 'ENABLE_ENHANCED_STAFF_GRADER',
- SELECTABLE_LEARNER_WAITING_REVIEW: 'ENABLE_ORA_SELECTABLE_LEARNER_WAITING_REVIEW'
+ SELECTABLE_LEARNER_WAITING_REVIEW: 'ENABLE_ORA_SELECTABLE_LEARNER_WAITING_REVIEW',
+ ENABLE_PEER_CONFIGURABLE_GRADING: 'ENABLE_ORA_PEER_CONFIGURABLE_GRADING',
}
@@ -180,3 +182,17 @@ def is_selectable_learner_waiting_review_enabled(self):
# .. toggle_creation_date: 2024-02-09
# .. toggle_tickets: https://github.com/openedx/edx-ora2/pull/2025
return self.is_feature_enabled(SELECTABLE_LEARNER_WAITING_REVIEW)
+
+ @cached_property
+ def enable_peer_configurable_grading(self):
+ """
+        Return a boolean indicating whether the peer configurable grading feature is enabled.
+ """
+ # .. toggle_name: FEATURES['ENABLE_ORA_PEER_CONFIGURABLE_GRADING']
+ # .. toggle_implementation: SettingToggle
+ # .. toggle_default: False
+ # .. toggle_description: Enable configurable grading for peer review.
+ # .. toggle_use_cases: open_edx
+ # .. toggle_creation_date: 2024-03-25
+ # .. toggle_tickets: https://github.com/openedx/edx-ora2/pull/2196
+ return self.is_feature_enabled(ENABLE_PEER_CONFIGURABLE_GRADING)
diff --git a/openassessment/xblock/grade_mixin.py b/openassessment/xblock/grade_mixin.py
index 04d1ad7752..d1ac50d0d4 100644
--- a/openassessment/xblock/grade_mixin.py
+++ b/openassessment/xblock/grade_mixin.py
@@ -9,6 +9,7 @@
from xblock.core import XBlock
from django.utils.translation import gettext as _
+from openassessment.assessment.api.peer import get_peer_grading_strategy, PeerGradingStrategy
from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError
@@ -301,7 +302,10 @@ def has_feedback(assessments):
if staff_assessment:
median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
elif "peer-assessment" in assessment_steps:
- median_scores = peer_api.get_assessment_median_scores(submission_uuid)
+ median_scores = peer_api.get_assessment_scores_with_grading_strategy(
+ submission_uuid,
+ self.workflow_requirements()
+ )
elif "self-assessment" in assessment_steps:
median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
@@ -369,7 +373,9 @@ def _get_assessment_part(title, feedback_title, part_criterion_name, assessment)
)
if "peer-assessment" in assessment_steps:
peer_assessment_part = {
- 'title': _('Peer Median Grade'),
+ 'title': self._get_peer_assessment_part_title(
+ get_peer_grading_strategy(self.workflow_requirements())
+ ),
'criterion': criterion,
'option': self._peer_median_option(submission_uuid, criterion),
'individual_assessments': [
@@ -409,6 +415,20 @@ def _get_assessment_part(title, feedback_title, part_criterion_name, assessment)
return assessments
+ def _get_peer_assessment_part_title(self, grading_strategy):
+ """
+ Returns the title for the peer assessment part.
+
+ Args:
+ grading_strategy (str): The grading strategy for the peer assessment.
+
+ Returns:
+ The title for the peer assessment part.
+ """
+ if grading_strategy == PeerGradingStrategy.MEAN:
+ return _('Peer Mean Grade')
+ return _('Peer Median Grade')
+
def _peer_median_option(self, submission_uuid, criterion):
"""
Returns the option for the median peer grade.
@@ -424,7 +444,10 @@ def _peer_median_option(self, submission_uuid, criterion):
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api
- median_scores = peer_api.get_assessment_median_scores(submission_uuid)
+ median_scores = peer_api.get_assessment_scores_with_grading_strategy(
+ submission_uuid,
+ self.workflow_requirements()
+ )
median_score = median_scores.get(criterion['name'], None)
median_score = -1 if median_score is None else median_score
@@ -650,12 +673,10 @@ def _get_score_explanation(self, workflow):
complete = score is not None
assessment_type = self._get_assessment_type(workflow)
-
sentences = {
"staff": _("The grade for this problem is determined by your Staff Grade."),
- "peer": _(
- "The grade for this problem is determined by the median score of "
- "your Peer Assessments."
+ "peer": self._get_peer_explanation_sentence(
+ get_peer_grading_strategy(self.workflow_requirements())
),
"self": _("The grade for this problem is determined by your Self Assessment.")
}
@@ -676,6 +697,26 @@ def _get_score_explanation(self, workflow):
return f"{first_sentence} {second_sentence}".strip()
+ def _get_peer_explanation_sentence(self, peer_grading_strategy):
+ """
+ Return a string which explains how the peer grade is calculated for an ORA assessment.
+
+ Args:
+ peer_grading_strategy (str): The grading strategy for the peer assessment.
+ Returns:
+ str: Message explaining how the grade is determined.
+ """
+ peer_sentence = _(
+ "The grade for this problem is determined by the median score of "
+ "your Peer Assessments."
+ )
+ if peer_grading_strategy == PeerGradingStrategy.MEAN:
+ peer_sentence = _(
+ "The grade for this problem is determined by the mean score of "
+ "your Peer Assessments."
+ )
+ return peer_sentence
+
def generate_report_data(self, user_state_iterator, limit_responses=None):
"""
Return a list of student responses and assessments for this block in a readable way.
diff --git a/openassessment/xblock/static/dist/manifest.json b/openassessment/xblock/static/dist/manifest.json
index 8e5bb8f1cc..e6018befe6 100644
--- a/openassessment/xblock/static/dist/manifest.json
+++ b/openassessment/xblock/static/dist/manifest.json
@@ -1,23 +1,23 @@
{
"base_url": "/static/dist",
- "openassessment-editor-textarea.js": "/openassessment-editor-textarea.2cee26d88c3441ada635.js",
- "openassessment-editor-textarea.js.map": "/openassessment-editor-textarea.2cee26d88c3441ada635.js.map",
- "openassessment-editor-tinymce.js": "/openassessment-editor-tinymce.0b97b77ad7f1b7150f67.js",
- "openassessment-editor-tinymce.js.map": "/openassessment-editor-tinymce.0b97b77ad7f1b7150f67.js.map",
- "openassessment-lms.css": "/openassessment-lms.dc8bb1e464bcaaab4668.css",
- "openassessment-lms.js": "/openassessment-lms.dc8bb1e464bcaaab4668.js",
- "openassessment-lms.css.map": "/openassessment-lms.dc8bb1e464bcaaab4668.css.map",
- "openassessment-lms.js.map": "/openassessment-lms.dc8bb1e464bcaaab4668.js.map",
- "openassessment-ltr.css": "/openassessment-ltr.7955a1e2cc11fc6948de.css",
- "openassessment-ltr.js": "/openassessment-ltr.7955a1e2cc11fc6948de.js",
- "openassessment-ltr.css.map": "/openassessment-ltr.7955a1e2cc11fc6948de.css.map",
- "openassessment-ltr.js.map": "/openassessment-ltr.7955a1e2cc11fc6948de.js.map",
- "openassessment-rtl.css": "/openassessment-rtl.9de7c9bc7c1048c07707.css",
- "openassessment-rtl.js": "/openassessment-rtl.9de7c9bc7c1048c07707.js",
- "openassessment-rtl.css.map": "/openassessment-rtl.9de7c9bc7c1048c07707.css.map",
- "openassessment-rtl.js.map": "/openassessment-rtl.9de7c9bc7c1048c07707.js.map",
- "openassessment-studio.js": "/openassessment-studio.d576fb212cefa2e4b720.js",
- "openassessment-studio.js.map": "/openassessment-studio.d576fb212cefa2e4b720.js.map",
+ "openassessment-editor-textarea.js": "/openassessment-editor-textarea.de70b044ddf6baeaf0b7.js",
+ "openassessment-editor-textarea.js.map": "/openassessment-editor-textarea.de70b044ddf6baeaf0b7.js.map",
+ "openassessment-editor-tinymce.js": "/openassessment-editor-tinymce.a87e38bc7b19d8273858.js",
+ "openassessment-editor-tinymce.js.map": "/openassessment-editor-tinymce.a87e38bc7b19d8273858.js.map",
+ "openassessment-lms.css": "/openassessment-lms.7430e499fae20eeff7bd.css",
+ "openassessment-lms.js": "/openassessment-lms.7430e499fae20eeff7bd.js",
+ "openassessment-lms.css.map": "/openassessment-lms.7430e499fae20eeff7bd.css.map",
+ "openassessment-lms.js.map": "/openassessment-lms.7430e499fae20eeff7bd.js.map",
+ "openassessment-ltr.css": "/openassessment-ltr.5b291771f2af113d4918.css",
+ "openassessment-ltr.js": "/openassessment-ltr.5b291771f2af113d4918.js",
+ "openassessment-ltr.css.map": "/openassessment-ltr.5b291771f2af113d4918.css.map",
+ "openassessment-ltr.js.map": "/openassessment-ltr.5b291771f2af113d4918.js.map",
+ "openassessment-rtl.css": "/openassessment-rtl.731b1e1ea896e74cb5c0.css",
+ "openassessment-rtl.js": "/openassessment-rtl.731b1e1ea896e74cb5c0.js",
+ "openassessment-rtl.css.map": "/openassessment-rtl.731b1e1ea896e74cb5c0.css.map",
+ "openassessment-rtl.js.map": "/openassessment-rtl.731b1e1ea896e74cb5c0.js.map",
+ "openassessment-studio.js": "/openassessment-studio.2f5a8d7caafc999b604a.js",
+ "openassessment-studio.js.map": "/openassessment-studio.2f5a8d7caafc999b604a.js.map",
"fallback-default.png": "/4620b30a966533ace489dcc7afb151b9.png",
"default-avatar.svg": "/95ec738c0b7faac5b5c9126794446bbd.svg"
}
\ No newline at end of file
diff --git a/openassessment/xblock/static/dist/openassessment-editor-textarea.2cee26d88c3441ada635.js b/openassessment/xblock/static/dist/openassessment-editor-textarea.de70b044ddf6baeaf0b7.js
similarity index 94%
rename from openassessment/xblock/static/dist/openassessment-editor-textarea.2cee26d88c3441ada635.js
rename to openassessment/xblock/static/dist/openassessment-editor-textarea.de70b044ddf6baeaf0b7.js
index 6e5557c375..b3f38a1c62 100644
--- a/openassessment/xblock/static/dist/openassessment-editor-textarea.2cee26d88c3441ada635.js
+++ b/openassessment/xblock/static/dist/openassessment-editor-textarea.de70b044ddf6baeaf0b7.js
@@ -1,2 +1,2 @@
-!function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="/",n(n.s=203)}({203:function(e,t){function n(e){return(n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function r(e,t){for(var r=0;r