Parker, Michael J.; Bunch, Matt; Pike, Andrew
How Much is Enough? Formative Assessment Dynamics Artikel
In: Journal of Learning Analytics, Bd. 12, Ausg. 2, S. 196–210, 2025, ISSN: 1929-7750.
Abstract | Links | BibTeX | Schlagwörter: Assessment, formative assessment, learning analytics, learning outcomes, O, outcomes prediction, research paper
@article{Parker2025,
  title     = {How Much is Enough? Formative Assessment Dynamics},
  author    = {Parker, Michael J. and Bunch, Matt and Pike, Andrew},
  doi       = {10.18608/jla.2025.8753},
  issn      = {1929-7750},
  year      = {2025},
  date      = {2025-07-04},
  urldate   = {2025-07-04},
  journal   = {Journal of Learning Analytics},
  volume    = {12},
  number    = {2},
  pages     = {196--210},
  abstract  = {While the educational value of formative assessment is widely acknowledged, the precise amount needed to effectively predict student performance on summative assessments remains unclear. This study investigates the relationship between intermediate formative assessment performance and final exam scores, addressing the critical question of how much assessment is needed for accurate prediction. Using a large dataset encompassing over 20,000 student enrollments across 127 course runs of 15 online biomedical sciences courses, we examined the correlation between intermediate assessment scores and final exam performance. Our results show that after completing about 40% of the formative assessments in a course, student scores demonstrate a strong correlation (Pearson r > 0.7) with their final exam scores. The correlation after taking additional formative assessments reaches a maximum of approximately 0.75. This finding was consistent across different course types and lengths, suggesting that the relative amount of assessment taken, rather than the absolute number, is key. Surprisingly, we found that random sampling of assessments was even more predictive than chronological sampling, suggesting that the proportion of questions used, relative to the total number of assessment questions, is more important than their specific sequence. These findings contribute to a deeper understanding of the predictive capabilities of formative assessment, and enable educators to identify at-risk students earlier, optimize assessment design, and develop more efficient and targeted interventions.},
  keywords  = {Assessment, formative assessment, learning analytics, learning outcomes, O, outcomes prediction, research paper},
  pubstate  = {published},
  tppubtype = {article}
}
Morell, Lesley J.
Iterated assessment and feedback improves student outcomes Artikel
In: Studies in Higher Education, Bd. 46, Nr. 3, S. 485–496, 2019.
Abstract | Links | BibTeX | Schlagwörter: Assessment, iterated assessment, Iterative assessment, O, self-assessment, sustainable feedback
@article{Morell2019,
  title     = {Iterated assessment and feedback improves student outcomes},
  author    = {Morell, Lesley J.},
  doi       = {10.1080/03075079.2019.1643301},
  year      = {2019},
  date      = {2019-07-23},
  journal   = {Studies in Higher Education},
  volume    = {46},
  number    = {3},
  pages     = {485--496},
  abstract  = {Feedback is critically important to student learning, but the reduced frequency of assignments combined with isolated or stand-alone tasks reduces the opportunity for students to engage with feedback and use it effectively to enhance their learning. Here, I evaluate student attainment during a module consisting of eight iterated tasks where the task itself is the same but the academic content differs. At the end of the module, students then self-assess their eight submissions and select two for summative assessment. I demonstrate that achievement increases over the course of the module, and that choice is valuable in allowing students to achieve higher summative marks for the course than their formative marks would suggest. Students who performed more weakly at the start of the module saw the greatest benefits from practice and choice, suggesting that these students particularly can benefit from repeated cycles of feedback and increase their marks.},
  keywords  = {Assessment, iterated assessment, Iterative assessment, O, self-assessment, sustainable feedback},
  pubstate  = {published},
  tppubtype = {article},
  internal-note = {NOTE(review): surname may be "Morrell" (check DOI landing page for 10.1080/03075079.2019.1643301) -- confirm before correcting}
}
Seidel, Niels
Aufgabentypen für das Zusammenspiel von E-Assessment und Lernvideos Buchkapitel
In: Bergert, Aline; Lehmann, Anje; Liebscher, Maja; Schulz, Jens (Hrsg.): Videocampus Sachsen – Machbarkeitsuntersuchung, S. 45–60, TU Bergakademie Freiberg, Freiberg, 1, 2018, ISBN: 978-3-86012-575-5.
Abstract | Links | BibTeX | Schlagwörter: Assessment, e-Assessment, higher education, O, Saxony, teaching, video
@inbook{Seidel2018,
  title       = {Aufgabentypen für das Zusammenspiel von E-Assessment und Lernvideos},
  author      = {Seidel, Niels},
  editor      = {Bergert, Aline and Lehmann, Anje and Liebscher, Maja and Schulz, Jens},
  url         = {http://nbn-resolving.de/urn:nbn:de:bsz:105-qucosa2-312017},
  isbn        = {978-3-86012-575-5},
  year        = {2018},
  date        = {2018-08-10},
  urldate     = {2018-11-18},
  booktitle   = {Videocampus Sachsen -- Machbarkeitsuntersuchung},
  pages       = {45--60},
  publisher   = {TU Bergakademie Freiberg},
  address     = {Freiberg},
  edition     = {1},
  institution = {Medienzentrum der TU Bergakademie Freiberg},
  series      = {Freiberger Forschungshefte},
  abstract    = {Lernvideos werden oft als Instruktionsmedien verstanden, die Lerninhalte in audiovisueller Form konservieren und transportieren. Dieser Beitrag ergänzt diese Sichtweise um den Aspekt der Überprüfung des Lernerfolgs mit Hilfe von E-Assessments. Durch die Integration von speziellen Aufgabentypen in den Ablauf der Videowiedergabe können höhere Kompetenzlevel geprüft und weiterführende didaktische Intentionen, Lernszenarien und -formen umgesetzt werden. Im Rahmen der Verbundförderung des Videocampus Sachsen (VCS) konnten entsprechende Feldstudien ausgewertet und Pilotanwendungen im Rahmen des Innovationsvorhabens ViAssess entwickelt werden.},
  keywords    = {Assessment, e-Assessment, higher education, O, Saxony, teaching, video},
  pubstate    = {published},
  tppubtype   = {inbook},
  internal-note = {NOTE(review): chapter in an edited book with distinct editors -- classic BibTeX would use @incollection; kept @inbook to stay consistent with tppubtype/CMS export}
}
Butler, Andrew C.
Multiple-Choice Testing in Education: Are the Best Practices for Assessment Also Good for Learning? Artikel
In: Journal of Applied Research in Memory and Cognition, Bd. 7, Nr. 3, S. 323–331, 2018, ISSN: 2211-3681.
Abstract | Links | BibTeX | Schlagwörter: Assessment, Learning, Multiple-choice, O, Testing
@article{Butler2018,
  title     = {Multiple-Choice Testing in Education: Are the Best Practices for Assessment Also Good for Learning?},
  author    = {Butler, Andrew C.},
  url       = {http://www.sciencedirect.com/science/article/pii/S2211368118301426},
  doi       = {10.1016/j.jarmac.2018.07.002},
  issn      = {2211-3681},
  year      = {2018},
  date      = {2018-07-31},
  urldate   = {2018-10-20},
  journal   = {Journal of Applied Research in Memory and Cognition},
  volume    = {7},
  number    = {3},
  pages     = {323--331},
  abstract  = {Multiple-choice tests are arguably the most popular type of assessment in education, and much research has been dedicated to determining best practices for using them to measure learning. The act of taking a test also causes learning, and numerous studies have investigated how best to use multiple-choice tests to improve long-term retention and produce deeper understanding. In this review article, I explore whether the best practices for assessment align with the best practices for learning. Although consensus between these two literatures is not a foregone conclusion, there is substantial agreement in how best to construct and use multiple-choice tests for these two disparate purposes. The overall recommendation from both literatures is to create questions that are simple in format (e.g., avoid use of complex item types), challenge students but allow them to succeed often, and target specific cognitive processes that correspond to learning objectives.},
  keywords  = {Assessment, Learning, Multiple-choice, O, Testing},
  pubstate  = {published},
  tppubtype = {article}
}