@comment{
Morell, Lesley J.
Iterated assessment and feedback improves student outcomes Artikel
In: Studies in Higher Education, Bd. 46, Nr. 3, S. 485–496, 2019.
Abstract | Links | BibTeX | Schlagwörter: Assessment, iterated assessment, Iterative assessment, O, self-assessment, sustainable feedback
}
@article{Morell2019,
  title     = {Iterated assessment and feedback improves student outcomes},
  author    = {Morell, Lesley J.},
  doi       = {10.1080/03075079.2019.1643301},
  year      = {2019},
  date      = {2019-07-23},
  journal   = {Studies in Higher Education},
  volume    = {46},
  number    = {3},
  pages     = {485--496},
  abstract  = {Feedback is critically important to student learning, but the reduced frequency of assignments combined with isolated or stand-alone tasks reduces the opportunity for students to engage with feedback and use it effectively to enhance their learning. Here, I evaluate student attainment during a module consisting of eight iterated tasks where the task itself is the same but the academic content differs. At the end of the module, students then self-assess their eight submissions and select two for summative assessment. I demonstrate that achievement increases over the course of the module, and that choice is valuable in allowing students to achieve higher summative marks for the course than their formative marks would suggest. Students who performed more weakly at the start of the module saw the greatest benefits from practice and choice, suggesting that these students particularly can benefit from repeated cycles of feedback and increase their marks.},
  keywords  = {Assessment, iterated assessment, Iterative assessment, O, self-assessment, sustainable feedback},
  pubstate  = {published},
  tppubtype = {article}
}
@comment{
Seidel, Niels
Aufgabentypen für das Zusammenspiel von E-Assessment und Lernvideos Buchkapitel
In: Bergert, Aline; Lehmann, Anje; Liebscher, Maja; Schulz, Jens (Hrsg.): Videocampus Sachsen – Machbarkeitsuntersuchung, S. 45–60, TU Bergakademie Freiberg, Freiberg, 1, 2018, ISBN: 978-3-86012-575-5.
Abstract | Links | BibTeX | Schlagwörter: Assessment, e-Assessment, higher education, O, Saxony, teaching, video
}
@inbook{Seidel2018,
  title       = {Aufgabentypen für das Zusammenspiel von {E-Assessment} und Lernvideos},
  author      = {Seidel, Niels},
  editor      = {Bergert, Aline and Lehmann, Anje and Liebscher, Maja and Schulz, Jens},
  url         = {http://nbn-resolving.de/urn:nbn:de:bsz:105-qucosa2-312017},
  isbn        = {978-3-86012-575-5},
  year        = {2018},
  date        = {2018-08-10},
  urldate     = {2018-11-18},
  booktitle   = {Videocampus Sachsen – Machbarkeitsuntersuchung},
  pages       = {45--60},
  publisher   = {TU Bergakademie Freiberg},
  address     = {Freiberg},
  edition     = {1},
  institution = {Medienzentrum der TU Bergakademie Freiberg},
  series      = {Freiberger Forschungshefte},
  abstract    = {Lernvideos werden oft als Instruktionsmedien verstanden, die Lerninhalte in audiovisueller Form konservieren und transportieren. Dieser Beitrag ergänzt diese Sichtweise um den Aspekt der Überprüfung des Lernerfolgs mit Hilfe von E-Assessments. Durch die Integration von speziellen Aufgabentypen in den Ablauf der Videowiedergabe können höhere Kompetenzlevel geprüft und weiterführende didaktische Intentionen, Lernszenarien und -formen umgesetzt werden. Im Rahmen der Verbundförderung des Videocampus Sachsen (VCS) konnten entsprechende Feldstudien ausgewertet und Pilotanwendungen im Rahmen des Innovationsvorhabens ViAssess entwickelt werden.},
  keywords    = {Assessment, e-Assessment, higher education, O, Saxony, teaching, video},
  pubstate    = {published},
  tppubtype   = {inbook}
}
@comment{
Butler, Andrew C.
Multiple-Choice Testing in Education: Are the Best Practices for Assessment Also Good for Learning? Artikel
In: Journal of Applied Research in Memory and Cognition, Bd. 7, Nr. 3, S. 323–331, 2018, ISSN: 2211-3681.
Abstract | Links | BibTeX | Schlagwörter: Assessment, Learning, Multiple-choice, O, Testing
}
@article{Butler2018,
  title     = {Multiple-Choice Testing in Education: Are the Best Practices for Assessment Also Good for Learning?},
  author    = {Butler, Andrew C.},
  url       = {http://www.sciencedirect.com/science/article/pii/S2211368118301426},
  doi       = {10.1016/j.jarmac.2018.07.002},
  issn      = {2211-3681},
  year      = {2018},
  date      = {2018-07-31},
  urldate   = {2018-10-20},
  journal   = {Journal of Applied Research in Memory and Cognition},
  volume    = {7},
  number    = {3},
  pages     = {323--331},
  abstract  = {Multiple-choice tests are arguably the most popular type of assessment in education, and much research has been dedicated to determining best practices for using them to measure learning. The act of taking a test also causes learning, and numerous studies have investigated how best to use multiple-choice tests to improve long-term retention and produce deeper understanding. In this review article, I explore whether the best practices for assessment align with the best practices for learning. Although consensus between these two literatures is not a foregone conclusion, there is substantial agreement in how best to construct and use multiple-choice tests for these two disparate purposes. The overall recommendation from both literatures is to create questions that are simple in format (e.g., avoid use of complex item types), challenge students but allow them to succeed often, and target specific cognitive processes that correspond to learning objectives.},
  keywords  = {Assessment, Learning, Multiple-choice, O, Testing},
  pubstate  = {published},
  tppubtype = {article}
}