Morell, Lesley J.
Iterated assessment and feedback improves student outcomes Artikel
In: Studies in Higher Education, Bd. 46, Nr. 3, S. 485–496, 2019.
Abstract | Links | BibTeX | Schlagwörter: Assessment, iterated assessment, Iterative assessment, O, self-assessment, sustainable feedback
@article{Morell2019,
  title     = {Iterated assessment and feedback improves student outcomes},
  author    = {Morell, Lesley J.},
  url       = {https://doi.org/10.1080/03075079.2019.1643301},
  doi       = {10.1080/03075079.2019.1643301},
  year      = {2019},
  date      = {2019-07-23},
  journal   = {Studies in Higher Education},
  volume    = {46},
  number    = {3},
  pages     = {485--496},
  abstract  = {Feedback is critically important to student learning, but the reduced frequency of assignments combined with isolated or stand-alone tasks reduces the opportunity for students to engage with feedback and use it effectively to enhance their learning. Here, I evaluate student attainment during a module consisting of eight iterated tasks where the task itself is the same but the academic content differs. At the end of the module, students then self-assess their eight submissions and select two for summative assessment. I demonstrate that achievement increases over the course of the module, and that choice is valuable in allowing students to achieve higher summative marks for the course than their formative marks would suggest. Students who performed more weakly at the start of the module saw the greatest benefits from practice and choice, suggesting that these students particularly can benefit from repeated cycles of feedback and increase their marks.},
  keywords  = {Assessment, iterated assessment, Iterative assessment, O, self-assessment, sustainable feedback},
  pubstate  = {published},
  tppubtype = {article}
}
Kardas, Michael; O’Brien, Ed
Easier Seen Than Done: Merely Watching Others Perform Can Foster an Illusion of Skill Acquisition Artikel
In: Psychological Science, Bd. 29, Nr. 4, S. 521-536, 2018.
Abstract | Links | BibTeX | Schlagwörter: empathy gap, O, open data, open materials, preregistered, repeated exposure, self-assessment
@article{Kardas2018,
  title     = {Easier Seen Than Done: Merely Watching Others Perform Can Foster an Illusion of Skill Acquisition},
  author    = {Kardas, Michael and O’Brien, Ed},
  url       = {https://doi.org/10.1177/0956797617740646},
  doi       = {10.1177/0956797617740646},
  year      = {2018},
  date      = {2018-02-16},
  journal   = {Psychological Science},
  volume    = {29},
  number    = {4},
  pages     = {521--536},
  abstract  = {Modern technologies such as YouTube afford unprecedented access to the skilled performances of other people. Six experiments (N = 2,225) reveal that repeatedly watching others can foster an illusion of skill acquisition. The more people merely watch others perform (without actually practicing themselves), the more they nonetheless believe they could perform the skill, too (Experiment 1). However, people’s actual abilities—from throwing darts and doing the moonwalk to playing an online game—do not improve after merely watching others, despite predictions to the contrary (Experiments 2–4). What do viewers see that makes them think they are learning? We found that extensive viewing allows people to track what steps to take (Experiment 5) but not how those steps feel when taking them. Accordingly, experiencing a “taste” of performing attenuates the illusion: Watching others juggle but then holding the pins oneself tempers perceived change in one’s own ability (Experiment 6). These findings highlight unforeseen problems for self-assessment when watching other people.},
  keywords  = {empathy gap, O, open data, open materials, preregistered, repeated exposure, self-assessment},
  pubstate  = {published},
  tppubtype = {article}
}
Nuhfer, Edward; Cogan, Christopher; Fleisher, Steven; Gaze, Eric C.; Wirth, Karl
Random Number Simulations Reveal How Random Noise Affects the Measurements and Graphical Portrayals of Self-Assessed Competency Artikel
In: Numeracy, Bd. 9, Ausg. 1, Nr. 4, 2016.
Abstract | Links | BibTeX | Schlagwörter: A, Dunning-Kruger effect, graphs, knowledge surveys, noise, numeracy, random number simulation, reliability, self-assessment, signal
@article{Nuhfer2016,
  title     = {Random Number Simulations Reveal How Random Noise Affects the Measurements and Graphical Portrayals of Self-Assessed Competency},
  author    = {Nuhfer, Edward and Cogan, Christopher and Fleisher, Steven and Gaze, Eric C. and Wirth, Karl},
  url       = {https://doi.org/10.5038/1936-4660.9.1.4},
  doi       = {10.5038/1936-4660.9.1.4},
  year      = {2016},
  date      = {2016-01-01},
  urldate   = {2016-01-01},
  journal   = {Numeracy},
  volume    = {9},
  number    = {4},
  issue     = {1},
  internal-note = {NOTE(review): number=4 appears to be the article number and issue=1 the journal issue (matches DOI suffix 9.1.4); standard BibTeX expects number=issue -- confirm against the site's rendering convention before swapping.},
  abstract  = {Self-assessment measures of competency are blends of an authentic self-assessment signal that researchers seek to measure and random disorder or "noise" that accompanies that signal. In this study, we use random number simulations to explore how random noise affects critical aspects of self-assessment investigations: reliability, correlation, critical sample size, and the graphical representations of self-assessment data. We show that graphical conventions common in the self-assessment literature introduce artifacts that invite misinterpretation. Troublesome conventions include: (y minus x) vs. (x) scatterplots; (y minus x) vs. (x) column graphs aggregated as quantiles; line charts that display data aggregated as quantiles; and some histograms. Graphical conventions that generate minimal artifacts include scatterplots with a best-fit line that depict (y) vs. (x) measures (self-assessed competence vs. measured competence) plotted by individual participant scores, and (y) vs. (x) scatterplots of collective average measures of all participants plotted item-by-item. This last graphic convention attenuates noise and improves the definition of the signal. To provide relevant comparisons across varied graphical conventions, we use a single dataset derived from paired measures of 1154 participants' self-assessed competence and demonstrated competence in science literacy. Our results show that different numerical approaches employed in investigating and describing self-assessment accuracy are not equally valid. By modeling this dataset with random numbers, we show how recognizing the varied expressions of randomness in self-assessment data can improve the validity of numeracy-based descriptions of self-assessment.},
  keywords  = {A, Dunning-Kruger effect, graphs, knowledge surveys, noise, numeracy, random number simulation, reliability, self-assessment, signal},
  pubstate  = {published},
  tppubtype = {article}
}
Foster, David; Miller, Harold L.
A new format for multiple-choice testing: Discrete-Option Multiple-Choice. Results from early studies Artikel
In: Psychology Science Quarterly, Bd. 51, Nr. 4, S. 355–369, 2009, ISSN: 1866-6140.
Abstract | Links | BibTeX | Schlagwörter: computerized testing, Discrete-Option Multiple-Choice, fairness, multiple choice, self-assessment, test security
@article{Foster2009,
  title     = {A new format for multiple-choice testing: {Discrete-Option Multiple-Choice}. Results from early studies},
  author    = {Foster, David and Miller, Harold L.},
  url       = {https://doaj.org/article/9851131c12144827a1369f195773d083},
  issn      = {1866-6140},
  year      = {2009},
  date      = {2009-04-01},
  urldate   = {2018-06-13},
  journal   = {Psychology Science Quarterly},
  volume    = {51},
  number    = {4},
  pages     = {355--369},
  abstract  = {The standard multiple-choice format has remained relatively unchanged for nearly 100 years, even over the past 25 years as multiple-choice tests have been computerized. We introduce a unique version of the multiple-choice format that has the potential to improve a test’s measurement and security properties, along with other advantages. We summarize our research with college students on course-level exams to demonstrate these benefits and to establish the Discrete-Option Multiple-Choice (DOMC) format as not only a viable way to measure skills and content knowledge, but an essential one.},
  keywords  = {computerized testing, Discrete-Option Multiple-Choice, fairness, multiple choice, self-assessment, test security},
  pubstate  = {published},
  tppubtype = {article}
}