Pölert, Hauke
Blogbeitrag, 2025.
Links | BibTeX | Schlagwörter: A, feedback, Korrektur
@misc{Pölert2025,
  title        = {Stoppt den Korrekturwahnsinn! oder: Warum wir spätestens 2025 unsere Korrekturpraxis überdenken sollten (De-Implementierung nach Benedikt Wisniewski)},
  author       = {Hauke Pölert},
  url          = {https://unterrichten.digital/2025/01/06/korrekturen-feedback-de-implementierung-wisniewski/},
  year         = {2025},
  date         = {2025-01-06},
  urldate      = {2025-01-06},
  howpublished = {Blogbeitrag},
  keywords     = {A, feedback, Korrektur},
  pubstate     = {published},
  tppubtype    = {misc}
}
Muehlhoff, Rainer; Henningsen, Marte
Chatbots im Schulunterricht: Wir testen das Fobizz-Tool zur automatischen Bewertung von Hausaufgaben Unveröffentlicht
Preprint auf arXiv:2412.06651, 2024.
Abstract | Links | BibTeX | Schlagwörter: AI, artificial intelligence, chatbots, correction, feedback, O
@unpublished{Muehlhoff2024,
title = {Chatbots im Schulunterricht: Wir testen das Fobizz-Tool zur automatischen Bewertung von Hausaufgaben},
author = {Rainer Muehlhoff and Marte Henningsen},
url = {https://doi.org/10.48550/arXiv.2412.06651
https://media.ccc.de/v/38c3-chatbots-im-schulunterricht},
doi = {10.48550/arXiv.2412.06651},
eprint = {2412.06651},
eprinttype = {arXiv},
year = {2024},
date = {2024-12-09},
urldate = {2024-12-09},
note = {Preprint auf arXiv:2412.06651},
abstract = {This study examines the AI-powered grading tool "AI Grading Assistant" by the German company Fobizz, designed to support teachers in evaluating and providing feedback on student assignments. Against the societal backdrop of an overburdened education system and rising expectations for artificial intelligence as a solution to these challenges, the investigation evaluates the tool's functional suitability through two test series. The results reveal significant shortcomings: The tool's numerical grades and qualitative feedback are often random and do not improve even when its suggestions are incorporated. The highest ratings are achievable only with texts generated by ChatGPT. False claims and nonsensical submissions frequently go undetected, while the implementation of some grading criteria is unreliable and opaque. Since these deficiencies stem from the inherent limitations of large language models (LLMs), fundamental improvements to this or similar tools are not immediately foreseeable. The study critiques the broader trend of adopting AI as a quick fix for systemic problems in education, concluding that Fobizz's marketing of the tool as an objective and time-saving solution is misleading and irresponsible. Finally, the study calls for systematic evaluation and subject-specific pedagogical scrutiny of the use of AI tools in educational contexts.},
howpublished = {Preprint auf arXiv:2412.06651},
keywords = {AI, artificial intelligence, chatbots, correction, feedback, O},
pubstate = {published},
tppubtype = {unpublished}
}
Eskreis-Winkler, Lauren; Fishbach, Ayelet
Not Learning From Failure—the Greatest Failure of All Artikel
In: Psychological Science, Bd. 30, Nr. 12, S. 1733–1744, 2019, ISSN: 1467-9280.
Abstract | Links | BibTeX | Schlagwörter: ego threat, failure, feedback, Learning, motivation, O, open data, open materials, preregistered
@article{Eskreis-Winkler2019,
title = {Not Learning From Failure—the Greatest Failure of All},
author = {Lauren Eskreis-Winkler and Ayelet Fishbach},
url = {https://doi.org/10.1177/0956797619881133},
doi = {10.1177/0956797619881133},
issn = {1467-9280},
year = {2019},
date = {2019-11-08},
urldate = {2019-12-18},
journal = {Psychological Science},
volume = {30},
number = {12},
pages = {1733–1744},
abstract = {Our society celebrates failure as a teachable moment. Yet in five studies (total N = 1,674), failure did the opposite: It undermined learning. Across studies, participants answered binary-choice questions, following which they were told they answered correctly (success feedback) or incorrectly (failure feedback). Both types of feedback conveyed the correct answer, because there were only two answer choices. However, on a follow-up test, participants learned less from failure feedback than from success feedback. This effect was replicated across professional, linguistic, and social domains—even when learning from failure was less cognitively taxing than learning from success and even when learning was incentivized. Participants who received failure feedback also remembered fewer of their answer choices. Why does failure undermine learning? Failure is ego threatening, which causes people to tune out. Participants learned less from personal failure than from personal success, yet they learned just as much from other people’s failure as from others’ success. Thus, when ego concerns are muted, people tune in and learn from failure.},
keywords = {ego threat, failure, feedback, Learning, motivation, O, open data, open materials, preregistered},
pubstate = {published},
tppubtype = {article}
}
Fyfe, Emily; de Leeuw, Joshua; Carvalho, Paulo; Goldstone, Robert; Sherman, Janelle; Admiraal, David; Alford, Laura; Bonner, Alison; Brassil, Chad; Brooks, Christopher; Carbonetto, Tracey; Chang, Sau Hou; Cruz, Laura; Czymoniewicz-Klippel, Melina; Daniel, Frances; Driessen, Michelle D; Habashy, Noel; Hanson-Bradley, Carrie; Hirt, Ed; Carbonell, Virginia Hojas; Jackson, Daniel; Jones, Shay; Keagy, Jennifer; Keith, Brandi; Malmquist, Sarah; McQuarrie, Barry; Metzger, Kelsey; Min, Maung; Patil, Sameer; Patrick, Ryan; Pelaprat, Etienne; Petrunich-Rutherford, Maureen; Porter, Meghan; Prescott, Kristina; Reck, Cathrine; Renner, Terri; Robbins, Eric; Smith, Adam; Stuczynski, Phil; Thompson, Jaye; Tsotakos, Nikolaos; Turk, Judith; Unruh, Kyle; Webb, Jennifer; Whitehead, Stephanie; Wisniewski, Elaine; Motz, Benjamin
In: Advances in Methods and Practices in Psychological Science (Preprint), 2019.
Abstract | Links | BibTeX | Schlagwörter: Education, Evidence-Based Practices, Experiment, feedback, O, Reproducibility
@article{Fyfe2019,
title = {{ManyClasses} 1: Assessing the generalizable effect of immediate versus delayed feedback across many college classes},
author = {Emily Fyfe and Joshua de Leeuw and Paulo Carvalho and Robert Goldstone and Janelle Sherman and David Admiraal and Laura Alford and Alison Bonner and Chad Brassil and Christopher Brooks and Tracey Carbonetto and Sau Hou Chang and Laura Cruz and Melina Czymoniewicz-Klippel and Frances Daniel and Michelle D Driessen and Noel Habashy and Carrie Hanson-Bradley and Ed Hirt and Virginia Hojas Carbonell and Daniel Jackson and Shay Jones and Jennifer Keagy and Brandi Keith and Sarah Malmquist and Barry McQuarrie and Kelsey Metzger and Maung Min and Sameer Patil and Ryan Patrick and Etienne Pelaprat and Maureen Petrunich-Rutherford and Meghan Porter and Kristina Prescott and Cathrine Reck and Terri Renner and Eric Robbins and Adam Smith and Phil Stuczynski and Jaye Thompson and Nikolaos Tsotakos and Judith Turk and Kyle Unruh and Jennifer Webb and Stephanie Whitehead and Elaine Wisniewski and Benjamin Motz},
url = {https://doi.org/10.31234/osf.io/4mvyh
https://osf.io/q84t7/},
doi = {10.31234/osf.io/4mvyh},
year = {2019},
date = {2019-05-01},
journal = {Advances in Methods and Practices in Psychological Science (Preprint)},
abstract = {Psychology researchers have long attempted to identify educational practices that improve student learning. However, experimental research on these practices is often conducted in laboratory contexts or in a single course, threatening the external validity of the results. In this paper, we establish an experimental paradigm for evaluating the benefits of recommended practices across a variety of authentic educational contexts – a model we call ManyClasses. The core feature is that researchers examine the same research question and measure the same experimental effect across many classes spanning a range of topics, institutions, teacher implementations, and student populations. We report the first ManyClasses study, which examined how the timing of feedback on class assignments, either immediate or delayed by a few days, affected subsequent performance on class assessments. Across 38 classes, the overall estimate for the effect of feedback timing was 0.002 (95\% HDI -0.05 to 0.05), indicating that there was no effect of immediate versus delayed feedback on student learning that generalizes across classes. Further, there were no credibly non-zero effects for 40 pre-registered moderators related to class-level and student-level characteristics. Yet, our results provide hints that in certain kinds of classes, which were under-sampled in the current study, there may be modest advantages for delayed feedback. More broadly, these findings provide insights regarding the feasibility of conducting within-class randomized experiments across a range of naturally occurring learning environments.},
keywords = {Education, Evidence-Based Practices, Experiment, feedback, O, Reproducibility},
pubstate = {published},
tppubtype = {article}
}
Truscott, John
The effect of error correction on learners’ ability to write accurately Artikel
In: Journal of Second Language Writing, Bd. 16, Ausg. 4, S. 255–272, 2007, ISSN: 1873-1422.
Abstract | Links | BibTeX | Schlagwörter: A, correction, feedback, Korrektur, writing
@article{Truscott2007,
title = {The effect of error correction on learners’ ability to write accurately},
author = {John Truscott},
url = {https://doi.org/10.1016/j.jslw.2007.06.003},
doi = {10.1016/j.jslw.2007.06.003},
issn = {1873-1422},
year = {2007},
date = {2007-12-01},
journal = {Journal of Second Language Writing},
volume = {16},
number = {4},
pages = {255–272},
abstract = {The paper evaluates and synthesizes research on the question of how error correction affects learners’ ability to write accurately, combining qualitative analysis of the relevant studies with quantitative meta-analysis of their findings. The conclusions are that, based on existing research: (a) the best estimate is that correction has a small negative effect on learners’ ability to write accurately, and (b) we can be 95\% confident that if it has any actual benefits, they are very small. This analysis is followed by discussion of factors that have probably biased the findings in favor of correction groups, the implication being that the conclusions of the meta-analysis probably underestimate the failure of correction.},
keywords = {A, correction, feedback, Korrektur, writing},
pubstate = {published},
tppubtype = {article}
}
Kluger, Avraham N.; DeNisi, Angelo
In: Psychological Bulletin, Bd. 119, Ausg. 2, S. 254–284, 1996.
Abstract | Links | BibTeX | Schlagwörter: A, feedback
@article{Kluger1996,
title = {The effects of feedback interventions on performance: A historical review, a meta-analysis, and a preliminary feedback intervention theory},
author = {Avraham N. Kluger and Angelo DeNisi},
url = {https://psycnet.apa.org/doi/10.1037/0033-2909.119.2.254
https://psycnet.apa.org/record/1996-02773-003},
doi = {10.1037/0033-2909.119.2.254},
year = {1996},
date = {1996-01-01},
journal = {Psychological Bulletin},
volume = {119},
number = {2},
pages = {254–284},
abstract = {Since the beginning of the century, feedback interventions (FIs) produced negative--but largely ignored--effects on performance. A meta-analysis (607 effect sizes; 23,663 observations) suggests that FIs improved performance on average ( d = .41) but that over one-third of the FIs decreased performance. This finding cannot be explained by sampling error, feedback sign, or existing theories. The authors proposed a preliminary FI theory (FIT) and tested it with moderator analyses. The central assumption of FIT is that FIs change the locus of attention among 3 general and hierarchically organized levels of control: task learning, task motivation, and meta-tasks (including self-related) processes. The results suggest that FI effectiveness decreases as attention moves up the hierarchy closer to the self and away from the task. These findings are further moderated by task characteristics that are still poorly understood. (PsycInfo Database Record (c) 2020 APA, all rights reserved)},
keywords = {A, feedback},
pubstate = {published},
tppubtype = {article}
}