Corbin, Thomas; Dawson, Phillip; Nicola-Richmond, Kelli; Partridge, Helen
‘Where’s the line? It’s an absurd line’: towards a framework for acceptable uses of AI in assessment Artikel
In: Assessment & Evaluation in Higher Education, S. 1–13, 2025.
Abstract | Links | BibTeX | Schlagwörter: academic integrity, artificial intelligence, assessment design, higher education, O
@article{Corbin2025,
  author    = {Thomas Corbin and Phillip Dawson and Kelli Nicola-Richmond and Helen Partridge},
  title     = {‘Where’s the line? It’s an absurd line’: towards a framework for acceptable uses of AI in assessment},
  journal   = {Assessment & Evaluation in Higher Education},
  pages     = {1–13},
  year      = {2025},
  date      = {2025-01-24},
  doi       = {10.1080/02602938.2025.2456207},
  url       = {https://doi.org/10.1080/02602938.2025.2456207},
  abstract  = {As higher education grapples with ensuring assessment validity in an increasingly AI-populated time, institutions and educators are working to establish appropriate boundaries for AI use. However, little is known about how students and teachers conceptualize and experience these boundaries in practice. This study investigates how students and teachers navigate the line between acceptable and unacceptable AI use in assessment, drawing on a thematic analysis of qualitative interviews with 19 students and 12 staff at a large Australian university informed by social boundary theory. The titular metaphor of ‘drawing a line’ emerged organically from both students and staff in our interviews, revealing ongoing struggles to understand and articulate what counts as appropriate. We found that students frequently construct their own individually unique and often complex ethical frameworks for AI use. Teachers, meanwhile, report significant emotional burden and professional uncertainty as they attempt to understand and communicate what is appropriate to their students. Our analysis suggests that assessment policies for AI ought to move beyond simple prohibitions or permissions and begin to address three critical dimensions: the feasibility of enforcement, the preservation of authentic learning, and the emotional wellbeing of teachers and students.},
  keywords  = {academic integrity, artificial intelligence, assessment design, higher education, O},
  pubstate  = {published},
  tppubtype = {article}
}
Muehlhoff, Rainer; Henningsen, Marte
Chatbots im Schulunterricht: Wir testen das Fobizz-Tool zur automatischen Bewertung von Hausaufgaben Unveröffentlicht
Preprint auf arXiv:2412.06651, 2024.
Abstract | Links | BibTeX | Schlagwörter: AI, artificial intelligence, chatbots, correction, feedback, O
@unpublished{Muehlhoff2024,
title = {Chatbots im Schulunterricht: Wir testen das Fobizz-Tool zur automatischen Bewertung von Hausaufgaben},
author = {Rainer Muehlhoff and Marte Henningsen},
url = {https://doi.org/10.48550/arXiv.2412.06651
https://media.ccc.de/v/38c3-chatbots-im-schulunterricht},
doi = {10.48550/arXiv.2412.06651},
year = {2024},
date = {2024-12-09},
urldate = {2024-12-09},
issue = {arXiv:2412.06651},
abstract = {This study examines the AI-powered grading tool "AI Grading Assistant" by the German company Fobizz, designed to support teachers in evaluating and providing feedback on student assignments. Against the societal backdrop of an overburdened education system and rising expectations for artificial intelligence as a solution to these challenges, the investigation evaluates the tool's functional suitability through two test series. The results reveal significant shortcomings: The tool's numerical grades and qualitative feedback are often random and do not improve even when its suggestions are incorporated. The highest ratings are achievable only with texts generated by ChatGPT. False claims and nonsensical submissions frequently go undetected, while the implementation of some grading criteria is unreliable and opaque. Since these deficiencies stem from the inherent limitations of large language models (LLMs), fundamental improvements to this or similar tools are not immediately foreseeable. The study critiques the broader trend of adopting AI as a quick fix for systemic problems in education, concluding that Fobizz's marketing of the tool as an objective and time-saving solution is misleading and irresponsible. Finally, the study calls for systematic evaluation and subject-specific pedagogical scrutiny of the use of AI tools in educational contexts.},
howpublished = {Preprint auf arXiv:2412.06651},
note = {Preprint auf arXiv:2412.06651},
keywords = {AI, artificial intelligence, chatbots, correction, feedback, O},
pubstate = {published},
tppubtype = {unpublished}
}
UNESCO,
Declaration Draft, 2024.
Links | BibTeX | Schlagwörter: A, AI, artificial intelligence, KI, OER, open educational resources (OER), UNESCO
@misc{nokey,
title = {Draft Dubai Declaration on OER: Digital Public Goods and Emerging Technologies for Equitable and Inclusive Access to Knowledge},
author = {UNESCO},
editor = {UNESCO},
url = {https://oerdynamiccoalition.org/resources/draft-dubai-declaration-oer-digital-public-goods-and-emerging-technologies-equitable-and#draft-dubai-declaration-on-oer-digital-public-goods-and-emerging-technologies-for-equitable-and-inclusive-access-to-knowledge},
year = {2024},
date = {2024-11-20},
address = {Dubai},
howpublished = {Declaration Draft},
keywords = {A, AI, artificial intelligence, KI, OER, open educational ressources (OER), UNESCO},
pubstate = {published},
tppubtype = {misc}
}
Möller, Moritz; Nirmal, Gargi; Fabietti, Dario; Stierstorfer, Quintus; Zakhvatkin, Mark; Sommerfeld, Holger; Schütt, Sven
Revolutionising Distance Learning: A Comparative Study of Learning Progress with AI-Driven Tutoring Sonstige
Preprint, 2024.
Abstract | Links | BibTeX | Schlagwörter: A, artificial intelligence, higher education, large language models, university teaching
@misc{Möller2024,
title = {Revolutionising Distance Learning: A Comparative Study of Learning Progress with AI-Driven Tutoring},
author = {Moritz Möller and Gargi Nirmal and Dario Fabietti and Quintus Stierstorfer and Mark Zakhvatkin and Holger Sommerfeld and Sven Schütt},
url = {https://arxiv.org/abs/2403.14642v1
https://doi.org/10.48550/arXiv.2403.14642},
doi = {10.48550/arXiv.2403.14642},
year = {2024},
date = {2024-02-21},
issue = {arXiv:2403.14642v1},
abstract = {Generative AI is expected to have a vast, positive impact on education; however, at present, this potential has not yet been demonstrated at scale at university level. In this study, we present first evidence that generative AI can increase the speed of learning substantially in university students. We tested whether using the AI-powered teaching assistant Syntea affected the speed of learning of hundreds of distance learning students across more than 40 courses at the IU International University of Applied Sciences. Our analysis suggests that using Syntea reduced their study time substantially--by about 27% on average--in the third month after the release of Syntea. Taken together, the magnitude of the effect and the scalability of the approach implicate generative AI as a key lever to significantly improve and accelerate learning by personalisation.},
howpublished = {Preprint},
keywords = {A, artificial intelligence, higher education, large language models, university teaching},
pubstate = {published},
tppubtype = {misc}
}
Crawford, Kate; Schultz, Jason; Rettberg, Jill Walker; Suchman, Lucy; Andrejevic, Mark; Jaton, Florian; Ananny, Mike; Dick, Stephanie; Chun, Wendy Hui Kyong; Canute, Matt; Li, Xiaochang; Taylor, Linnet; Gentelet, Karine; Didier, Emmanuel
An AI Society Artikel
In: Issues in Science & Technology, Bd. 40, Ausg. 2, 2024.
Abstract | Links | BibTeX | Schlagwörter: AI, artificial intelligence, KI, Künstliche Intelligenz, O
@article{Crawford0000,
title = {An AI Society},
author = {Kate Crawford and Jason Schultz and Jill Walker Rettberg and Lucy Suchman and Mark Andrejevic and Florian Jaton and Mike Ananny and Stephanie Dick and Wendy Hui Kyong Chun and Matt Canute and Xiaochang Li and Linnet Taylor and Karine Gentelet and Emmanuel Didier},
editor = {Kate Crawford},
url = {https://issues.org/an-ai-society/},
year = {2024},
date = {2024-02-01},
journal = {Issues in Science & Technology},
volume = {40},
issue = {2},
abstract = {Artificial intelligence is reshaping society, but human forces shape AI. Getting governance wrong could mean narrowing cultural narratives, de-incentivizing creativity, and exploiting workers. In these 11 essays, social scientists and humanities experts explore how to harness the interaction between AI and society, revealing urgent avenues for research and policy.},
keywords = {AI, artificial intelligence, KI, Künstliche Intelligenz, O},
pubstate = {published},
tppubtype = {article}
}
Balepur, Nishant; Ravichander, Abhilasha; Rudinger, Rachel
Artifacts or Abduction: How Do LLMs Answer Multiple-Choice Questions Without the Question? Sonstige
In-progress preprint, 2024.
Abstract | Links | BibTeX | Schlagwörter: artificial intelligence, KI, large language models, LLM, multiple choice, O
@misc{balepur2024artifacts,
title = {Artifacts or Abduction: How Do LLMs Answer Multiple-Choice Questions Without the Question?},
author = {Nishant Balepur and Abhilasha Ravichander and Rachel Rudinger},
url = {https://doi.org/10.48550/arXiv.2402.12483},
doi = {10.48550/arXiv.2402.12483},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
abstract = {Multiple-choice question answering (MCQA) is often used to evaluate large language models (LLMs). To see if MCQA assesses LLMs as intended, we probe if LLMs can perform MCQA with choices-only prompts, where models must select the correct answer only from the choices. In three MCQA datasets and four LLMs, this prompt bests a majority baseline in 11/12 cases, with up to 0.33 accuracy gain. To help explain this behavior, we conduct an in-depth, black-box analysis on memorization, choice dynamics, and question inference. Our key findings are threefold. First, we find no evidence that the choices-only accuracy stems from memorization alone. Second, priors over individual choices do not fully explain choices-only accuracy, hinting that LLMs use the group dynamics of choices. Third, LLMs have some ability to infer a relevant question from choices, and surprisingly can sometimes even match the original question. We hope to motivate the use of stronger baselines in MCQA benchmarks, the design of robust MCQA datasets, and further efforts to explain LLM decision-making.},
howpublished = {In-progress preprint},
keywords = {artificial intelligence, KI, large language models, LLM, multiple choice, O},
pubstate = {published},
tppubtype = {misc}
}
Limburg, Anika; Salden, Peter; Mundorf, Margret; Weßels, Doris
Plagiarismus in Zeiten Künstlicher Intelligenz Artikel
In: Zeitschrift für Hochschulentwicklung (ZFHE), Bd. 17, Ausg. 3, S. 91–106, 2022, ISSN: 2219-6994.
Abstract | Links | BibTeX | Schlagwörter: A, AI, artificial intelligence, gute wissenschaftliche Praxis, KI, Künstliche Intelligenz, Natural Language Processing, plagiarism, Plagiarismus, reflective science, Schreibdidaktik, writing tools
@article{Limburg2022,
  author    = {Anika Limburg and Peter Salden and Margret Mundorf and Doris Weßels},
  editor    = {Ines Langemeyer and Ernst Schraube and Peter Tremp},
  title     = {Plagiarismus in Zeiten Künstlicher Intelligenz},
  journal   = {Zeitschrift für Hochschulentwicklung (ZFHE)},
  volume    = {17},
  issue     = {3},
  pages     = {91–106},
  year      = {2022},
  date      = {2022-10-01},
  issn      = {2219-6994},
  doi       = {10.3217/zfhe-17-03/06},
  url       = {https://doi.org/10.3217/zfhe-17-03/06},
  abstract  = {Software auf Basis Künstlicher Intelligenz aus dem Bereich des Natural Language Processing hat das Potenzial, wissenschaftliches Schreiben grundlegend zu verändern. Entsprechende Tools können bereits erstaunlich kohärente Texte in wissenschaftlichem Ton produzieren. Dies führt zu fundamentalen Fragen guter wissenschaftlicher Praxis und akademischer Kultur. Wir diskutieren diese Entwicklung vor dem Hintergrund einer Befragung deutscher Schreibdidaktiker:innen und arbeiten Fragen heraus, die im Zusammenhang mit KI-Schreibtools zukünftig von zentraler Bedeutung sein werden. Abschließend schlagen wir einen Passus für eine Selbstständigkeitserklärung vor, der den Entwicklungen Rechnung trägt.},
  keywords  = {A, AI, artificial intelligence, gute wissenschaftliche Praxis, KI, Künstliche Intelligenz, Natural Language Processing, plagiarism, Plagiarismus, reflective science, Schreibdidaktik, writing tools},
  pubstate  = {published},
  tppubtype = {article}
}