@inproceedings{d11e31c4d11140bbb80c057216301143,
title = "Few-shot Question Generation for Reading Comprehension",
abstract = "According to the internationally recognized PIRLS (Progress in International Reading Literacy Study) assessment standards, reading comprehension questions should require not only information retrieval, but also higher-order processes such as inferencing, interpreting, and evaluating. However, such questions are often not available in large quantities for training question generation models. This paper investigates whether pre-trained Large Language Models (LLMs) can produce higher-order questions. Human assessment on a Chinese dataset shows that few-shot LLM prompting generates more usable and higher-order questions than two competitive neural baselines.",
author = "Poon, Yin and Lee, {John S.Y.} and Lam, {Yu Yan} and Suen, {Wing Lam} and Ong, {Elsie Li Chen} and Chu, {Samuel Kai Wah}",
note = "Publisher Copyright: {\textcopyright} 2024 Association for Computational Linguistics; 10th SIGHAN Workshop on Chinese Language Processing, SIGHAN 2024; Conference date: 16-08-2024",
year = "2024",
language = "English",
pages = "21--27",
editor = "Kam-Fai Wong and Min Zhang and Ruifeng Xu and Jing Li and Zhongyu Wei and Lin Gui and Bin Liang and Runcong Zhao",
booktitle = "Proceedings of the 10th SIGHAN Workshop on Chinese Language Processing (SIGHAN 2024)",
}