@inproceedings{657480f3ee8b4217a6f44b5c0223cf7b,
  title         = {Learning Task Sampling Policy for Multitask Learning},
  abstract      = {It has been shown that training multi-task models with auxiliary tasks can improve the target tasks quality through cross-task transfer. However, the importance of each auxiliary task to the primary task is likely not known a priori. While the importance weights of auxiliary tasks can be manually tuned, it becomes practically infeasible with the number of tasks scaling up. To address this, we propose a search method that automatically assigns importance weights. We formulate it as a reinforcement learning problem and learn a task sampling schedule based on evaluation accuracy of the multi-task model. Our empirical evaluation on XNLI and GLUE shows that our method outperforms uniform sampling and the corresponding single-task baseline.},
  author        = {Sundararaman, Dhanasekar and Tsai, Henry and Lee, Kuang Huei and Turc, Iulia and Carin, Lawrence},
  editor        = {Moens, Marie-Francine and Huang, Xuanjing and Specia, Lucia and Yih, Scott Wen-tau},
  booktitle     = {Findings of the Association for Computational Linguistics: {EMNLP} 2021},
  series        = {Findings of the Association for Computational Linguistics, Findings of ACL: EMNLP 2021},
  publisher     = {Association for Computational Linguistics (ACL)},
  address       = {Punta Cana, Dominican Republic},
  month         = nov,
  year          = {2021},
  pages         = {4410--4415},
  language      = {English (US)},
  note          = {Publisher Copyright: {\textcopyright} 2021 Association for Computational Linguistics.; 2021 Findings of the Association for Computational Linguistics, Findings of ACL: EMNLP 2021 ; Conference date: 07-11-2021 Through 11-11-2021},
  internal-note = {review: address changed from country (United States) to the EMNLP 2021 venue city per ACL Anthology; confirm against the published record},
}