@inproceedings{huang2024factalign,
  title     = {{FactAlign}: Long-form Factuality Alignment of Large Language Models},
  author    = {Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024},
  year      = {2024},
  paper     = {https://arxiv.org/abs/2410.01691},
}
EMNLP 2024
PairDistill: Pairwise Relevance Distillation for Dense Passage Retrieval
@inproceedings{huang2024pairdistill,
  title     = {{PairDistill}: Pairwise Relevance Distillation for Dense Passage Retrieval},
  author    = {Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Proceedings of The 2024 Conference on Empirical Methods in Natural Language Processing ({EMNLP} 2024)},
  year      = {2024},
  paper     = {https://arxiv.org/abs/2410.01383},
}
Interspeech 2024
Investigating Decoder-only Large Language Models for Speech-to-text Translation
Chao-Wei Huang, Hui Lu, Hongyu Gong, and 4 more authors
In Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH, 2024
@inproceedings{huang2024investigating,
  title     = {Investigating Decoder-only Large Language Models for Speech-to-text Translation},
  author    = {Huang, Chao-Wei and Lu, Hui and Gong, Hongyu and Inaguma, Hirofumi and Kulikov, Ilia and Mavlyutov, Ruslan and Popuri, Sravya},
  booktitle = {Proceedings of the Annual Conference of the International Speech Communication Association, {INTERSPEECH}},
  volume    = {2024},
  year      = {2024},
  paper     = {https://arxiv.org/abs/2407.03169},
}
EACL 2024
Unsupervised Multilingual Dense Retrieval via Generative Pseudo Labeling
Chao-Wei Huang, Tsu-Yuan Hsu, Chen-An Li, and 2 more authors
In Findings of the Association for Computational Linguistics: EACL 2024, 2024
@inproceedings{huang2024umr,
  title     = {Unsupervised Multilingual Dense Retrieval via Generative Pseudo Labeling},
  author    = {Huang, Chao-Wei and Hsu, Tsu-Yuan and Li, Chen-An and Hsu, Chen-Yu and Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: {EACL} 2024},
  year      = {2024},
  paper     = {https://aclanthology.org/2024.findings-eacl.49/},
}
EMNLP 2024
Two Tales of Persona in LLMs: A Survey of Role-Playing and Personalization
Yu-Min Tseng, Yu-Chao Huang, Teng-Yun Hsiao, and 4 more authors
In Findings of the Association for Computational Linguistics: EMNLP 2024, 2024
@inproceedings{tseng2024talespersonallmssurvey,
  title     = {Two Tales of Persona in {LLMs}: A Survey of Role-Playing and Personalization},
  author    = {Tseng, Yu-Min and Huang, Yu-Chao and Hsiao, Teng-Yun and Chen, Wei-Lin and Huang, Chao-Wei and Meng, Yu and Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024},
  year      = {2024},
  paper     = {https://arxiv.org/abs/2406.01171},
}
EMNLP 2024
Editing the Mind of Giants: An In-Depth Exploration of Pitfalls of Knowledge Editing in Large Language Models
Cheng-Hsun Hsueh, Paul Kuo-Ming Huang, Tzu-Han Lin, and 4 more authors
In Findings of the Association for Computational Linguistics: EMNLP 2024, 2024
@inproceedings{hsueh2024editing,
  title     = {Editing the Mind of Giants: An In-Depth Exploration of Pitfalls of Knowledge Editing in Large Language Models},
  author    = {Hsueh, Cheng-Hsun and Huang, Paul Kuo-Ming and Lin, Tzu-Han and Liao, Che-Wei and Fang, Hung-Chieh and Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2024},
  year      = {2024},
  paper     = {https://arxiv.org/abs/2406.01436},
}
Arxiv
InstUPR: Instruction-based Unsupervised Passage Reranking with Large Language Models
@article{huang2024instupr,
  title   = {{InstUPR}: Instruction-based Unsupervised Passage Reranking with Large Language Models},
  author  = {Huang, Chao-Wei and Chen, Yun-Nung},
  journal = {arXiv preprint arXiv:2403.16435},
  year    = {2024},
  paper   = {https://arxiv.org/abs/2403.16435},
}
A Survey of Generative Information Retrieval
Tzu-Lin Kuo, Tzu-Wei Chiu, Tzung-Sheng Lin, and 3 more authors
@inproceedings{huang2023converser,
  title     = {{CONVERSER}: Few-shot Conversational Dense Retrieval with Synthetic Data Generation},
  author    = {Huang, Chao-Wei and Hsu, Chen-Yu and Hsu, Tsu-Yuan and Li, Chen-An and Chen, Yun-Nung},
  booktitle = {Proceedings of the 24th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  month     = sep,
  year      = {2023},
  address   = {Prague, Czechia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.sigdial-1.34},
  pages     = {381--387},
  paper     = {https://aclanthology.org/2023.sigdial-1.34/},
}
ACL 2023
Visually-Enhanced Phrase Understanding
Tsu-Yuan Hsu, Chen-An Li, Chao-Wei Huang, and 1 more author
In Findings of the Association for Computational Linguistics: ACL 2023, Sep 2023
@inproceedings{hsu2023visually,
  title     = {Visually-Enhanced Phrase Understanding},
  author    = {Hsu, Tsu-Yuan and Li, Chen-An and Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2023},
  pages     = {5879--5888},
  year      = {2023},
  paper     = {https://aclanthology.org/2023.findings-acl.363/},
}
2022
ClinicalNLP 2022
PLM-ICD: Automatic ICD Coding with Pretrained Language Models
@inproceedings{huang-etal-2022-plm,
  title     = {{PLM}-{ICD}: Automatic {ICD} Coding with Pretrained Language Models},
  author    = {Huang, Chao-Wei and Tsai, Shang-Chi and Chen, Yun-Nung},
  booktitle = {Proceedings of the 4th Clinical Natural Language Processing Workshop},
  month     = jul,
  year      = {2022},
  address   = {Seattle, WA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.clinicalnlp-1.2},
  pages     = {10--20},
  paper     = {https://aclanthology.org/2022.clinicalnlp-1.2/},
}
AACL 2022
Open-Domain Conversational Question Answering with Historical Answers
Hung-Chieh Fang, Kuo-Han Hung, Chao-Wei Huang, and 1 more author
In Findings of the Association for Computational Linguistics: AACL-IJCNLP 2022, Jul 2022
@inproceedings{fang2022open,
  title     = {Open-Domain Conversational Question Answering with Historical Answers},
  author    = {Fang, Hung-Chieh and Hung, Kuo-Han and Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: {AACL}-{IJCNLP} 2022},
  pages     = {319--326},
  year      = {2022},
  paper     = {https://aclanthology.org/2022.findings-aacl.30/},
}
SIGDIAL 2022
Controllable User Dialogue Act Augmentation for Dialogue State Tracking
Chun-Mao Lai, Ming-Hao Hsu, Chao-Wei Huang, and 1 more author
In Proceedings of the 23rd Annual Meeting of the Special Interest Group on Discourse and Dialogue, Jul 2022
@inproceedings{lai2022controllable,
  title     = {Controllable User Dialogue Act Augmentation for Dialogue State Tracking},
  author    = {Lai, Chun-Mao and Hsu, Ming-Hao and Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Proceedings of the 23rd Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  year      = {2022},
  paper     = {https://aclanthology.org/2022.sigdial-1.5/},
}
Alexa Prize
Miutsu: NTU’s TaskBot for the Alexa Prize
Yen-Ting Lin, Hui-Chi Kuo, Ze-Song Xu, and 5 more authors
@article{lin2022miutsu,
  title   = {Miutsu: {NTU}'s {TaskBot} for the {Alexa Prize}},
  author  = {Lin, Yen-Ting and Kuo, Hui-Chi and Xu, Ze-Song and Chiu, Ssu and Hung, Chieh-Chi and Chen, Yi-Cheng and Huang, Chao-Wei and Chen, Yun-Nung},
  journal = {arXiv preprint arXiv:2205.07446},
  year    = {2022},
  paper   = {https://arxiv.org/abs/2205.07446},
}
2021
NAACL 2021
Modeling Diagnostic Label Correlation for Automatic ICD Coding
In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Jul 2021
@inproceedings{tsai2021modeling,
  title     = {Modeling Diagnostic Label Correlation for Automatic {ICD} Coding},
  author    = {Tsai, Shang-Chi and Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  pages     = {4043--4052},
  year      = {2021},
  paper     = {https://aclanthology.org/2021.naacl-main.318/},
}
2020
DSTC 9
Overview of the Ninth Dialog System Technology Challenge: DSTC9
Chulaka Gunasekara, Seokhwan Kim, Luis Fernando D’Haro, and 8 more authors
@article{gunasekara2020overview,
  title   = {Overview of the Ninth Dialog System Technology Challenge: {DSTC9}},
  author  = {Gunasekara, Chulaka and Kim, Seokhwan and D'Haro, Luis Fernando and Rastogi, Abhinav and Chen, Yun-Nung and Eric, Mihail and Hedayatnia, Behnam and Gopalakrishnan, Karthik and Liu, Yang and Huang, Chao-Wei and others},
  journal = {arXiv preprint arXiv:2011.06486},
  year    = {2020},
  paper   = {https://arxiv.org/abs/2011.06486},
}
DSTC 7
Learning multi-level information for dialogue response selection by highway recurrent transformer
Ting-Rui Chiang, Chao-Wei Huang, Shang-Yu Su, and 1 more author
@article{chiang2020learning,
  title     = {Learning Multi-level Information for Dialogue Response Selection by Highway Recurrent Transformer},
  author    = {Chiang, Ting-Rui and Huang, Chao-Wei and Su, Shang-Yu and Chen, Yun-Nung},
  journal   = {Computer Speech \& Language},
  volume    = {63},
  pages     = {101073},
  year      = {2020},
  publisher = {Academic Press},
  paper     = {https://www.sciencedirect.com/science/article/abs/pii/S0885230820300061},
}
DSTC 7
RAP-Net: Recurrent attention pooling networks for dialogue response selection
Chao-Wei Huang, Ting-Rui Chiang, Shang-Yu Su, and 1 more author
@article{huang2020rap,
  title     = {{RAP-Net}: Recurrent Attention Pooling Networks for Dialogue Response Selection},
  author    = {Huang, Chao-Wei and Chiang, Ting-Rui and Su, Shang-Yu and Chen, Yun-Nung},
  journal   = {Computer Speech \& Language},
  volume    = {63},
  pages     = {101079},
  year      = {2020},
  publisher = {Academic Press},
  paper     = {https://www.sciencedirect.com/science/article/abs/pii/S0885230820300127},
}
ICASSP 2020
Learning ASR-robust contextualized embeddings for spoken language understanding
@inproceedings{huang2020learning,
  title        = {Learning {ASR}-Robust Contextualized Embeddings for Spoken Language Understanding},
  author       = {Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle    = {ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages        = {8009--8013},
  year         = {2020},
  organization = {IEEE},
  paper        = {https://www.csie.ntu.edu.tw/~yvchen/doc/ICASSP20_SpokenVec.pdf},
}
ACL 2020
Learning Spoken Language Representations with Neural Lattice Language Modeling
Pre-trained language models have achieved huge improvement on many NLP tasks. However, these methods are usually designed for written text, so they do not consider the properties of spoken language. Therefore, this paper aims at generalizing the idea of language model pre-training to lattices generated by recognition systems. We propose a framework that trains neural lattice language models to provide contextualized representations for spoken language understanding tasks. The proposed two-stage pre-training approach reduces the demands of speech data and has better efficiency. Experiments on intent detection and dialogue act recognition datasets demonstrate that our proposed method consistently outperforms strong baselines when evaluated on spoken inputs. The code is available at https://github.com/MiuLab/Lattice-ELMo.
@inproceedings{huang-chen-2020-learning,
  title     = {Learning Spoken Language Representations with Neural Lattice Language Modeling},
  author    = {Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
  month     = jul,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/2020.acl-main.347},
  pages     = {3764--3769},
  paper     = {https://www.aclweb.org/anthology/2020.acl-main.347/},
}
ACL 2020
Towards Unsupervised Language Understanding and Generation by Joint Dual Learning
@inproceedings{su2020towards,
  title     = {Towards Unsupervised Language Understanding and Generation by Joint Dual Learning},
  author    = {Su, Shang-Yu and Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL 2020)},
  pages     = {671--680},
  year      = {2020},
  paper     = {https://aclanthology.org/2020.acl-main.63/},
}
2019
ASRU 2019
Adapting pretrained transformer to lattices for spoken language understanding
@inproceedings{huang2019adapting,
  title        = {Adapting Pretrained Transformer to Lattices for Spoken Language Understanding},
  author       = {Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle    = {2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)},
  pages        = {845--852},
  year         = {2019},
  organization = {IEEE},
  paper        = {https://arxiv.org/abs/2011.00780},
}
ACL 2019
Dual Supervised Learning for Natural Language Understanding and Generation
@inproceedings{su2019dual,
  title     = {Dual Supervised Learning for Natural Language Understanding and Generation},
  author    = {Su, Shang-Yu and Huang, Chao-Wei and Chen, Yun-Nung},
  booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (ACL 2019)},
  pages     = {5472--5477},
  year      = {2019},
  paper     = {https://aclanthology.org/P19-1545/},
}