Merge pull request #2 from WING-NUS/hugoblox-import-publications
Hugo Blox Builder - Import latest publications
Showing 20 changed files with 533 additions and 0 deletions.
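Each publication in this import arrives as a pair of files: a BibTeX record and a Hugo front-matter page. As a rough sketch of the mapping (this is not the actual Hugo Blox importer; it assumes the third-party `bibtexparser` and `PyYAML` packages and uses a made-up minimal entry), the following Python produces front matter of the same shape as the files shown in the diffs below:

```python
# Hedged sketch only: NOT the Hugo Blox importer, just an illustration of the
# BibTeX -> front-matter mapping visible in this commit's diffs.
# Assumes the third-party packages `bibtexparser` and `PyYAML` are installed.
import bibtexparser
import yaml

# Hypothetical minimal entry, shaped like the ones added below.
BIB = r"""
@inproceedings{jain-etal-2022-comparative,
  author    = {Jain, Saurabh and Miao, Yisong and Kan, Min-Yen},
  title     = {Comparative Snippet Generation},
  booktitle = {Proceedings of the Fifth Workshop on e-Commerce and NLP (ECNLP 5)},
  year      = {2022},
  doi       = {10.18653/v1/2022.ecnlp-1.7},
  url       = {https://aclanthology.org/2022.ecnlp-1.7},
}
"""

entry = bibtexparser.loads(BIB).entries[0]

front_matter = {
    "title": entry["title"],
    # BibTeX stores "Last, First and Last, First"; Hugo expects one "First Last" per author.
    "authors": [" ".join(reversed(name.split(", "))) for name in entry["author"].split(" and ")],
    "date": f"{entry['year']}-01-01",
    "publication_types": ["paper-conference"],
    "publication": f"*{entry['booktitle']}*",
    "doi": entry.get("doi", ""),
    "links": [{"name": "URL", "url": entry.get("url", "")}],
}

# One publication per page, e.g. content/publication/<bibtex-key>/index.md
print("---\n" + yaml.safe_dump(front_matter, sort_keys=False, allow_unicode=True) + "---")
```

In the generated site content, each record typically lands in its own page bundle under `content/publication/`, keyed by the BibTeX citation key.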
@@ -0,0 +1,26 @@
@inproceedings{aksu-etal-2021-velocidapter,
  abstract = {We introduce a synthetic dialogue generation framework, Velocidapter, which addresses the corpus availability problem for dialogue comprehension. Velocidapter augments datasets by simulating synthetic conversations for a task-oriented dialogue domain, requiring a small amount of bootstrapping work for each new domain. We evaluate the efficacy of our framework on a task-oriented dialogue comprehension dataset, MRCWOZ, which we curate by annotating questions for slots in the restaurant, taxi, and hotel domains of the MultiWOZ 2.2 dataset (Zang et al., 2020). We run experiments within a low-resource setting, where we pretrain a model on SQuAD, fine-tuning it on either a small original data or on the synthetic data generated by our framework. Velocidapter shows significant improvements using both the transformer-based BERTBase and BiDAF as base models. We further show that the framework is easy to use by novice users and conclude that Velocidapter can greatly help training over task-oriented dialogues, especially for low-resourced emerging domains.},
  address = {Singapore and Online},
  author = {Aksu, Ibrahim Taha and
    Liu, Zhengyuan and
    Kan, Min-Yen and
    Chen, Nancy},
  booktitle = {Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  doi = {10.18653/v1/2021.sigdial-1.14},
  editor = {Li, Haizhou and
    Levow, Gina-Anne and
    Yu, Zhou and
    Gupta, Chitralekha and
    Sisman, Berrak and
    Cai, Siqi and
    Vandyke, David and
    Dethlefs, Nina and
    Wu, Yan and
    Li, Junyi Jessy},
  month = {July},
  pages = {133--143},
  publisher = {Association for Computational Linguistics},
  title = {Velocidapter: Task-oriented Dialogue Comprehension Modeling Pairing Synthetic Text Generation with Domain Adaptation},
  url = {https://aclanthology.org/2021.sigdial-1.14},
  year = {2021}
}
@@ -0,0 +1,32 @@
---
title: 'Velocidapter: Task-oriented Dialogue Comprehension Modeling Pairing Synthetic
  Text Generation with Domain Adaptation'
authors:
- Ibrahim Taha Aksu
- Zhengyuan Liu
- Min-Yen Kan
- Nancy Chen
date: '2021-07-01'
publishDate: '2024-07-05T17:09:42.645613Z'
publication_types:
- paper-conference
publication: '*Proceedings of the 22nd Annual Meeting of the Special Interest Group
  on Discourse and Dialogue*'
doi: 10.18653/v1/2021.sigdial-1.14
abstract: We introduce a synthetic dialogue generation framework, Velocidapter, which
  addresses the corpus availability problem for dialogue comprehension. Velocidapter
  augments datasets by simulating synthetic conversations for a task-oriented dialogue
  domain, requiring a small amount of bootstrapping work for each new domain. We evaluate
  the efficacy of our framework on a task-oriented dialogue comprehension dataset,
  MRCWOZ, which we curate by annotating questions for slots in the restaurant, taxi,
  and hotel domains of the MultiWOZ 2.2 dataset (Zang et al., 2020). We run experiments
  within a low-resource setting, where we pretrain a model on SQuAD, fine-tuning it
  on either a small original data or on the synthetic data generated by our framework.
  Velocidapter shows significant improvements using both the transformer-based BERTBase
  and BiDAF as base models. We further show that the framework is easy to use by novice
  users and conclude that Velocidapter can greatly help training over task-oriented
  dialogues, especially for low-resourced emerging domains.
links:
- name: URL
  url: https://aclanthology.org/2021.sigdial-1.14
---
@@ -0,0 +1,19 @@
@inproceedings{aksu-etal-2022-n,
  abstract = {Augmentation of task-oriented dialogues has followed standard methods used for plain-text such as back-translation, word-level manipulation, and paraphrasing despite its richly annotated structure. In this work, we introduce an augmentation framework that utilizes belief state annotations to match turns from various dialogues and form new synthetic dialogues in a bottom-up manner. Unlike other augmentation strategies, it operates with as few as five examples. Our augmentation strategy yields significant improvements when both adapting a DST model to a new domain, and when adapting a language model to the DST task, on evaluations with TRADE and TOD-BERT models. Further analysis shows that our model performs better on seen values during training, and it is also more robust to unseen values. We conclude that exploiting belief state annotations enhances dialogue augmentation and results in improved models in n-shot training scenarios.},
  address = {Dublin, Ireland},
  author = {Aksu, Ibrahim and
    Liu, Zhengyuan and
    Kan, Min-Yen and
    Chen, Nancy},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2022},
  doi = {10.18653/v1/2022.findings-acl.131},
  editor = {Muresan, Smaranda and
    Nakov, Preslav and
    Villavicencio, Aline},
  month = {May},
  pages = {1659--1671},
  publisher = {Association for Computational Linguistics},
  title = {N-Shot Learning for Augmenting Task-Oriented Dialogue State Tracking},
  url = {https://aclanthology.org/2022.findings-acl.131},
  year = {2022}
}
@@ -0,0 +1,29 @@
---
title: N-Shot Learning for Augmenting Task-Oriented Dialogue State Tracking
authors:
- Ibrahim Aksu
- Zhengyuan Liu
- Min-Yen Kan
- Nancy Chen
date: '2022-05-01'
publishDate: '2024-07-05T17:09:42.588862Z'
publication_types:
- paper-conference
publication: '*Findings of the Association for Computational Linguistics: ACL 2022*'
doi: 10.18653/v1/2022.findings-acl.131
abstract: Augmentation of task-oriented dialogues has followed standard methods used
  for plain-text such as back-translation, word-level manipulation, and paraphrasing
  despite its richly annotated structure. In this work, we introduce an augmentation
  framework that utilizes belief state annotations to match turns from various dialogues
  and form new synthetic dialogues in a bottom-up manner. Unlike other augmentation
  strategies, it operates with as few as five examples. Our augmentation strategy
  yields significant improvements when both adapting a DST model to a new domain,
  and when adapting a language model to the DST task, on evaluations with TRADE and
  TOD-BERT models. Further analysis shows that our model performs better on seen values
  during training, and it is also more robust to unseen values. We conclude that exploiting
  belief state annotations enhances dialogue augmentation and results in improved
  models in n-shot training scenarios.
links:
- name: URL
  url: https://aclanthology.org/2022.findings-acl.131
---
@@ -0,0 +1,24 @@
@inproceedings{dou-etal-2022-towards,
  abstract = {In this paper, we study the problem of knowledge-intensive text-to-SQL, in which domain knowledge is necessary to parse expert questions into SQL queries over domain-specific tables. We formalize this scenario by building a new benchmark KnowSQL consisting of domain-specific questions covering various domains. We then address this problem by representing formulaic knowledge rather than by annotating additional data examples. More concretely, we construct a formulaic knowledge bank as a domain knowledge base and propose a framework (ReGrouP) to leverage this formulaic knowledge during parsing. Experiments using ReGrouP demonstrate a significant 28.2% improvement overall on KnowSQL.},
  address = {Abu Dhabi, United Arab Emirates},
  author = {Dou, Longxu and
    Gao, Yan and
    Liu, Xuqi and
    Pan, Mingyang and
    Wang, Dingzirui and
    Che, Wanxiang and
    Zhan, Dechen and
    Kan, Min-Yen and
    Lou, Jian-Guang},
  booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
  doi = {10.18653/v1/2022.emnlp-main.350},
  editor = {Goldberg, Yoav and
    Kozareva, Zornitsa and
    Zhang, Yue},
  month = {December},
  pages = {5240--5253},
  publisher = {Association for Computational Linguistics},
  title = {Towards Knowledge-Intensive Text-to-SQL Semantic Parsing with Formulaic Knowledge},
  url = {https://aclanthology.org/2022.emnlp-main.350},
  year = {2022}
}
@@ -0,0 +1,32 @@
---
title: Towards Knowledge-Intensive Text-to-SQL Semantic Parsing with Formulaic Knowledge
authors:
- Longxu Dou
- Yan Gao
- Xuqi Liu
- Mingyang Pan
- Dingzirui Wang
- Wanxiang Che
- Dechen Zhan
- Min-Yen Kan
- Jian-Guang Lou
date: '2022-12-01'
publishDate: '2024-07-05T17:09:42.603419Z'
publication_types:
- paper-conference
publication: '*Proceedings of the 2022 Conference on Empirical Methods in Natural
  Language Processing*'
doi: 10.18653/v1/2022.emnlp-main.350
abstract: In this paper, we study the problem of knowledge-intensive text-to-SQL,
  in which domain knowledge is necessary to parse expert questions into SQL queries
  over domain-specific tables. We formalize this scenario by building a new benchmark
  KnowSQL consisting of domain-specific questions covering various domains. We then
  address this problem by representing formulaic knowledge rather than by annotating
  additional data examples. More concretely, we construct a formulaic knowledge bank
  as a domain knowledge base and propose a framework (ReGrouP) to leverage this formulaic
  knowledge during parsing. Experiments using ReGrouP demonstrate a significant 28.2%
  improvement overall on KnowSQL.
links:
- name: URL
  url: https://aclanthology.org/2022.emnlp-main.350
---
@@ -0,0 +1,19 @@
@inproceedings{han-etal-2022-mm,
  abstract = {Existing multimodal tasks mostly target at the complete input modality setting, i.e., each modality is either complete or completely missing in both training and test sets. However, the randomly missing situations have still been underexplored. In this paper, we present a novel approach named MM-Align to address the missing-modality inference problem. Concretely, we propose 1) an alignment dynamics learning module based on the theory of optimal transport (OT) for missing data imputation; 2) a denoising training algorithm to enhance the quality of imputation as well as the accuracy of model predictions. Compared with previous generative methods which devote to restoring the missing inputs, MM-Align learns to capture and imitate the alignment dynamics between modality sequences. Results of comprehensive experiments on two multimodal tasks empirically demonstrate that our method can perform more accurate and faster inference and alleviate the overfitting issue under different missing conditions.},
  address = {Abu Dhabi, United Arab Emirates},
  author = {Han, Wei and
    Chen, Hui and
    Kan, Min-Yen and
    Poria, Soujanya},
  booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
  doi = {10.18653/v1/2022.emnlp-main.717},
  editor = {Goldberg, Yoav and
    Kozareva, Zornitsa and
    Zhang, Yue},
  month = {December},
  pages = {10498--10511},
  publisher = {Association for Computational Linguistics},
  title = {MM-Align: Learning Optimal Transport-based Alignment Dynamics for Fast and Accurate Inference on Missing Modality Sequences},
  url = {https://aclanthology.org/2022.emnlp-main.717},
  year = {2022}
}
@@ -0,0 +1,32 @@
---
title: 'MM-Align: Learning Optimal Transport-based Alignment Dynamics for Fast and
  Accurate Inference on Missing Modality Sequences'
authors:
- Wei Han
- Hui Chen
- Min-Yen Kan
- Soujanya Poria
date: '2022-12-01'
publishDate: '2024-07-05T17:09:42.610472Z'
publication_types:
- paper-conference
publication: '*Proceedings of the 2022 Conference on Empirical Methods in Natural
  Language Processing*'
doi: 10.18653/v1/2022.emnlp-main.717
abstract: Existing multimodal tasks mostly target at the complete input modality setting,
  i.e., each modality is either complete or completely missing in both training and
  test sets. However, the randomly missing situations have still been underexplored.
  In this paper, we present a novel approach named MM-Align to address the missing-modality
  inference problem. Concretely, we propose 1) an alignment dynamics learning module
  based on the theory of optimal transport (OT) for missing data imputation; 2) a
  denoising training algorithm to enhance the quality of imputation as well as the
  accuracy of model predictions. Compared with previous generative methods which devote
  to restoring the missing inputs, MM-Align learns to capture and imitate the alignment
  dynamics between modality sequences. Results of comprehensive experiments on two
  multimodal tasks empirically demonstrate that our method can perform more accurate
  and faster inference and alleviate the overfitting issue under different missing
  conditions.
links:
- name: URL
  url: https://aclanthology.org/2022.emnlp-main.717
---
@@ -0,0 +1,18 @@
@inproceedings{han-etal-2024-self,
  abstract = {Image--text models (ITMs) are the prevalent architecture to solve video question--answering tasks, which requires only a few input frames to save huge computational cost compared to video--language models. However, we find existent ITM video question--answering solutions either 1) adopt simplistic and unintentional sampling strategies, which may miss key frames to offer the answer clues; or 2) sample a large number of frames into divided groups, which the computational sources can not accommodate. In this work, we aim at an efficient sampling method towards the few-frame situations. We first summarize a family of prior sampling methods based on question--frame correlation into a unified one, dubbed *Most Implied Frames* (MIF). Through some primary results and analysis, we form a hypothesis that question-aware sampling is not necessary, from which we further propose the other method *Most Dominant Frames* (MDF). Experimental results on four public datasets and three advanced ITMs demonstrate that our proposed strategies can boost the performance for image--text pretrained models, and have a wide application scenario in terms of model architectures and dataset types. Our code is available at https://github.com/declare-lab/Sealing.},
  address = {Mexico City, Mexico},
  author = {Han, Wei and
    Chen, Hui and
    Kan, Min-Yen and
    Poria, Soujanya},
  booktitle = {Findings of the Association for Computational Linguistics: NAACL 2024},
  editor = {Duh, Kevin and
    Gomez, Helena and
    Bethard, Steven},
  month = {June},
  pages = {2522--2534},
  publisher = {Association for Computational Linguistics},
  title = {Self-Adaptive Sampling for Accurate Video Question Answering on Image Text Models},
  url = {https://aclanthology.org/2024.findings-naacl.162},
  year = {2024}
}
@@ -0,0 +1,32 @@
---
title: Self-Adaptive Sampling for Accurate Video Question Answering on Image Text
  Models
authors:
- Wei Han
- Hui Chen
- Min-Yen Kan
- Soujanya Poria
date: '2024-06-01'
publishDate: '2024-07-05T17:09:42.578623Z'
publication_types:
- paper-conference
publication: '*Findings of the Association for Computational Linguistics: NAACL 2024*'
abstract: Image--text models (ITMs) are the prevalent architecture to solve video question--answering
  tasks, which requires only a few input frames to save huge computational cost compared
  to video--language models. However, we find existent ITM video question--answering
  solutions either 1) adopt simplistic and unintentional sampling strategies, which
  may miss key frames to offer the answer clues; or 2) sample a large number of frames
  into divided groups, which the computational sources can not accommodate. In this
  work, we aim at an efficient sampling method towards the few-frame situations. We
  first summarize a family of prior sampling methods based on question--frame correlation
  into a unified one, dubbed *Most Implied Frames* (MIF). Through some primary results
  and analysis, we form a hypothesis that question-aware sampling is not necessary,
  from which we further propose the other method *Most Dominant Frames* (MDF). Experimental
  results on four public datasets and three advanced ITMs demonstrate that our proposed
  strategies can boost the performance for image--text pretrained models, and have
  a wide application scenario in terms of model architectures and dataset types. Our
  code is available at https://github.com/declare-lab/Sealing.
links:
- name: URL
  url: https://aclanthology.org/2024.findings-naacl.162
---
@@ -0,0 +1,21 @@
@inproceedings{jain-etal-2022-comparative,
  abstract = {We model products' reviews to generate comparative responses consisting of positive and negative experiences regarding the product. Specifically, we generate a single-sentence, comparative response from a given positive and a negative opinion. We contribute the first dataset for this task of Comparative Snippet Generation from contrasting opinions regarding a product, and an analysis of performance of a pre-trained BERT model to generate such snippets.},
  address = {Dublin, Ireland},
  author = {Jain, Saurabh and
    Miao, Yisong and
    Kan, Min-Yen},
  booktitle = {Proceedings of the Fifth Workshop on e-Commerce and NLP (ECNLP 5)},
  doi = {10.18653/v1/2022.ecnlp-1.7},
  editor = {Malmasi, Shervin and
    Rokhlenko, Oleg and
    Ueffing, Nicola and
    Guy, Ido and
    Agichtein, Eugene and
    Kallumadi, Surya},
  month = {May},
  pages = {49--57},
  publisher = {Association for Computational Linguistics},
  title = {Comparative Snippet Generation},
  url = {https://aclanthology.org/2022.ecnlp-1.7},
  year = {2022}
}
@@ -0,0 +1,22 @@
---
title: Comparative Snippet Generation
authors:
- Saurabh Jain
- Yisong Miao
- Min-Yen Kan
date: '2022-05-01'
publishDate: '2024-07-05T17:09:42.617512Z'
publication_types:
- paper-conference
publication: '*Proceedings of the Fifth Workshop on e-Commerce and NLP (ECNLP 5)*'
doi: 10.18653/v1/2022.ecnlp-1.7
abstract: We model products' reviews to generate comparative responses consisting
  of positive and negative experiences regarding the product. Specifically, we generate
  a single-sentence, comparative response from a given positive and a negative opinion.
  We contribute the first dataset for this task of Comparative Snippet Generation
  from contrasting opinions regarding a product, and an analysis of performance of
  a pre-trained BERT model to generate such snippets.
links:
- name: URL
  url: https://aclanthology.org/2022.ecnlp-1.7
---
@@ -0,0 +1,22 @@
@inproceedings{qin-etal-2022-gl,
  abstract = {Due to high data demands of current methods, attention to zero-shot cross-lingual spoken language understanding (SLU) has grown, as such approaches greatly reduce human annotation effort. However, existing models solely rely on shared parameters, which can only perform implicit alignment across languages. We present Global-Local Contrastive Learning Framework (GL-CLeF) to address this shortcoming. Specifically, we employ contrastive learning, leveraging bilingual dictionaries to construct multilingual views of the same utterance, then encourage their representations to be more similar than negative example pairs, which achieves to explicitly align representations of similar sentences across languages. In addition, a key step in GL-CLeF is a proposed Local and Global component, which achieves a fine-grained cross-lingual transfer (i.e., sentence-level Local intent transfer, token-level Local slot transfer, and semantic-level Global transfer across intent and slot). Experiments on MultiATIS++ show that GL-CLeF achieves the best performance and successfully pulls representations of similar sentences across languages closer.},
  address = {Dublin, Ireland},
  author = {Qin, Libo and
    Chen, Qiguang and
    Xie, Tianbao and
    Li, Qixin and
    Lou, Jian-Guang and
    Che, Wanxiang and
    Kan, Min-Yen},
  booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  doi = {10.18653/v1/2022.acl-long.191},
  editor = {Muresan, Smaranda and
    Nakov, Preslav and
    Villavicencio, Aline},
  month = {May},
  pages = {2677--2686},
  publisher = {Association for Computational Linguistics},
  title = {GL-CLeF: A Global--Local Contrastive Learning Framework for Cross-lingual Spoken Language Understanding},
  url = {https://aclanthology.org/2022.acl-long.191},
  year = {2022}
}