2018
|
Valor Miró, Juan Daniel ; Baquero-Arnal, Pau; Civera, Jorge; Turró, Carlos; Juan, Alfons Multilingual videos for MOOCs and OER Journal Article Journal of Educational Technology & Society, 21 (2), pp. 1–12, 2018. Abstract | Links | BibTeX | Tags: Machine Translation, MOOCs, multilingual, Speech Recognition, video lecture repositories
@article{Miró2018,
  title     = {Multilingual videos for {MOOCs} and {OER}},
  author    = {Valor Miró, Juan Daniel and Baquero-Arnal, Pau and Civera, Jorge and Turró, Carlos and Juan, Alfons},
  url       = {https://www.mllp.upv.es/wp-content/uploads/2019/11/JETS2018MLLP.pdf
http://hdl.handle.net/10251/122577
https://www.jstor.org/stable/26388375
https://www.j-ets.net/collection/published-issues/21_2},
  year      = {2018},
  date      = {2018-01-01},
  journal   = {Journal of Educational Technology \& Society},
  volume    = {21},
  number    = {2},
  pages     = {1--12},
  abstract  = {Massive Open Online Courses (MOOCs) and Open Educational Resources (OER) are rapidly growing, but are not usually offered in multiple languages due to the lack of cost-effective solutions to translate the different objects comprising them and particularly videos. However, current state-of-the-art automatic speech recognition (ASR) and machine translation (MT) techniques have reached a level of maturity which opens the possibility of producing multilingual video subtitles of publishable quality at low cost. This work summarizes authors' experience in exploring this possibility in two real-life case studies: a MOOC platform and a large video lecture repository. Apart from describing the systems, tools and integration components employed for such purpose, a comprehensive evaluation of the results achieved is provided in terms of quality and efficiency. More precisely, it is shown that draft multilingual subtitles produced by domain-adapted ASR/MT systems reach a level of accuracy that make them worth post-editing, instead of generating them ex novo, saving approximately 25\%--75\% of the time. Finally, the results reported on user multilingual data consumption reflect that multilingual subtitles have had a very positive impact in our case studies boosting student enrolment, in the case of the MOOC platform, by 70\% relative.},
  keywords  = {Machine Translation, MOOCs, multilingual, Speech Recognition, video lecture repositories},
  pubstate  = {published},
  tppubtype = {article}
}
Massive Open Online Courses (MOOCs) and Open Educational Resources (OER) are rapidly growing, but are not usually offered in multiple languages due to the lack of cost-effective solutions to translate the different objects comprising them and particularly videos. However, current state-of-the-art automatic speech recognition (ASR) and machine translation (MT) techniques have reached a level of maturity which opens the possibility of producing multilingual video subtitles of publishable quality at low cost. This work summarizes authors' experience in exploring this possibility in two real-life case studies: a MOOC platform and a large video lecture repository. Apart from describing the systems, tools and integration components employed for such purpose, a comprehensive evaluation of the results achieved is provided in terms of quality and efficiency. More precisely, it is shown that draft multilingual subtitles produced by domain-adapted ASR/MT systems reach a level of accuracy that make them worth post-editing, instead of generating them ex novo, saving approximately 25%–75% of the time. Finally, the results reported on user multilingual data consumption reflect that multilingual subtitles have had a very positive impact in our case studies boosting student enrolment, in the case of the MOOC platform, by 70% relative. |
2016
|
del-Agua, Miguel Ángel; Piqueras, Santiago; Giménez, Adrià; Sanchis, Alberto; Civera, Jorge; Juan, Alfons ASR Confidence Estimation with Speaker-Adapted Recurrent Neural Networks Inproceedings Proc. of the 17th Annual Conf. of the ISCA (Interspeech 2016), pp. 3464–3468, San Francisco (USA), 2016. Abstract | Links | BibTeX | Tags: BLSTM, Confidence measures, Recurrent Neural Networks, Speaker adaptation, Speech Recognition
@inproceedings{del-Agua2016,
  title     = {{ASR} Confidence Estimation with Speaker-Adapted Recurrent Neural Networks},
  author    = {del-Agua, Miguel Ángel and Piqueras, Santiago and Giménez, Adrià and Sanchis, Alberto and Civera, Jorge and Juan, Alfons},
  doi       = {10.21437/Interspeech.2016-1142},
  year      = {2016},
  date      = {2016-09-08},
  booktitle = {Proc. of the 17th Annual Conf. of the ISCA (Interspeech 2016)},
  pages     = {3464--3468},
  address   = {San Francisco (USA)},
  abstract  = {Confidence estimation for automatic speech recognition has been very recently improved by using Recurrent Neural Networks (RNNs), and also by speaker adaptation (on the basis of Conditional Random Fields). In this work, we explore how to obtain further improvements by combining RNNs and speaker adaptation. In particular, we explore different speaker-dependent and -independent data representations for Bidirectional Long Short Term Memory RNNs of various topologies. Empirical tests are reported on the LibriSpeech dataset, showing that the best results are achieved by the proposed combination of RNNs and speaker adaptation.},
  keywords  = {BLSTM, Confidence measures, Recurrent Neural Networks, Speaker adaptation, Speech Recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Confidence estimation for automatic speech recognition has been very recently improved by using Recurrent Neural Networks (RNNs), and also by speaker adaptation (on the basis of Conditional Random Fields). In this work, we explore how to obtain further improvements by combining RNNs and speaker adaptation. In particular, we explore different speaker-dependent and -independent data representations for Bidirectional Long Short Term Memory RNNs of various topologies. Empirical tests are reported on the LibriSpeech dataset, showing that the best results are achieved by the proposed combination of RNNs and speaker adaptation. |
2012
|
Valor Miró, Juan Daniel ; Pérez González de Martos, Alejandro ; Civera, Jorge ; Juan, Alfons Integrating a State-of-the-Art ASR System into the Opencast Matterhorn Platform Incollection Advances in Speech and Language Technologies for Iberian Languages (IberSpeech 2012), 328 , pp. 237-246, Springer Berlin Heidelberg, 2012, ISBN: 978-3-642-35291-1, (doi: 10.1007/978-3-642-35292-8_20). Links | BibTeX | Tags: Google N-Gram, Language Modeling, Linear Combination, Opencast Matterhorn, Speech Recognition
@incollection{Valor2012,
  title     = {Integrating a State-of-the-Art {ASR} System into the {Opencast Matterhorn} Platform},
  author    = {Valor Miró, Juan Daniel and {Pérez González de Martos}, Alejandro and Civera, Jorge and Juan, Alfons},
  url       = {http://hdl.handle.net/10251/35190
http://www.mllp.upv.es/wp-content/uploads/2015/04/paper2.pdf},
  doi       = {10.1007/978-3-642-35292-8_20},
  isbn      = {978-3-642-35291-1},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {Advances in Speech and Language Technologies for Iberian Languages (IberSpeech 2012)},
  volume    = {328},
  pages     = {237--246},
  publisher = {Springer Berlin Heidelberg},
  series    = {Communications in Computer and Information Science},
  keywords  = {Google N-Gram, Language Modeling, Linear Combination, Opencast Matterhorn, Speech Recognition},
  pubstate  = {published},
  tppubtype = {incollection}
}
|