@inproceedings{09f1c3f0864d4fa9923a0486d1e10549,
title = "Improving mandarin end-To-end speech synthesis by self-Attention and learnable gaussian bias",
abstract = "Compared to conventional speech synthesis, end-To-end speech synthesis has achieved much better naturalness with more simplified system building pipeline. End-To-end framework can generate natural speech directly from characters for English. But for other languages like Chinese, recent studies have indicated that extra engineering features are still needed for model robustness and naturalness, e.g, word boundaries and prosody boundaries, which makes the front-end pipeline as complicated as the traditional approach. To maintain the naturalness of generated speech and discard language-specific expertise as much as possible, in Mandarin TTS, we introduce a novel self-Attention based encoder with learnable Gaussian bias in Tacotron. We evaluate different systems with and without complex prosody information and results show that the proposed approach has the ability to generate stable and natural speech with minimum language-dependent front-end modules.",
keywords = "end-To-end, Gaussian bias, self-Attention, speech synthesis, Tacotron",
author = "Fengyu Yang and Shan Yang and Pengcheng Zhu and Pengju Yan and Lei Xie",
note = "Publisher Copyright: {\textcopyright} 2019 IEEE.; 2019 IEEE Automatic Speech Recognition and Understanding Workshop, ASRU 2019 ; Conference date: 15-12-2019 Through 18-12-2019",
year = "2019",
month = dec,
doi = "10.1109/ASRU46091.2019.9003949",
language = "英语",
series = "2019 IEEE Automatic Speech Recognition and Understanding Workshop, ASRU 2019 - Proceedings",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "208--213",
booktitle = "2019 IEEE Automatic Speech Recognition and Understanding Workshop, ASRU 2019 - Proceedings",
}