@inproceedings{df09736a928e43adbfa55d656c79838a,
  title     = {{CST}: Automatic Modulation Recognition Method by Convolution Transformer on Temporal Continuity Features},
  author    = {Hou, Dongbin and Li, Lixin and Lin, Wensheng and Liang, Wei and Han, Zhu},
  booktitle = {GLOBECOM 2023 - 2023 IEEE Global Communications Conference},
  series    = {Proceedings - IEEE Global Communications Conference, GLOBECOM},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  year      = {2023},
  month     = dec,
  pages     = {6091--6096},
  doi       = {10.1109/GLOBECOM54140.2023.10437217},
  keywords  = {automatic modulation recognition, few-shot learning, transformer},
  abstract  = {With the rapid development of deep learning (DL) in recent years, automatic modulation recognition (AMR) with DL has achieved high accuracy. However, insufficient training signal data in complicated channel environments is critical factors that make DL methods difficult to deploy in practice. Aiming to these problems, we propose a novel neural network named convolution signal transformer (CST). The CST is accomplished through three primary modifications: a hierarchy of transformer containing convolution, a novel signal-specific self-attention mechanism to replace the multi-headed self-attention mechanism in Transformer, and a novel convolutional transformer block named convolution-transformer projection (CTP) to leverage a convolutional projection. The simulation results demonstrate that the CST outperforms advanced neural networks on all datasets, which is very beneficial for the deployment of AMR in complicated channel environments.},
  note      = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 2023 IEEE Global Communications Conference, GLOBECOM 2023 ; Conference date: 04-12-2023 Through 08-12-2023},
  language  = {English},
}