@inproceedings{fd52e2d49a7c441b93aefa557398d2f3,
  title     = {A deep reinforcement learning method based on attentional memories},
  abstract  = {In continuous visual decision-making scenarios, the environment is often partially observable rather than globally observable, and traditional deep reinforcement learning methods are often unable to effectively learn to discover hidden information in partially observable environments. In this work, a continuous visual decision-making algorithm AMU-DQN for partially observable environments is proposed by combining the proposed attentional memories unit (AMU) which can integrate temporal and spatial features in historical sequences and deep Q-network. Due to the special mechanism of the AMU recurrent layer, the AMU-DQN algorithm has the ability to learn favorable hidden information from the historical observation information sequence in a partially observable environment, forming a special attention and memory ability. Extensive simulation experiments show that AMU-DQN exhibits super-high performance in both convergence speed and reward convergence peak.},
  keywords  = {Attention Mechanism, Decision-making, Recurrent Neural Network, Reinforcement learning},
  author    = {Sun, Libin and Gao, Biao and Shi, Haobin},
  note      = {Publisher Copyright: {\textcopyright} 2022 {IEEE}.; 2022 International Conference on Computer Engineering and Artificial Intelligence, {ICCEAI} 2022 ; Conference date: 22-07-2022 Through 24-07-2022},
  year      = {2022},
  doi       = {10.1109/ICCEAI55464.2022.00108},
  language  = {English},
  series    = {Proceedings - 2022 International Conference on Computer Engineering and Artificial Intelligence, {ICCEAI} 2022},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {492--497},
  editor    = {Lin, Pan and Yang, Yong},
  booktitle = {Proceedings - 2022 International Conference on Computer Engineering and Artificial Intelligence, {ICCEAI} 2022},
}