@inproceedings{42114767130348baa703382829eb7bfb,
  title     = {Research on Online Reinforcement Learning Method Based on Experience-Replay},
  abstract  = {As for standard reinforcement learning, the key is that the agent's next step is directed by the instantaneous and delayed reporting from constant interaction with the environment and trial and error learning. But it makes the convergence rate slower for actual reinforcement learning; at the same time, inconsistency state will occur in the agent learning process. Therefore, it is necessary for the agent to remember what has been learned within the time specified to improve the convergence and robustness of decision making. With regard to the above-mentioned issues, this paper proposes to accelerate the convergence rate of reinforcement learning by using the function approximation ability of neural network and to improve the robustness of reinforcement learning by using the Memory-based Experience-Replay (ER) algorithm. The experimental results show the effectiveness of the proposed method.},
  keywords  = {Experience-Replay, Neural Network, Reinforcement Learning},
  author    = {Hu, Ning and Ge, Zhijun and Chen, Xuanwen and Ding, Chunguang and Shi, Haobin},
  note      = {Publisher Copyright: {\textcopyright} 2018 IEEE.; 2018 IEEE International Conference on Information and Automation, ICIA 2018 ; Conference date: 11-08-2018 Through 13-08-2018},
  year      = {2018},
  month     = aug,
  doi       = {10.1109/ICInfA.2018.8812454},
  language  = {English},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {1338--1343},
  booktitle = {2018 IEEE International Conference on Information and Automation, ICIA 2018},
}