@inproceedings{2bf284b76b3d4a79bac26598271789ba,
  author    = {Cheng, Yifan and Xiao, Bing and Qian, Hanyu},
  title     = {Autonomous Navigation Method of Robot in Dynamic Environment Based on Proximal Policy Optimization},
  abstract  = {In response to the problem that traditional navigation algorithms have poor adaptability when dealing with complex working environments of robots, this paper studies the robot autonomous navigation method based on deep reinforcement learning (DRL). Compared with traditional navigation methods, the proposed method is more robust and more adaptable to dynamic environments. Firstly, the speed and position of the possible obstacle behavior patterns in the robot working scene are analyzed, and the obstacle model is constructed by demarcating the dangerous area. Secondly, the proximal policy optimization (PPO) is used to design a reward function based on the obstacle dangerous area to define the robot's goal, and the robot is trained to continuously interact and learn with the complex dynamic simulation environment to obtain the maximum reward. Finally, the optimal strategy for path planning in complex environments is found to achieve autonomous navigation of the robot in complex environments. Experimental tests show that the success rate of fixed target navigation reached 100\%, and the success rate of random target navigation reached 70.6\%.},
  keywords  = {autonomous navigation, DRL, path planning, PPO},
  editor    = {Yang, Qiang and Mahalle, Parikshit N. and Wang, Xuehe},
  booktitle = {Second International Conference on Power Electronics and Artificial Intelligence, PEAI 2025},
  series    = {Proceedings of SPIE - The International Society for Optical Engineering},
  publisher = {SPIE},
  year      = {2025},
  doi       = {10.1117/12.3066769},
  language  = {English},
  note      = {Publisher Copyright: Copyright {\textcopyright} 2025 SPIE.; 2nd International Conference on Power Electronics and Artificial Intelligence, PEAI 2025 ; Conference date: 17-01-2025 Through 19-01-2025},
}