@inproceedings{478fba4caf394009add9aed950535da8,
title = "Transformer Based Visual Inertial Odometry",
abstract = "Visual inertial odometry (VIO) is a sensor fusion technology used for positioning and navigation. It combines visual and inertial sensor information to estimate the movement and location of a UAV in real time. In recent years, deep learning-based VIO approaches have shown better performance than traditional geometric methods. However, VIO tasks usually need to capture long-range feature dependencies to ensure the continuity and consistency of camera motion trajectories over time. In this study, we introduce a new end-to-end transformer-based VIO framework, named VIO-former, to enable the model to better understand motion features in video sequences. A comprehensive quantitative and qualitative evaluation is conducted on the KITTI dataset to test our method. The experimental results show that our approach achieves superior performance compared with existing methods.",
keywords = "Sensor fusion, Transformer, Visual inertial odometry",
author = "Sicheng Fei and Jingfeng Li and Lei Li and Jie Liang and Jinwen Hu and Dingwen Zhang and Junwei Han",
note = "Publisher Copyright: {\textcopyright} The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2025.; International Conference on Guidance, Navigation and Control, ICGNC 2024 ; Conference date: 09-08-2024 Through 11-08-2024",
year = "2025",
doi = "10.1007/978-981-96-2264-1_54",
language = "English",
isbn = "9789819622634",
series = "Lecture Notes in Electrical Engineering",
publisher = "Springer Science and Business Media Deutschland GmbH",
pages = "567--575",
editor = "Liang Yan and Haibin Duan and Yimin Deng",
booktitle = "Advances in Guidance, Navigation and Control - Proceedings of 2024 International Conference on Guidance, Navigation and Control Volume 17",
}