@inproceedings{3fa93f69415d49c4bb0de13eae198762,
title = "DTRANSGAN: DEBLURRING TRANSFORMER BASED ON GENERATIVE ADVERSARIAL NETWORK",
abstract = "Motion deblurring is challenging due to the fast movements of the object or the camera itself. Existing methods usually try to liberate it by training CNN model or Generative Adversarial Networks(GAN). However, their methods can't restore the details very well. In this paper, a Deblurring Transformer based on Generative Adversarial Network(DTransGAN) is proposed to improve the deblurring performance of the vehicles under the surveillance camera scene. The proposed DTransGAN combines the low-level information and the high-level information through skip connection, which saves the original information of the image as much as possible to restore the details. Besides, we replace the convolution layer in the generator with the swin transformer block, which could pay more attention to the reconstruction of details. Finally, we create the vehicle motion blur dataset. It contains two parts, namely the clear image and the corresponding blurry image. Experiments on public datasets and the collected dataset report that DTransGAN achieves the state-of-the-art for motion deblurring task.",
keywords = "Motion deblurring, skip connection, transformer",
author = "Kai Zhuang and Yuan Yuan and Qi Wang",
note = "Publisher Copyright: {\textcopyright} 2022 IEEE.; 29th IEEE International Conference on Image Processing, ICIP 2022 ; Conference date: 16-10-2022 Through 19-10-2022",
year = "2022",
doi = "10.1109/ICIP46576.2022.9897956",
language = "英语",
series = "Proceedings - International Conference on Image Processing, ICIP",
publisher = "IEEE Computer Society",
pages = "701--705",
booktitle = "2022 IEEE International Conference on Image Processing, ICIP 2022 - Proceedings",
}