@inproceedings{498105a6b36b4e5abfca770ca9349af7,
  title     = {Interpreting Deep Neural Networks through Model Transformation: Literature Review},
  abstract  = {Machine learning especially deep learning models have achieved state-of-the-art performances in many fields such as automatic driving, speech recognition, facial expression recognition and so on. However, these models are usually less interpretable which means that it is hard for people to understand and trust the decisions they made. This paper focuses on improving interpretability of deep neural network (DNN) through model transformation, in which the behaviors of DNNs are approximated by transparent models, such as decision tree or rules. We provide a comprehensive literature review for model transformation methods from different aspects, including type of interpretable models, structure of model transformation, and type of model transformation. The characteristics and perspectives of the model transformation approach are also explored.},
  keywords  = {Decision Tree, Deep Neural Network, Interpretability, Model Transformation, Rules},
  author    = {Feng, Meixia and Jiao, Lianmeng and Pan, Quan},
  note      = {Publisher Copyright: {\textcopyright} 2022 Technical Committee on Control Theory, Chinese Association of Automation.; 41st Chinese Control Conference, CCC 2022 ; Conference date: 25-07-2022 Through 27-07-2022},
  year      = {2022},
  doi       = {10.23919/CCC55666.2022.9902421},
  language  = {English},
  series    = {Chinese Control Conference, CCC},
  publisher = {IEEE Computer Society},
  pages     = {7211--7216},
  editor    = {Li, Zhijun and Sun, Jian},
  booktitle = {Proceedings of the 41st Chinese Control Conference, CCC 2022},
}