% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
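%
% A minimal usage sketch, assuming this entry is saved as "references.bib" and
% cited from a biblatex document "main.tex" (both file names are illustrative,
% not part of the original record):
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}  % biber handles the UTF-8 characters below
%   \addbibresource{references.bib}
%   \begin{document}
%   Explanation fidelity for CNN-based AD detection~\cite{Palm:277812}.
%   \printbibliography
%   \end{document}
%
% Typical build sequence: pdflatex main, then biber main, then pdflatex main (twice).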
@INPROCEEDINGS{Palm:277812,
author = {Hiller, Bjarne C. and Bader, Sebastian and Singh, Devesh
and Kirste, Thomas and Becker, Martin and Dyrba, Martin},
editor = {Palm, Christoph and Breininger, Katharina and Deserno,
Thomas and Handels, Heinz and Maier, Andreas and Maier-Hein,
Klaus H. and Tolxdorff, Thomas M.},
title = {{E}valuating the {F}idelity of {E}xplanations for
{C}onvolutional {N}eural {N}etworks in {A}lzheimer’s
{D}isease {D}etection},
address = {Wiesbaden},
publisher = {Springer Fachmedien Wiesbaden},
reportid = {DZNE-2025-00484},
isbn = {978-3-658-47421-8 (print), 978-3-658-47422-5 (online)},
issn = {1431-472X (print), 2628-8958 (online)},
series = {Informatik aktuell},
pages = {76 - 81},
year = {2025},
booktitle = {Bildverarbeitung für die Medizin 2025},
chapter = {18},
abstract = {The black-box nature of deep learning still prevents its
widespread clinical use due to the high risk of hidden
biases and prediction errors. Over the last decade, various
explanation methods have been proposed to reveal the latent
mechanisms of neural networks and support their decisions.
However, interpreting the explanations themselves can be
challenging, and there is still little consensus on how to
evaluate the quality of explanations. To investigate the
fidelity of explanations provided by prominent feature
attribution methods for Convolutional Neural Networks in
Alzheimer’s Disease (AD) detection, this paper applies
relevance-guided perturbation to the Magnetic Resonance
Imaging (MRI) input images. According to the fidelity
metric, the AD class probability showed the steepest decline
when the perturbation was guided by Integrated Gradients or
DeepLift. We conclude by highlighting the role of the
reference image in feature attribution with regard to AD
detection from MRI images. The source code for the
experiments is publicly available on GitHub at
https://github.com/bckrlab/ad-fidelity.},
month = {Mar},
date = {2025-03-09},
organization = {German Conference on Medical Image
Computing, Regensburg (Germany), 9 Mar
2025 - 11 Mar 2025},
cin = {AG Teipel},
cid = {I:(DE-2719)1510100},
pnm = {353 - Clinical and Health Care Research (POF4-353)},
pid = {G:(DE-HGF)POF4-353},
typ = {PUB:(DE-HGF)8 / PUB:(DE-HGF)7},
doi = {10.1007/978-3-658-47422-5_18},
url = {https://pub.dzne.de/record/277812},
}