% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Arco:285030,
      author       = {Arco, Juan E. and Jimenez-Mesa, Carmen and Ortiz, Andrés
                      and Ramírez, Javier and Levin, Johannes and Górriz, Juan
                      M.},
      title        = {{E}xplainable {I}ntermodality {M}edical {I}nformation
                      {T}ransfer {U}sing {S}iamese {A}utoencoders},
      journal      = {IEEE Transactions on Radiation and Plasma Medical Sciences},
      volume       = {10},
      number       = {2},
      issn         = {2469-7311},
      address      = {New York, NY},
      publisher    = {IEEE},
      reportid     = {DZNE-2026-00155},
      pages        = {192--209},
      year         = {2026},
      abstract     = {Medical imaging fusion combines complementary information
                      from multiple modalities to enhance diagnostic accuracy.
                      However, evaluating the quality of fused images remains
                      challenging, with many studies relying solely on
                      classification performance, which may lead to incorrect
                      conclusions. We introduce a novel framework for improving
                      image fusion, focusing on preserving fine-grained details.
                      Our model uses a Siamese autoencoder to process T1-MRI and
                      FDG-PET images in the context of Alzheimer’s disease (AD).
                      The framework optimizes fusion by minimizing reconstruction
                      error between generated and input images, while maximizing
                      differences between modalities through cosine distance.
                      Additionally, we propose a supervised variant that
                      incorporates a binary cross-entropy loss between
                      diagnostic labels and predicted probabilities. Fusion
                      quality is rigorously assessed through
                      three tests: 1) classification of AD patients and controls
                      using fused images; 2) an atlas-based occlusion test for
                      identifying regions relevant to cognitive decline; and 3)
                      analysis of structural–functional relationships via
                      Euclidean distance. Results show an AUC of 0.92 for AD
                      detection, reveal the involvement of brain regions linked to
                      preclinical AD stages, and demonstrate preserved
                      structural–functional brain networks, indicating that
                      subtle differences are successfully captured through our
                      fusion approach.},
      cin          = {Clinical Research (Munich) / AG Levin},
      ddc          = {624},
      cid          = {I:(DE-2719)1111015 / I:(DE-2719)1111016},
      pnm          = {353 - Clinical and Health Care Research (POF4-353)},
      pid          = {G:(DE-HGF)POF4-353},
      typ          = {PUB:(DE-HGF)16},
      doi          = {10.1109/TRPMS.2025.3577309},
      url          = {https://pub.dzne.de/record/285030},
}
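
% The entry above describes a Siamese-autoencoder objective that minimizes
% reconstruction error while maximizing the cosine distance between the two
% modality branches, with a binary cross-entropy term in the supervised
% variant. A minimal sketch of such a combined loss, assuming hypothetical
% trade-off weights \lambda_1 and \lambda_2 that the abstract does not
% specify:
%
%   \mathcal{L} = \sum_{m \in \{\mathrm{MRI},\,\mathrm{PET}\}}
%                   \lVert \hat{x}_m - x_m \rVert_2^2
%                 - \lambda_1 \, d_{\cos}(z_{\mathrm{MRI}}, z_{\mathrm{PET}})
%                 + \lambda_2 \, \mathrm{BCE}(y, \hat{y})
%
% Here x_m and \hat{x}_m are the input and reconstructed images, z_m the
% latent codes of the two branches, and the BCE term applies only to the
% supervised variant; minimizing -\lambda_1 d_{\cos} maximizes the
% inter-modality distance.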