% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Chatterjee:286092,
      author       = {Chatterjee, Soumick and Yassin, Hadya and Dubost, Florian
                      and Nürnberger, Andreas and Speck, Oliver},
      title        = {{Weakly}-supervised segmentation using
                      inherently-explainable classification models and their
                      application to brain tumour classification},
      journal      = {Neurocomputing},
      volume       = {682},
      issn         = {0925-2312},
      address      = {Amsterdam},
      publisher    = {Elsevier},
      reportid     = {DZNE-2026-00388},
      pages        = {133460},
      year         = {2026},
      abstract     = {Deep learning has demonstrated significant potential in
                      medical imaging; however, the opacity of “black-box”
                      models hinders clinical trust, while segmentation tasks
                      typically necessitate laborious, hard-to-obtain pixel-wise
                      annotations. To address these challenges simultaneously,
                      this paper introduces a framework for three inherently
                      explainable classifiers (GP-UNet, GP-ShuffleUNet, and
                      GP-ReconResNet). By integrating a global pooling mechanism,
                      these networks generate localisation heatmaps that directly
                      influence classification decisions, offering inherent
                      interpretability without relying on potentially unreliable
                      post-hoc methods. These heatmaps are subsequently
                      thresholded to achieve weakly-supervised segmentation,
                      requiring only image-level classification labels for
                      training. Validated on two datasets for multi-class brain
                      tumour classification, the proposed models achieved a peak
                      F1-score of 0.93. For the weakly-supervised segmentation
                      task, a median Dice score of 0.728 (95\% CI:
                      0.715–0.739) was recorded. Notably, on a subset of
                      tumour-only images, the best model achieved an accuracy of
                      98.7\%, outperforming state-of-the-art glioma grading
                      binary classifiers. Furthermore, comparative
                      Precision-Recall analysis validated the framework’s
                      robustness against severe class imbalance, establishing a
                      direct correlation between diagnostic confidence and
                      segmentation fidelity. These results demonstrate that the
                      proposed framework successfully combines high diagnostic
                      accuracy with essential transparency, offering a promising
                      direction for trustworthy clinical decision support.},
      cin          = {AG Speck},
      ddc          = {610},
      cid          = {I:(DE-2719)1340009},
      pnm          = {353 - Clinical and Health Care Research (POF4-353)},
      pid          = {G:(DE-HGF)POF4-353},
      typ          = {PUB:(DE-HGF)16},
      doi          = {10.1016/j.neucom.2026.133460},
      url          = {https://pub.dzne.de/record/286092},
}