% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
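%
% As a minimal illustration (not part of the record itself), the sketch below
% shows one way to load this file with biber via biblatex; the file name
% "references.bib" is an assumption, so adjust it to your own setup:
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   Interpretability of deep COVID-19 classifiers~\cite{Chatterjee:268456}.
%   \printbibliography
%   \end{document}
%
% With the classic bibtex workflow you would instead need bibtex8, because of
% the non-ASCII author name in the entry below.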

@ARTICLE{Chatterjee:268456,
      author       = {Chatterjee, Soumick and Saad, Fatima and Sarasaen,
                      Chompunuch and Ghosh, Suhita and Krug, Valerie and Khatun,
                      Rupali and Mishra, Rahul and Desai, Nirja and Radeva, Petia
                      and Rose, Georg and Stober, Sebastian and Speck, Oliver and
                      Nürnberger, Andreas},
      title        = {{E}xploration of {I}nterpretability {T}echniques for {D}eep
                      {COVID}-19 {C}lassification {U}sing {C}hest {X}-ray
                      {I}mages},
      journal      = {Journal of Imaging},
      volume       = {10},
      number       = {2},
      issn         = {2313-433X},
      address      = {Basel},
      publisher    = {MDPI},
      reportid     = {DZNE-2024-00208},
      pages        = {45},
      year         = {2024},
      abstract     = {The outbreak of COVID-19 has shocked the entire world with
                      its fairly rapid spread and has challenged different
                      sectors. One of the most effective ways to limit its spread
                      is the early and accurate diagnosis of infected patients.
                      Medical imaging, such as X-ray and computed tomography (CT),
                      combined with the potential of artificial intelligence (AI),
                      plays an essential role in supporting medical personnel in
                      the diagnosis process. Thus, in this article, five different
                      deep learning models (ResNet18, ResNet34, InceptionV3,
                      InceptionResNetV2, and DenseNet161) and their ensemble,
                      using majority voting, have been used to classify COVID-19,
                      pneumonia, and healthy subjects using chest X-ray images.
                      Multilabel classification was performed to predict multiple
                      pathologies for each patient, if present. First, the
                      interpretability of each of the networks was thoroughly
                      studied using local interpretability methods (occlusion,
                      saliency, input X gradient, guided backpropagation,
                      integrated gradients, and DeepLIFT) and a global technique
                      (neuron activation profiles). The mean micro F1 score of the
                      models for COVID-19 classification ranged from 0.66 to
                      0.875, and was 0.89 for the ensemble of the network models.
                      The qualitative results showed that the ResNets were the
                      most interpretable models. This research demonstrates the
                      importance of using interpretability methods to compare
                      different models before making a decision regarding the
                      best-performing model.},
      keywords     = {COVID-19 (Other) / chest X-ray (Other) / deep learning
                      (Other) / interpretability analysis (Other) / model ensemble
                      (Other) / multilabel image classification (Other) /
                      pneumonia (Other)},
      cin          = {AG Speck},
      ddc          = {004},
      cid          = {I:(DE-2719)1340009},
      pnm          = {353 - Clinical and Health Care Research (POF4-353)},
      pid          = {G:(DE-HGF)POF4-353},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {pmid:38392093},
      pmc          = {pmc:PMC10889835},
      doi          = {10.3390/jimaging10020045},
      url          = {https://pub.dzne.de/record/268456},
}