% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
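%
% As a minimal sketch of how this file can be processed with “biber”, assuming
% the entries below are saved as “references.bib” (the file name, jobname and
% document body are placeholders, not part of this record):
%
%   \documentclass{article}
%   \usepackage[utf8]{inputenc}   % UTF-8 input; the default since the 2018 LaTeX kernel
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   AI super-resolution of T1-weighted MRI \cite{Gicquel:283099}.
%   \printbibliography
%   \end{document}
%
% Compile with “pdflatex”, then run “biber” on the jobname, then “pdflatex”
% again so the citation and bibliography resolve.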

@INPROCEEDINGS{Gicquel:283099,
      author       = {Gicquel, Malo and Flood, Gabrielle and Zhao, Ruoyi and
                      Wuestefeld, Anika and Spotorno, Nicola and Strandberg, Olof
                      and Xiao, Yu and Åström, Kalle and Wisse, Laura E M and
                      van Westen, Danielle and Berron, David and Hansson, Oskar
                      and Vogel, Jacob W},
      title        = {{AI} {S}uperresolution: {C}onverting {T}1-weighted {MRI}
                       from 3{T} to 7{T} resolution toward enhanced imaging
                       biomarkers for {A}lzheimer’s disease},
      journal      = {Alzheimer's \& Dementia},
      volume       = {21},
      number       = {Suppl 8},
      issn         = {1552-5260},
      reportid     = {DZNE-2025-01506},
      pages        = {e109817},
      year         = {2025},
      abstract     = {High-resolution (7T) MRI facilitates in vivo imaging of
                      fine anatomical structures selectively affected in
                      Alzheimer's disease (AD), including medial temporal lobe
                      subregions. However, 7T data is challenging to acquire and
                      largely unavailable in clinical settings. Here, we use deep
                       learning to synthesize 7T-resolution T1-weighted MRI images
                       from lower-resolution (3T) images. Paired 7T and 3T
                      T1-weighted images were acquired from 178 participants (134
                      clinically unimpaired, 48 impaired) from the Swedish
                      BioFINDER-2 study. To synthesize 7T-resolution images from
                      3T images, we trained two models: a specialized U-Net, and a
                      U-Net mixed with a generative adversarial network
                      (U-Net-GAN) on $80\%$ of the data. We evaluated model
                       performance on the remaining $20\%$, compared to models from
                      the literature (V-Net, WATNet), using image-based
                      performance metrics and by surveying five blinded MRI
                       professionals on subjective image quality. For n = 11
                      participants, amygdalae were automatically segmented with
                      FastSurfer on 3T and synthetic-7T images, and compared to a
                      manually segmented 'ground truth'. To assess downstream
                      performance, FastSurfer was run on n = 3,168 triplets of
                      matched 3T and AI-generated synthetic-7T images, and a
                      multi-class random forest model classifying clinical
                       diagnosis was trained on both datasets. Synthetic-7T images
                       were generated for images in the test set (Figure 1A). Image
                       metrics suggested the U-Net as the top-performing model
                       (Figure 1B), though blinded experts qualitatively rated the
                       U-Net-GAN images as the best-looking, exceeding even real 7T
                       images (Figure 1C). Automated amygdala segmentations from the
                       synthetic U-Net-GAN images were closer to the manual
                       segmentations than those from the original 3T images they
                       were synthesized from, for 9 of the 11 participants (Figure 2).
                       Classification obtained modest performance (accuracy
                       $\sim 60\%$) but did not differ between real and synthetic
                       images (Figure 3A). The synthetic-image models used slightly
                       different features for classification (Figure 3B). Synthetic
                       T1-weighted images approaching 7T resolution
                      can be generated from 3T images, which may improve image
                      quality and segmentation, without compromising performance
                      in downstream tasks. This approach holds promise for better
                      measurement of deep cortical or subcortical structures
                      relevant to AD. Work is ongoing toward improving
                      performance, generalizability and clinical utility.},
      month        = {Jul},
      date         = {2025-07-27},
      organization = {Alzheimer’s Association
                       International Conference, Toronto
                       (Canada), 27 Jul 2025 - 31 Jul 2025},
      keywords     = {Humans / Alzheimer Disease: diagnostic imaging / Magnetic
                      Resonance Imaging: methods / Female / Male / Aged / Deep
                      Learning / Neuroimaging: methods / Image Processing,
                      Computer-Assisted: methods / Brain: diagnostic imaging /
                      Sweden},
      cin          = {AG Berron},
      ddc          = {610},
      cid          = {I:(DE-2719)5000070},
      pnm          = {353 - Clinical and Health Care Research (POF4-353)},
      pid          = {G:(DE-HGF)POF4-353},
      typ          = {PUB:(DE-HGF)1 / PUB:(DE-HGF)16},
      pubmed       = {pmid:41434097},
      pmc          = {pmc:PMC12725268},
      doi          = {10.1002/alz70862_109817},
      url          = {https://pub.dzne.de/record/283099},
}
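%
% If classic BibTeX tooling is preferred instead of biber, the header above
% suggests “bibtex8”; a sketch of that route, with the traditional interface
% (jobname “paper” and bibliography file “references.bib” are placeholders):
%
%   \bibliographystyle{plain}
%   \bibliography{references}
%
% followed on the command line by:
%
%   pdflatex paper
%   bibtex8 paper
%   pdflatex paper
%   pdflatex paper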