% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@ARTICLE{Faber:165528,
author = {Faber, Jennifer and Kügler, David and Bahrami, Emad and
Heinz, Lea-Sophie and Timmann, Dagmar and Ernst, Thomas M
and Deike-Hofmann, Katerina and Klockgether, Thomas and van
de Warrenburg, Bart and van Gaalen, Judith and Reetz,
Kathrin and Romanzetti, Sandro and Öz, Gülin and Joers,
James M and Diedrichsen, Jörn and Reuter, Martin and Giunti,
Paola and Garcia-Moreno, Hector and Jacobi, Heike and Jende,
Johann and de Vries, Jeroen and Považan, Michal and Barker,
Peter B and Steiner, Katherina Marie and Krahe, Janna},
collaboration = {ESMI MRI Study Group},
title = {{C}ereb{N}et: {A} fast and reliable deep-learning pipeline
for detailed cerebellum sub-segmentation.},
journal = {NeuroImage},
volume = {264},
issn = {1053-8119},
address = {Orlando, Fla.},
publisher = {Academic Press},
reportid = {DZNE-2022-01674},
pages = {119703},
year = {2022},
abstract = {Quantifying the volume of the cerebellum and its lobes is
of profound interest in various neurodegenerative and
acquired diseases. Especially for the most common
spinocerebellar ataxias (SCA), for which the first antisense
oligonucleotide-based gene silencing trial has recently
started, there is an urgent need for quantitative, sensitive
imaging markers at pre-symptomatic stages for stratification
and treatment assessment. This work introduces CerebNet, a
fully automated, extensively validated, deep learning method
for the lobular segmentation of the cerebellum, including
the separation of gray and white matter. For training,
validation, and testing, T1-weighted images from 30
participants were manually annotated into cerebellar lobules
and vermal sub-segments, as well as cerebellar white matter.
CerebNet combines FastSurferCNN, a UNet-based 2.5D
segmentation network, with extensive data augmentation, e.g.,
realistic non-linear deformations to increase the anatomical
variety, eliminating additional preprocessing steps, such as
spatial normalization or bias field correction. CerebNet
demonstrates high accuracy (on average 0.87 Dice and
1.742 mm Robust Hausdorff Distance across all structures),
outperforming state-of-the-art approaches. Furthermore, it
shows high test-retest reliability (average ICC >0.97 on
OASIS and Kirby) as well as high sensitivity to disease
effects, including the pre-ataxic stage of spinocerebellar
ataxia type 3 (SCA3). CerebNet is compatible with FreeSurfer
and FastSurfer and can analyze a 3D volume within seconds on
a consumer GPU in an end-to-end fashion, thus providing an
efficient and validated solution for assessing cerebellum
sub-structure volumes. We make CerebNet available as
source code (https://github.com/Deep-MI/FastSurfer).},
keywords = {Humans / Image Processing, Computer-Assisted: methods /
Deep Learning / Magnetic Resonance Imaging: methods /
Reproducibility of Results / Cerebellum: diagnostic imaging
/ CerebNet (Other) / Cerebellum (Other) / Computational
neuroimaging (Other) / Deep learning (Other)},
cin = {Patient Studies (Bonn) / AG Reuter / AG Radbruch / Clinical
Research (Bonn)},
ddc = {610},
cid = {I:(DE-2719)1011101 / I:(DE-2719)1040310 /
I:(DE-2719)5000075 / I:(DE-2719)1011001},
pnm = {353 - Clinical and Health Care Research (POF4-353) / 354 -
Disease Prevention and Healthy Aging (POF4-354)},
pid = {G:(DE-HGF)POF4-353 / G:(DE-HGF)POF4-354},
typ = {PUB:(DE-HGF)16},
pmc = {pmc:PMC9771831},
pubmed = {pmid:36349595},
doi = {10.1016/j.neuroimage.2022.119703},
url = {https://pub.dzne.de/record/165528},
}
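% The entry above can be cited with biblatex and the biber backend mentioned in
% the header note (biber handles the UTF-8 characters in the author field). A
% minimal sketch, assuming this file is saved as "references.bib" (the file name
% is an assumption, not part of the record):
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib} % assumed name of this .bib file
%   \begin{document}
%   CerebNet is described in \cite{Faber:165528}.
%   \printbibliography
%   \end{document}
%
% Compile with pdflatex, then biber (not classic BibTeX), then pdflatex twice so
% the citation and the bibliography resolve.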