% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Li:276343,
      author       = {Li, Jianning and Zhou, Zongwei and Yang, Jiancheng and
                      Pepe, Antonio and Gsaxner, Christina and Luijten, Gijs and
                      Qu, Chongyu and Zhang, Tiezheng and Chen, Xiaoxi and Li,
                      Wenxuan and Wodzinski, Marek and Friedrich, Paul and Xie,
                      Kangxian and Jin, Yuan and Ambigapathy, Narmada and Nasca,
                      Enrico and Solak, Naida and Melito, Gian Marco and Vu, Viet
                      Duc and Memon, Afaque R and Schlachta, Christopher and De
                      Ribaupierre, Sandrine and Patel, Rajnikant and Eagleson, Roy
                      and Chen, Xiaojun and Mächler, Heinrich and Kirschke, Jan
                      Stefan and de la Rosa, Ezequiel and Christ, Patrick
                      Ferdinand and Li, Hongwei Bran and Ellis, David G and
                      Aizenberg, Michele R and Gatidis, Sergios and Küstner,
                      Thomas and Shusharina, Nadya and Heller, Nicholas and
                      Andrearczyk, Vincent and Depeursinge, Adrien and Hatt,
                      Mathieu and Sekuboyina, Anjany and Löffler, Maximilian T
                      and Liebl, Hans and Dorent, Reuben and Vercauteren, Tom and
                      Shapey, Jonathan and Kujawa, Aaron and Cornelissen, Stefan
                      and Langenhuizen, Patrick and Ben-Hamadou, Achraf and Rekik,
                      Ahmed and Pujades, Sergi and Boyer, Edmond and Bolelli,
                      Federico and Grana, Costantino and Lumetti, Luca and Salehi,
                      Hamidreza and Ma, Jun and Zhang, Yao and Gharleghi, Ramtin
                      and Beier, Susann and Sowmya, Arcot and Garza-Villarreal,
                      Eduardo A and Balducci, Thania and Angeles-Valdez, Diego and
                      Souza, Roberto and Rittner, Leticia and Frayne, Richard and
                      Ji, Yuanfeng and Ferrari, Vincenzo and Chatterjee, Soumick
                      and Dubost, Florian and Schreiber, Stefanie and Mattern,
                      Hendrik and Speck, Oliver and Haehn, Daniel and John,
                      Christoph and Nürnberger, Andreas and Pedrosa, João and
                      Ferreira, Carlos and Aresta, Guilherme and Cunha, António
                      and Campilho, Aurélio and Suter, Yannick and Garcia, Jose
                      and Lalande, Alain and Vandenbossche, Vicky and Van Oevelen,
                      Aline and Duquesne, Kate and Mekhzoum, Hamza and
                      Vandemeulebroucke, Jef and Audenaert, Emmanuel and Krebs,
                      Claudia and van Leeuwen, Timo and Vereecke, Evie and
                      Heidemeyer, Hauke and Röhrig, Rainer and Hölzle, Frank and
                      Badeli, Vahid and Krieger, Kathrin and Gunzer, Matthias and
                      Chen, Jianxu and van Meegdenburg, Timo and Dada, Amin and
                      Balzer, Miriam and Fragemann, Jana and Jonske, Frederic and
                      Rempe, Moritz and Malorodov, Stanislav and Bahnsen, Fin H
                      and Seibold, Constantin and Jaus, Alexander and Marinov,
                      Zdravko and Jaeger, Paul F and Stiefelhagen, Rainer and
                      Santos, Ana Sofia and Lindo, Mariana and Ferreira, André
                      and Alves, Victor and Kamp, Michael and Abourayya, Amr and
                      Nensa, Felix and Hörst, Fabian and Brehmer, Alexander and
                      Heine, Lukas and Hanusrichter, Yannik and Weßling, Martin
                      and Dudda, Marcel and Podleska, Lars E and Fink, Matthias A
                      and Keyl, Julius and Tserpes, Konstantinos and Kim,
                      Moon-Sung and Elhabian, Shireen and Lamecker, Hans and
                      Zukić, Dženan and Paniagua, Beatriz and Wachinger,
                      Christian and Urschler, Martin and Duong, Luc and
                      Wasserthal, Jakob and Hoyer, Peter F and Basu, Oliver and
                      Maal, Thomas and Witjes, Max J H and Schiele, Gregor and
                      Chang, Ti-Chiun and Ahmadi, Seyed-Ahmad and Luo, Ping and
                      Menze, Bjoern and Reyes, Mauricio and Deserno, Thomas M and
                      Davatzikos, Christos and Puladi, Behrus and Fua, Pascal and
                      Yuille, Alan L and Kleesiek, Jens and Egger, Jan},
      title        = {{M}ed{S}hape{N}et -- a large-scale dataset of 3{D} medical
                      shapes for computer vision},
      journal      = {Biomedical Engineering},
      volume       = {70},
      number       = {1},
      issn         = {0013-5585},
      address      = {Berlin},
      publisher    = {De Gruyter},
      reportid     = {DZNE-2025-00291},
      pages        = {71--90},
      year         = {2025},
      abstract     = {Shape is commonly used to describe objects.
                      State-of-the-art algorithms in medical imaging, however,
                      predominantly diverge from computer vision, where voxel
                      grids, meshes, point clouds, and implicit surface models
                      are used. This is evident from the growing popularity of
                      ShapeNet (51,300 models) and Princeton ModelNet (127,915
                      models). However, a large collection of anatomical shapes
                      (e.g., bones, organs, vessels) and 3D models of surgical
                      instruments is missing. We present MedShapeNet to
                      translate data-driven vision algorithms to medical
                      applications and to adapt state-of-the-art vision
                      algorithms to medical problems. As a unique feature, the
                      majority of shapes are modeled directly on the imaging
                      data of real patients. We present use cases in brain
                      tumor classification, skull reconstruction, multi-class
                      anatomy completion, education, and 3D printing. To date,
                      MedShapeNet includes 23 datasets with more than 100,000
                      shapes that are paired with annotations (ground truth).
                      Our data are freely accessible via a web interface and a
                      Python application programming interface and can be used
                      for discriminative, reconstructive, and variational
                      benchmarks as well as for various applications in
                      virtual, augmented, or mixed reality and 3D printing.
                      MedShapeNet contains medical shapes from anatomy and
                      surgical instruments and will continue to collect data
                      for benchmarks and applications. The project page is:
                      https://medshapenet.ikim.nrw/.},
      keywords     = {Humans / Algorithms / Imaging, Three-Dimensional: methods /
                      Brain Neoplasms: diagnostic imaging / Printing,
                      Three-Dimensional / 3D medical shapes / anatomy education /
                      augmented reality / benchmark / shapeomics / virtual
                      reality},
      cin          = {AG Schreiber / AG Speck},
      ddc          = {610},
      cid          = {I:(DE-2719)1310010 / I:(DE-2719)1340009},
      pnm          = {353 - Clinical and Health Care Research (POF4-353)},
      pid          = {G:(DE-HGF)POF4-353},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {pmid:39733351},
      doi          = {10.1515/bmt-2024-0396},
      url          = {https://pub.dzne.de/record/276343},
}
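
% The abstract above notes that the MedShapeNet data are freely accessible via
% a web interface and a Python application programming interface. As a minimal,
% purely illustrative sketch -- the package name "MedShapeNet", the client
% class, and the method names below are assumptions made for illustration, not
% taken from this record; consult the project page
% (https://medshapenet.ikim.nrw/) for the actual API -- programmatic access
% might look roughly like this:
%
%     # pip install MedShapeNet              # assumed package name
%     from MedShapeNet import MedShapeNet    # assumed import path and class
%
%     msn = MedShapeNet()                    # assumed client object
%     datasets = msn.datasets()              # assumed: list available datasets
%     print(datasets)
%     msn.download_dataset(datasets[0])      # assumed: fetch shapes of one dataset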