You can create a new website with this list, or embed it in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/service/mendeley/53d1e3c7-2f16-3c81-9a84-dccd45be4841?jsonp=1"></script>

PHP:

<?php
$contents = file_get_contents("https://bibbase.org/service/mendeley/53d1e3c7-2f16-3c81-9a84-dccd45be4841?jsonp=1");
print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/service/mendeley/53d1e3c7-2f16-3c81-9a84-dccd45be4841?jsonp=1"></iframe>

For more details see the documentation.
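The same service URL can also be fetched from any ordinary HTTP client, for example to cache the rendered list on your own server. A minimal Python sketch, equivalent in spirit to the PHP snippet above; it simply downloads and prints whatever the service returns:

# Fetch the BibBase-rendered list for this Mendeley profile.
from urllib.request import urlopen

URL = ("https://bibbase.org/service/mendeley/"
       "53d1e3c7-2f16-3c81-9a84-dccd45be4841?jsonp=1")

with urlopen(URL) as response:
    contents = response.read().decode("utf-8")
print(contents)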


2022 (5)
Automated pneumothorax triaging in chest X-rays in the New Zealand population using deep-learning algorithms.
Feng, S.; Liu, Q.; Patel, A.; Bazai, S. U.; Jin, C.; Kim, J. S.; Sarrafzadeh, M.; Azzollini, D.; Yeoh, J.; Kim, E.; Gordon, S.; Jang-Jaccard, J.; Urschler, M.; Barnard, S.; Fong, A.; Simmers, C.; Tarr, G. P.; and Wilson, B.
Journal of Medical Imaging and Radiation Oncology, 66(8): 1035-1043, December 2022.

@article{Feng2022,
  title    = {Automated pneumothorax triaging in chest X-rays in the New Zealand population using deep-learning algorithms},
  author   = {Feng, Sijing and Liu, Qixiu and Patel, Aakash and Bazai, Sibghat Ullah and Jin, Cheng-Kai and Kim, Ji Soo and Sarrafzadeh, Mikal and Azzollini, Damian and Yeoh, Jason and Kim, Eve and Gordon, Simon and Jang-Jaccard, Julian and Urschler, Martin and Barnard, Stuart and Fong, Amy and Simmers, Cameron and Tarr, Gregory P and Wilson, Ben},
  journal  = {Journal of Medical Imaging and Radiation Oncology},
  year     = {2022},
  month    = {12},
  volume   = {66},
  number   = {8},
  pages    = {1035-1043},
  doi      = {10.1111/1754-9485.13393},
  keywords = {Algorithms, Artificial Intelligence, Deep Learning, Humans, New Zealand, Pneumothorax, Radiography, Thoracic, Triage, X-Rays, diagnostic imaging}
}

Abstract: INTRODUCTION: The primary aim was to develop convolutional neural network (CNN)-based artificial intelligence (AI) models for pneumothorax classification and segmentation for automated chest X-ray (CXR) triaging. A secondary aim was to perform interpretability analysis on the best-performing candidate model to determine whether the model's predictions were susceptible to bias or confounding. METHOD: A CANDID-PTX dataset, that included 19,237 anonymized and manually labelled CXRs, was used for training and testing candidate models for pneumothorax classification and segmentation. Evaluation metrics for classification performance included Area under the receiver operating characteristic curve (AUC-ROC), sensitivity and specificity, whilst segmentation performance was measured using mean Dice and true-positive (TP)-Dice coefficients. Interpretability analysis was performed using Grad-CAM heatmaps. Finally, the best-performing model was implemented for a triage simulation. RESULTS: The best-performing model demonstrated a sensitivity of 0.93, specificity of 0.95 and AUC-ROC of 0.94 in identifying the presence of pneumothorax. A TP-Dice coefficient of 0.69 is given for segmentation performance. In triage simulation, mean reporting delay for pneumothorax-containing CXRs is reduced from 9.8 ± 2 days to 1.0 ± 0.5 days (P-value < 0.001 at 5% significance level), with sensitivity 0.95 and specificity of 0.95 given for the classification performance. Finally, interpretability analysis demonstrated models employed logic understandable to radiologists, with negligible bias or confounding in predictions. CONCLUSION: AI models can automate pneumothorax detection with clinically acceptable accuracy, and potentially reduce reporting delays for urgent findings when implemented as triaging tools.

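Each publication in this list carries its BibTeX record. If you want to script over these records (for example to collect titles and DOIs), the entries are flat enough to read with standard-library Python. A simplistic sketch, assuming no nested braces inside field values (true for the entries on this page) and an illustrative file name; it is not a general BibTeX parser:

import re

def parse_flat_bibtex(entry: str) -> dict:
    """Extract 'field = {value}' pairs from one flat BibTeX entry."""
    fields = dict(re.findall(r"(\w+)\s*=\s*\{([^{}]*)\}", entry))
    kind, key = re.match(r"\s*@(\w+)\{([^,]*),", entry).groups()
    fields["entrytype"], fields["key"] = kind, key.strip()
    return fields

# 'feng2022.bib' is a hypothetical file holding the entry above.
record = parse_flat_bibtex(open("feng2022.bib").read())
print(record["title"], "doi:", record.get("doi"))
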
AMLP-Conv, a 3D Axial Long-range Interaction Multilayer Perceptron for CNNs.
Bonheur, S.; Pienn, M.; Olschewski, H.; Bischof, H.; and Urschler, M.
In Lian, C.; Cao, X.; Rekik, I.; Xu, X.; and Cui, Z., editors, Machine Learning in Medical Imaging, pages 328-337, 2022. Springer Nature Switzerland.

@inproceedings{10.1007/978-3-031-21014-3_34,
  title     = {AMLP-Conv, a 3D Axial Long-range Interaction Multilayer Perceptron for CNNs},
  author    = {Bonheur, Savinien and Pienn, Michael and Olschewski, Horst and Bischof, Horst and Urschler, Martin},
  editor    = {Lian, Chunfeng and Cao, Xiaohuan and Rekik, Islem and Xu, Xuanang and Cui, Zhiming},
  booktitle = {Machine Learning in Medical Imaging},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  year      = {2022},
  pages     = {328-337}
}

Abstract: While Convolutional neural networks (CNN) have been the backbone of medical image analysis for years, their limited long-range interaction restrains their ability to encode long distance anatomical relationships. On the other hand, the current approach to capture long distance relationships, Transformers, is constrained by their quadratic scaling and their data inefficiency (arising from their lack of inductive biases). In this paper, we introduce the 3D Axial Multilayer Perceptron (AMLP), a long-range interaction module whose complexity scales linearly with spatial dimensions. This module is merged with CNNs to form the AMLP-Conv module, a long-range augmented convolution with strong inductive biases. Once combined with U-Net, our AMLP-Conv module leads to significant improvement, outperforming most transformer based U-Nets on the ACDC dataset, and reaching a new state-of-the-art result on the Multi-Modal Whole Heart Segmentation (MM-WHS) dataset with an almost 1.1% Dice score improvement over the previous scores on the Computed Tomography (CT) modality.

OnlyCaps-Net, a Capsule only Based Neural Network for 2D and 3D Semantic Segmentation.
Bonheur, S.; Thaler, F.; Pienn, M.; Olschewski, H.; Bischof, H.; and Urschler, M.
In Wang, L.; Dou, Q.; Fletcher, P. T.; Speidel, S.; and Li, S., editors, Medical Image Computing and Computer Assisted Intervention -- MICCAI 2022, pages 340-349, 2022. Springer Nature Switzerland.

@inproceedings{10.1007/978-3-031-16443-9_33,
  title     = {OnlyCaps-Net, a Capsule only Based Neural Network for 2D and 3D Semantic Segmentation},
  author    = {Bonheur, Savinien and Thaler, Franz and Pienn, Michael and Olschewski, Horst and Bischof, Horst and Urschler, Martin},
  editor    = {Wang, Linwei and Dou, Qi and Fletcher, P Thomas and Speidel, Stefanie and Li, Shuo},
  booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2022},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  year      = {2022},
  pages     = {340-349}
}

Abstract: Since their introduction by Sabour et al., capsule networks have been extended to 2D semantic segmentation with the introduction of convolutional capsules. While extended further to 3D semantic segmentation when mixed with Convolutional Neural Networks (CNNs), no capsule-only network (to the best of our knowledge) has been able to reach CNNs' accuracy on multilabel segmentation tasks. In this work, we propose OnlyCaps-Net, the first competitive capsule-only network for 2D and 3D multi-label semantic segmentation. OnlyCaps-Net improves both capsules' accuracy and inference speed by replacing Sabour et al. squashing with the introduction of two novel squashing functions, i.e. softsquash or unitsquash, and the iterative routing with a new parameter free single pass routing, i.e. unit routing. Additionally, OnlyCaps-Net introduces a new parameter efficient convolutional capsule type, i.e. depthwise separable convolutional capsule.

Anatomy-Aware Inference of the 3D Standing Spine Posture from 2D Radiographs.
Bayat, A.; Pace, D. F.; Sekuboyina, A.; Payer, C.; Stern, D.; Urschler, M.; Kirschke, J. S.; and Menze, B. H.
Tomography (Ann Arbor, Mich.), 8(1): 479-496, February 2022.

@article{Bayat2022,
  title    = {Anatomy-Aware Inference of the 3D Standing Spine Posture from 2D Radiographs},
  author   = {Bayat, Amirhossein and Pace, Danielle F and Sekuboyina, Anjany and Payer, Christian and Stern, Darko and Urschler, Martin and Kirschke, Jan S and Menze, Bjoern H},
  journal  = {Tomography (Ann Arbor, Mich.)},
  year     = {2022},
  month    = {2},
  volume   = {8},
  number   = {1},
  pages    = {479-496},
  doi      = {10.3390/tomography8010039},
  keywords = {Humans, Imaging, Three-Dimensional, Posture, Radiography, Spine, Standing Position, diagnostic imaging, methods, physiology}
}

Abstract: An important factor for the development of spinal degeneration, pain and the outcome of spinal surgery is known to be the balance of the spine. It must be analyzed in an upright, standing position to ensure physiological loading conditions and visualize load-dependent deformations. Despite the complex 3D shape of the spine, this analysis is currently performed using 2D radiographs, as all frequently used 3D imaging techniques require the patient to be scanned in a prone position. To overcome this limitation, we propose a deep neural network to reconstruct the 3D spinal pose in an upright standing position, loaded naturally. Specifically, we propose a novel neural network architecture, which takes orthogonal 2D radiographs and infers the spine's 3D posture using vertebral shape priors. In this work, we define vertebral shape priors using an atlas and a spine shape prior, incorporating both into our proposed network architecture. We validate our architecture on digitally reconstructed radiographs, achieving a 3D reconstruction Dice of 0.95, indicating an almost perfect 2D-to-3D domain translation. Validating the reconstruction accuracy of a 3D standing spine on real data is infeasible due to the lack of a valid ground truth. Hence, we design a novel experiment for this purpose, using an orientation invariant distance metric, to evaluate our model's ability to synthesize full-3D, upright, and patient-specific spine models. We compare the synthesized spine shapes from clinical upright standing radiographs to the same patient's 3D spinal posture in the prone position from CT.

Closing the Loop: Graph Networks to Unify Semantic Objects and Visual Features for Multi-object Scenes.
Kim, J. J. Y.; Urschler, M.; Riddle, P. J.; and Wicker, J. S.
In 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 4352-4358, 2022.

@inproceedings{9981542,
  title     = {Closing the Loop: Graph Networks to Unify Semantic Objects and Visual Features for Multi-object Scenes},
  author    = {Kim, Jonathan J Y and Urschler, Martin and Riddle, Patricia J and Wicker, Jorg S},
  booktitle = {2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  year      = {2022},
  pages     = {4352-4358},
  doi       = {10.1109/IROS47612.2022.9981542}
}

2021 (5)
Bone age estimation with the Greulich-Pyle atlas using 3T MR images of hand and wrist.
Widek, T.; Genet, P.; Ehammer, T.; Schwark, T.; Urschler, M.; and Scheurer, E.
Forensic Science International, 319: 110654, February 2021.

@article{Widek2021,
  title   = {Bone age estimation with the Greulich-Pyle atlas using 3T MR images of hand and wrist},
  author  = {Widek, Thomas and Genet, Pia and Ehammer, Thomas and Schwark, Thorsten and Urschler, Martin and Scheurer, Eva},
  journal = {Forensic Science International},
  year    = {2021},
  month   = {2},
  volume  = {319},
  pages   = {110654},
  doi     = {10.1016/j.forsciint.2020.110654},
  url     = {https://linkinghub.elsevier.com/retrieve/pii/S0379073820305168}
}

A Framework for the generation of digital twins of cardiac electrophysiology from clinical 12-leads ECGs.
Gillette, K.; Gsell, M. A.; Prassl, A. J.; Karabelas, E.; Reiter, U.; Reiter, G.; Grandits, T.; Payer, C.; Štern, D.; Urschler, M.; Bayer, J. D.; Augustin, C. M.; Neic, A.; Pock, T.; Vigmond, E. J.; and Plank, G.
Medical Image Analysis, 71: 102080, July 2021.

@article{Gillette2021,
  title   = {A Framework for the generation of digital twins of cardiac electrophysiology from clinical 12-leads ECGs},
  author  = {Gillette, Karli and Gsell, Matthias A.F. and Prassl, Anton J. and Karabelas, Elias and Reiter, Ursula and Reiter, Gert and Grandits, Thomas and Payer, Christian and Štern, Darko and Urschler, Martin and Bayer, Jason D. and Augustin, Christoph M. and Neic, Aurel and Pock, Thomas and Vigmond, Edward J. and Plank, Gernot},
  journal = {Medical Image Analysis},
  year    = {2021},
  month   = {7},
  volume  = {71},
  pages   = {102080},
  doi     = {10.1016/j.media.2021.102080},
  url     = {https://linkinghub.elsevier.com/retrieve/pii/S1361841521001262}
}

VerSe: A Vertebrae labelling and segmentation benchmark for multi-detector CT images.
Sekuboyina, A.; Husseini, M. E.; Bayat, A.; Löffler, M.; Liebl, H.; Li, H.; Tetteh, G.; Kukačka, J.; Payer, C.; Štern, D.; Urschler, M.; Chen, M.; Cheng, D.; Lessmann, N.; Hu, Y.; Wang, T.; Yang, D.; Xu, D.; Ambellan, F.; Amiranashvili, T.; Ehlke, M.; Lamecker, H.; Lehnert, S.; Lirio, M.; de Olaguer, N. P.; Ramm, H.; Sahu, M.; Tack, A.; Zachow, S.; Jiang, T.; Ma, X.; Angerman, C.; Wang, X.; Brown, K.; Kirszenberg, A.; Puybareau, É.; Chen, D.; Bai, Y.; Rapazzo, B. H.; Yeah, T.; Zhang, A.; Xu, S.; Hou, F.; He, Z.; Zeng, C.; Xiangshang, Z.; Liming, X.; Netherton, T. J.; Mumme, R. P.; Court, L. E.; Huang, Z.; He, C.; Wang, L.; Ling, S. H.; Huỳnh, L. D.; Boutry, N.; Jakubicek, R.; Chmelik, J.; Mulay, S.; Sivaprakasam, M.; Paetzold, J. C.; Shit, S.; Ezhov, I.; Wiestler, B.; Glocker, B.; Valentinitsch, A.; Rempfler, M.; Menze, B. H.; and Kirschke, J. S.
Medical Image Analysis, 73: 102166, 2021.

@article{SEKUBOYINA2021102166,
  title    = {VerSe: A Vertebrae labelling and segmentation benchmark for multi-detector CT images},
  author   = {Sekuboyina, Anjany and Husseini, Malek E and Bayat, Amirhossein and Löffler, Maximilian and Liebl, Hans and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicolás Pérez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Brown, Kevin and Kirszenberg, Alexandre and Puybareau, Élodie and Chen, Di and Bai, Yiwei and Rapazzo, Brandon H and Yeah, Timyoas and Zhang, Amber and Xu, Shangliang and Hou, Feng and He, Zhiqiang and Zeng, Chan and Xiangshang, Zheng and Liming, Xu and Netherton, Tucker J and Mumme, Raymond P and Court, Laurence E and Huang, Zixun and He, Chenhang and Wang, Li-Wen and Ling, Sai Ho and Huỳnh, Lê Duy and Boutry, Nicolas and Jakubicek, Roman and Chmelik, Jiri and Mulay, Supriti and Sivaprakasam, Mohanasankar and Paetzold, Johannes C and Shit, Suprosanna and Ezhov, Ivan and Wiestler, Benedikt and Glocker, Ben and Valentinitsch, Alexander and Rempfler, Markus and Menze, Björn H and Kirschke, Jan S},
  journal  = {Medical Image Analysis},
  year     = {2021},
  volume   = {73},
  pages    = {102166},
  doi      = {10.1016/j.media.2021.102166},
  url      = {https://www.sciencedirect.com/science/article/pii/S1361841521002127},
  keywords = {Labelling, Segmentation, Spine, Vertebrae}
}

Abstract: Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared and 4505 vertebrae have individually been annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate the performance variation at the vertebra level, scan level, and different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration. The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse.

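VerSe ranks segmentation quality by the Dice coefficient between predicted and annotated voxel masks, the same metric quoted by several other papers in this list. For reference, a generic NumPy sketch of the metric (not the challenge's official evaluation code):

import numpy as np

def dice(pred: np.ndarray, truth: np.ndarray) -> float:
    """Dice coefficient 2|A∩B| / (|A| + |B|) for two binary masks."""
    pred, truth = pred.astype(bool), truth.astype(bool)
    denom = pred.sum() + truth.sum()
    if denom == 0:
        return 1.0  # both masks empty: treat as perfect agreement
    return 2.0 * np.logical_and(pred, truth).sum() / denom
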
Curation of the CANDID-PTX Dataset with Free-Text Reports.
Feng, S.; Azzollini, D.; Kim, J. S.; Jin, C.; Gordon, S. P.; Yeoh, J.; Kim, E.; Han, M.; Lee, A.; Patel, A.; Wu, J.; Urschler, M.; Fong, A.; Simmers, C.; Tarr, G. P.; Barnard, S.; and Wilson, B.
Radiology: Artificial Intelligence, 3(6): e210136, November 2021.

@article{Feng2021,
  title   = {Curation of the CANDID-PTX Dataset with Free-Text Reports},
  author  = {Feng, Sijing and Azzollini, Damian and Kim, Ji Soo and Jin, Cheng-Kai and Gordon, Simon P and Yeoh, Jason and Kim, Eve and Han, Mina and Lee, Andrew and Patel, Aakash and Wu, Joy and Urschler, Martin and Fong, Amy and Simmers, Cameron and Tarr, Gregory P and Barnard, Stuart and Wilson, Ben},
  journal = {Radiology: Artificial Intelligence},
  year    = {2021},
  month   = {11},
  volume  = {3},
  number  = {6},
  pages   = {e210136},
  doi     = {10.1148/ryai.2021210136}
}

Abstract: Supplemental material is available for this article. Keywords: Conventional Radiography, Thorax, Trauma, Ribs, Catheters, Segmentation, Diagnosis, Classification, Supervised Learning, Machine Learning © RSNA, 2021.

SymbioLCD: Ensemble-Based Loop Closure Detection using CNN-Extracted Objects and Visual Bag-of-Words.
Kim, J. J. Y.; Urschler, M.; Riddle, P. J.; and Wicker, J. S.
In 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5425, 2021.

@inproceedings{9636622,
  title     = {SymbioLCD: Ensemble-Based Loop Closure Detection using CNN-Extracted Objects and Visual Bag-of-Words},
  author    = {Kim, Jonathan J Y and Urschler, Martin and Riddle, Patricia J and Wicker, Jörg S},
  booktitle = {2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  year      = {2021},
  pages     = {5425},
  doi       = {10.1109/IROS51168.2021.9636622}
}

2020 (5)
Coarse to fine vertebrae localization and segmentation with SpatialConfiguration-Net and U-Net.
Payer, C.; Štern, D.; Bischof, H.; and Urschler, M.
In VISIGRAPP 2020 - Proceedings of the 15th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, 2020.

@inproceedings{Payer2020,
  title     = {Coarse to fine vertebrae localization and segmentation with SpatialConfiguration-Net and U-Net},
  author    = {Payer, Christian and Štern, Darko and Bischof, Horst and Urschler, Martin},
  booktitle = {VISIGRAPP 2020 - Proceedings of the 15th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
  year      = {2020},
  doi       = {10.5220/0008975201240133},
  keywords  = {SpatialConfiguration-Net, U-Net, VerSe 2019 Challenge, Vertebrae Localization, Vertebrae Segmentation}
}

Abstract: Localization and segmentation of vertebral bodies from spine CT volumes are crucial for pathological diagnosis, surgical planning, and postoperative assessment. However, fully automatic analysis of spine CT volumes is difficult due to the anatomical variation of pathologies, noise caused by screws and implants, and the large range of different field-of-views. We propose a fully automatic coarse to fine approach for vertebrae localization and segmentation based on fully convolutional CNNs. In a three-step approach, at first, a U-Net localizes the rough position of the spine. Then, the SpatialConfiguration-Net performs vertebrae localization and identification using heatmap regression. Finally, a U-Net performs binary segmentation of each identified vertebrae in a high resolution, before merging the individual predictions into the resulting multi-label vertebrae segmentation. The evaluation shows top performance of our approach, ranking first place and winning the MICCAI 2019 Large Scale Vertebrae Segmentation Challenge (VerSe 2019).

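The localization steps described above regress heatmaps rather than coordinates. As a generic illustration of how such training targets are commonly built (a standard formulation, not the authors' code), each landmark is rendered as a Gaussian blob that the network learns to reproduce:

import numpy as np

def gaussian_heatmap(shape, center, sigma=3.0):
    """Gaussian target for one landmark; shape=(H, W), center=(y, x)."""
    ys, xs = np.mgrid[0:shape[0], 0:shape[1]]
    d2 = (ys - center[0]) ** 2 + (xs - center[1]) ** 2
    return np.exp(-d2 / (2.0 * sigma ** 2))

# one target per vertebra landmark; sizes and coordinates are illustrative
target = gaussian_heatmap((256, 128), center=(120.0, 60.0))
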
Inferring the 3D Standing Spine Posture from 2D Radiographs.
Bayat, A.; Sekuboyina, A.; Paetzold, J. C.; Payer, C.; Stern, D.; Urschler, M.; Kirschke, J. S.; and Menze, B. H.
Volume 12266 of Lecture Notes in Computer Science, 2020.

@book{Bayat2020,
  title    = {Inferring the 3D Standing Spine Posture from 2D Radiographs},
  author   = {Bayat, A. and Sekuboyina, A. and Paetzold, J.C. and Payer, C. and Stern, D. and Urschler, M. and Kirschke, J.S. and Menze, B.H.},
  series   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume   = {12266 LNCS},
  year     = {2020},
  doi      = {10.1007/978-3-030-59725-2_75},
  keywords = {3D reconstruction, Digitally reconstructed radiographs, Fully convolutional networks, Spine posture}
}

Abstract: © 2020, Springer Nature Switzerland AG. The treatment of degenerative spinal disorders requires an understanding of the individual spinal anatomy and curvature in 3D. An upright spinal pose (i.e. standing) under natural weight bearing is crucial for such bio-mechanical analysis. 3D volumetric imaging modalities (e.g. CT and MRI) are performed in patients lying down. On the other hand, radiographs are captured in an upright pose, but result in 2D projections. This work aims to integrate the two realms, i.e. it combines the upright spinal curvature from radiographs with the 3D vertebral shape from CT imaging for synthesizing an upright 3D model of spine, loaded naturally. Specifically, we propose a novel neural network architecture working vertebra-wise, termed TransVert, which takes orthogonal 2D radiographs and infers the spine’s 3D posture. We validate our architecture on digitally reconstructed radiographs, achieving a 3D reconstruction Dice of 95.52 %, indicating an almost perfect 2D-to-3D domain translation. Deploying our model on clinical radiographs, we successfully synthesise full-3D, upright, patient-specific spine models for the first time.

Uncertainty Estimation in Landmark Localization Based on Gaussian Heatmaps.
Payer, C.; Urschler, M.; Bischof, H.; and Štern, D.
Volume 12443 of Lecture Notes in Computer Science, 2020.

@book{Payer2020a,
  title    = {Uncertainty Estimation in Landmark Localization Based on Gaussian Heatmaps},
  author   = {Payer, C. and Urschler, M. and Bischof, H. and Štern, D.},
  series   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume   = {12443 LNCS},
  year     = {2020},
  doi      = {10.1007/978-3-030-60365-6_5},
  keywords = {Landmark localization, Uncertainty estimation}
}

Abstract: © 2020, Springer Nature Switzerland AG. In landmark localization, due to ambiguities in defining their exact position, landmark annotations may suffer from both large inter- and intra-observer variabilites, which result in uncertain annotations. Therefore, predicting a single coordinate for a landmark is not sufficient for modeling the distribution of possible landmark locations. We propose to learn the Gaussian covariances of target heatmaps, such that covariances for pointed heatmaps correspond to more certain landmarks and covariances for flat heatmaps to more uncertain or ambiguous landmarks. By fitting Gaussian functions to the predicted heatmaps, our method is able to obtain landmark location distributions, which model location uncertainties. We show on a dataset of left hand radiographs and on a dataset of lateral cephalograms that the predicted uncertainties correlate with the landmark error, as well as inter-observer variabilities.

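To make the idea concrete: once a network predicts a heatmap, a mean (the landmark estimate) and a covariance (the uncertainty) can be recovered from it. The sketch below uses simple moment matching rather than the least-squares Gaussian fitting described in the paper, so it illustrates the concept, not the authors' method:

import numpy as np

def heatmap_moments(h: np.ndarray):
    """Treat a non-negative heatmap as an unnormalized 2D density;
    return its mean (landmark estimate) and covariance (uncertainty)."""
    h = np.clip(h, 0.0, None)
    p = h / h.sum()
    ys, xs = np.mgrid[0:h.shape[0], 0:h.shape[1]]
    mean = np.array([(p * ys).sum(), (p * xs).sum()])
    dy, dx = ys - mean[0], xs - mean[1]
    cov = np.array([[(p * dy * dy).sum(), (p * dy * dx).sum()],
                    [(p * dy * dx).sum(), (p * dx * dx).sum()]])
    return mean, cov

A peaked heatmap yields a small covariance and a flat heatmap a large one, matching the paper's reading of covariance as annotation ambiguity.
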
Variational inference and Bayesian CNNs for uncertainty estimation in multi-factorial bone age prediction.
Eggenreich, S.; Payer, C.; Urschler, M.; and Štern, D.
arXiv, 2020.

@misc{Eggenreich2020,
  title        = {Variational inference and Bayesian CNNs for uncertainty estimation in multi-factorial bone age prediction},
  author       = {Eggenreich, S. and Payer, C. and Urschler, M. and Štern, D.},
  howpublished = {arXiv},
  year         = {2020}
}

Abstract: Copyright © 2020, arXiv, All rights reserved. Additionally to the extensive use in clinical medicine, biological age (BA) in legal medicine is used to assess unknown chronological age (CA) in applications where identification documents are not available. Automatic methods for age estimation proposed in the literature are predicting point estimates, which can be misleading without the quantification of predictive uncertainty. In our multi-factorial age estimation method from MRI data, we used the Variational Inference approach to estimate the uncertainty of a Bayesian CNN model. Distinguishing model uncertainty from data uncertainty, we interpreted data uncertainty as biological variation, i.e. the range of possible CA of subjects having the same BA.

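The separation of model (epistemic) from data (aleatoric) uncertainty in a variational Bayesian network is commonly done by Monte-Carlo sampling of the weights. A generic sketch of that standard decomposition (in the style of Kendall and Gal, not the paper's exact estimator):

import numpy as np

def decompose_uncertainty(sample_means, sample_vars):
    """Given S weight samples of a Bayesian regression model, each
    producing a predictive mean and variance for one subject, split
    the predictive uncertainty into model and data components."""
    sample_means = np.asarray(sample_means)
    sample_vars = np.asarray(sample_vars)
    epistemic = sample_means.var()   # spread across weight samples
    aleatoric = sample_vars.mean()   # average predicted data noise
    return epistemic, aleatoric
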
The four-minute approach revisited: accelerating MRI-based multi-factorial age estimation.
Neumayer, B.; Lesch, A.; Thaler, F.; Widek, T.; Tschauner, S.; De Tobel, J.; Ehammer, T.; Kirnbauer, B.; Boldt, J.; van Wijk, M.; Stollberger, R.; and Urschler, M.
International Journal of Legal Medicine, 134(4), 2020.

@article{Neumayer2020,
  title    = {The four-minute approach revisited: accelerating MRI-based multi-factorial age estimation},
  author   = {Neumayer, B. and Lesch, A. and Thaler, F. and Widek, T. and Tschauner, S. and De Tobel, J. and Ehammer, T. and Kirnbauer, B. and Boldt, J. and van Wijk, M. and Stollberger, R. and Urschler, M.},
  journal  = {International Journal of Legal Medicine},
  year     = {2020},
  volume   = {134},
  number   = {4},
  doi      = {10.1007/s00414-019-02231-w},
  keywords = {Age determination by skeleton, Age determination by teeth, Imaging, Neural network models, Reproducibility of results, Three-dimensional}
}

Abstract: © 2019, The Author(s). Objectives: This feasibility study aimed to investigate the reliability of multi-factorial age estimation based on MR data of the hand, wisdom teeth and the clavicles with reduced acquisition time. Methods: The raw MR data of 34 volunteers—acquired on a 3T system and using acquisition times (TA) of 3:46 min (hand), 5:29 min (clavicles) and 10:46 min (teeth)—were retrospectively undersampled applying the commercially available CAIPIRINHA technique. Automatic and radiological age estimation methods were applied to the original image data as well as undersampled data to investigate the reliability of age estimates with decreasing acquisition time. Reliability was investigated determining standard deviation (SSD) and mean (MSD) of signed differences, intra-class correlation (ICC) and by performing Bland-Altman analysis. Results: Automatic age estimation generally showed very high reliability (SSD < 0.90 years) even for very short acquisition times (SSD ≈ 0.20 years for a total TA of 4 min). Radiological age estimation provided highly reliable results for images of the hand (ICC ≥ 0.96) and the teeth (ICC ≥ 0.79) for short acquisition times (TA = 16 s for the hand, TA = 2:21 min for the teeth), imaging data of the clavicles allowed for moderate acceleration (TA = 1:25 min, ICC ≥ 0.71). Conclusions: The results demonstrate that reliable multi-factorial age estimation based on MRI of the hand, wisdom teeth and the clavicles can be performed using images acquired with a total acquisition time of 4 min.

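The reliability numbers quoted here (MSD, SSD) are simply the mean and standard deviation of signed differences between paired age estimates from original and undersampled images. A small sketch with illustrative values:

import numpy as np

def signed_difference_stats(original, undersampled):
    """MSD and SSD: mean and standard deviation of signed differences
    between paired age estimates, in years."""
    d = np.asarray(undersampled, float) - np.asarray(original, float)
    return d.mean(), d.std(ddof=1)

# illustrative estimates only, not data from the study
msd, ssd = signed_difference_stats([18.2, 21.0, 19.5], [18.4, 20.7, 19.9])
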
2019 (8)
Automatic Age Estimation and Majority Age Classification From Multi-Factorial MRI Data.
Stern, D.; Payer, C.; Giuliani, N.; and Urschler, M.
IEEE Journal of Biomedical and Health Informatics, 23(4): 1392-1403, July 2019.

@article{Stern2019JBHI,
  title    = {Automatic Age Estimation and Majority Age Classification From Multi-Factorial MRI Data},
  author   = {Stern, Darko and Payer, Christian and Giuliani, Nicola and Urschler, Martin},
  journal  = {IEEE Journal of Biomedical and Health Informatics},
  year     = {2019},
  month    = {7},
  volume   = {23},
  number   = {4},
  pages    = {1392-1403},
  doi      = {10.1109/JBHI.2018.2869606},
  url      = {https://ieeexplore.ieee.org/document/8470073/},
  keywords = {Information fusion, age estimation, convolutional neural network, magnetic resonance imaging, majority age classification, multi-factorial}
}

Abstract: Age estimation from radiologic data is an important topic both in clinical medicine as well as in forensic applications, where it is used to assess unknown chronological age or to discriminate minors from adults. In this paper, we propose an automatic multi-factorial age estimation method based on MRI data of hand, clavicle, and teeth to extend the maximal age range from up to 19 years, as commonly used for age assessment based on hand bones, to up to 25 years, when combined with clavicle bones and wisdom teeth. Fusing age-relevant information from all three anatomical sites, our method utilizes a deep convolutional neural network that is trained on a dataset of 322 subjects in the age range between 13 and 25 years, to achieve a mean absolute prediction error in regressing chronological age of 1.01 ± 0.74 years. Furthermore, when used for majority age classification, we show that a classifier derived from thresholding our regression-based predictor is better suited than a classifier directly trained with a classification loss, especially when taking into account that those cases of minors being wrongly classified as adults need to be minimized. In conclusion, we overcome the limitations of the multi-factorial methods currently used in forensic practice, i.e., dependence on ionizing radiation, subjectivity in quantifying age-relevant information, and lack of an established approach to fuse this information from individual anatomical sites.

Quantitative CT-derived vessel metrics in idiopathic pulmonary fibrosis: A structure–function study.
Jacob, J.; Pienn, M.; Payer, C.; Urschler, M.; Kokosi, M.; Devaraj, A.; Wells, A. U.; and Olschewski, H.
Respirology, 24(5): 445-452. May 2019.
@article{Jacob2019,
  title = {Quantitative CT-derived vessel metrics in idiopathic pulmonary fibrosis: A structure–function study},
  author = {Jacob, Joseph and Pienn, Michael and Payer, Christian and Urschler, Martin and Kokosi, Maria and Devaraj, Anand and Wells, Athol U. and Olschewski, Horst},
  journal = {Respirology},
  year = {2019},
  volume = {24},
  number = {5},
  pages = {445-452},
  month = {5},
  doi = {10.1111/resp.13485},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/resp.13485},
  keywords = {interstitial lung disease, lung fibrosis, radiology and other imaging, respiratory structure and function},
  abstract = {Background and objective: This study aimed to investigate whether quantitative lung vessel morphology determined by a new fully automated algorithm is associated with functional indices in idiopathic pulmonary fibrosis (IPF). Methods: A total of 152 IPF patients had vessel volume, density, tortuosity and heterogeneity quantified from computed tomography (CT) images by a fully automated algorithm. Separate quantitation of vessel metrics in pulmonary arteries and veins was performed in 106 patients. Results were evaluated against readouts from lung function tests. Results: Normalized vessel volume expressed as a percentage of total lung volume was moderately correlated with functional indices on univariable linear regression analysis: forced vital capacity (R^2 = 0.27, P < 1 × 10^-6), diffusion capacity for carbon monoxide (DLCO; R^2 = 0.12, P = 3 × 10^-5), total lung capacity (TLC; R^2 = 0.45, P < 1 × 10^-6) and composite physiologic index (CPI; R^2 = 0.28, P < 1 × 10^-6). Normalized vessel volume was correlated with vessel density but not with vessel heterogeneity. Quantitatively derived vessel metrics (and artery and vein subdivision scores) were not significantly linked with the transfer factor for carbon monoxide (KCO), and only weakly with DLCO. On multivariable linear regression analysis, normalized vessel volume and vessel heterogeneity were independently linked with DLCO, TLC and CPI indicating that they capture different aspects of lung damage. Artery-vein separation provided no additional information beyond that captured in the whole vasculature. Conclusion: Our study confirms previous observations of links between vessel volume and functional measures of disease severity in IPF using a new vessel quantitation tool. Additionally, the new tool shows independent linkages of normalized vessel volume and vessel heterogeneity with functional indices. Quantitative vessel metrics do not appear to reflect vasculopathic damage in IPF.}
}
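The univariable associations in this abstract are plain R^2 values from linear regression. For illustration only, a sketch of computing such a readout with SciPy on synthetic data (the variable names and the synthetic relationship are assumptions, not the study data):

import numpy as np
from scipy.stats import linregress

# Synthetic stand-ins: normalized vessel volume (% of total lung volume)
# against a lung-function readout such as FVC (% predicted).
rng = np.random.default_rng(1)
vessel_vol = rng.normal(3.0, 0.8, size=152)
fvc = 110.0 - 12.0 * vessel_vol + rng.normal(0.0, 10.0, size=152)

fit = linregress(vessel_vol, fvc)
print(f"R^2 = {fit.rvalue ** 2:.2f}, P = {fit.pvalue:.2g}")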
Evaluation of algorithms for Multi-Modality Whole Heart Segmentation: An open-access grand challenge.
Zhuang, X.; Li, L.; Payer, C.; Štern, D.; Urschler, M.; Heinrich, M. P.; Oster, J.; Wang, C.; Smedby, Ö.; Bian, C.; Yang, X.; Heng, P.; Mortazi, A.; Bagci, U.; Yang, G.; Sun, C.; Galisot, G.; Ramel, J.; Brouard, T.; Tong, Q.; Si, W.; Liao, X.; Zeng, G.; Shi, Z.; Zheng, G.; Wang, C.; MacGillivray, T.; Newby, D.; Rhode, K.; Ourselin, S.; Mohiaddin, R.; Keegan, J.; Firmin, D.; and Yang, G.
Medical Image Analysis, 58: 101537. December 2019.
@article{Zhuang2019MIA,
  title = {Evaluation of algorithms for Multi-Modality Whole Heart Segmentation: An open-access grand challenge},
  author = {Zhuang, Xiahai and Li, Lei and Payer, Christian and Štern, Darko and Urschler, Martin and Heinrich, Mattias P. and Oster, Julien and Wang, Chunliang and Smedby, Örjan and Bian, Cheng and Yang, Xin and Heng, Pheng-Ann and Mortazi, Aliasghar and Bagci, Ulas and Yang, Guanyu and Sun, Chenchen and Galisot, Gaetan and Ramel, Jean-Yves and Brouard, Thierry and Tong, Qianqian and Si, Weixin and Liao, Xiangyun and Zeng, Guodong and Shi, Zenglin and Zheng, Guoyan and Wang, Chengjia and MacGillivray, Tom and Newby, David and Rhode, Kawal and Ourselin, Sebastien and Mohiaddin, Raad and Keegan, Jennifer and Firmin, David and Yang, Guang},
  journal = {Medical Image Analysis},
  year = {2019},
  volume = {58},
  pages = {101537},
  month = {12},
  doi = {10.1016/j.media.2019.101537},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841519300751},
  keywords = {Benchmark, Challenge, Multi-modality, Whole Heart Segmentation},
  abstract = {Knowledge of whole heart anatomy is a prerequisite for many clinical applications. Whole heart segmentation (WHS), which delineates substructures of the heart, can be very valuable for modeling and analysis of the anatomy and functions of the heart. However, automating this segmentation can be challenging due to the large variation of the heart shape, and different image qualities of the clinical data. To achieve this goal, an initial set of training data is generally needed for constructing priors or for training. Furthermore, it is difficult to perform comparisons between different methods, largely due to differences in the datasets and evaluation metrics used. This manuscript presents the methodologies and evaluation results for the WHS algorithms selected from the submissions to the Multi-Modality Whole Heart Segmentation (MM-WHS) challenge, in conjunction with MICCAI 2017. The challenge provided 120 three-dimensional cardiac images covering the whole heart, including 60 CT and 60 MRI volumes, all acquired in clinical environments with manual delineation. Ten algorithms for CT data and eleven algorithms for MRI data, submitted from twelve groups, have been evaluated. The results showed that the performance of CT WHS was generally better than that of MRI WHS. The segmentation of the substructures for different categories of patients could present different levels of challenge due to the difference in imaging and variations of heart shapes. The deep learning (DL)-based methods demonstrated great potential, though several of them reported poor results in the blinded evaluation. Their performance could vary greatly across different network structures and training strategies. The conventional algorithms, mainly based on multi-atlas segmentation, demonstrated good performance, though the accuracy and computational efficiency could be limited. The challenge, including provision of the annotated training data and the blinded evaluation for submitted algorithms on the test data, continues as an ongoing benchmarking resource via its homepage (www.sdspeople.fudan.edu.cn/zhuangxiahai/0/mmwhs/).}
}
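Challenge submissions of this kind are ranked largely by the Dice similarity coefficient per heart substructure. A self-contained sketch of that metric for multi-label volumes (synthetic toy data, not the challenge's evaluation code):

import numpy as np

def dice_per_label(seg, ref, labels):
    # Dice = 2|S ∩ R| / (|S| + |R|), computed separately for each label.
    scores = {}
    for label in labels:
        s, r = seg == label, ref == label
        denom = s.sum() + r.sum()
        scores[label] = 2.0 * (s & r).sum() / denom if denom else float("nan")
    return scores

rng = np.random.default_rng(2)
ref = rng.integers(0, 3, size=(8, 8, 8))  # 0 = background, 1-2 = substructures
seg = ref.copy()
seg[rng.random(ref.shape) < 0.1] = 0      # corrupt 10% of voxels
print(dice_per_label(seg, ref, labels=[1, 2]))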
Automated age estimation from MRI volumes of the hand.
Štern, D.; Payer, C.; and Urschler, M.
Medical Image Analysis, 58: 101538. December 2019.
@article{Stern2019MIA,
  title = {Automated age estimation from MRI volumes of the hand},
  author = {Štern, Darko and Payer, Christian and Urschler, Martin},
  journal = {Medical Image Analysis},
  year = {2019},
  volume = {58},
  pages = {101538},
  month = {12},
  doi = {10.1016/j.media.2019.101538},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841518304791},
  abstract = {Highly relevant for both clinical and legal medicine applications, the established radiological methods for estimating unknown age in children and adolescents are based on visual examination of bone ossification in X-ray images of the hand. Our group has initiated the development of fully automatic age estimation methods from 3D MRI scans of the hand, in order to simultaneously overcome the problems of the radiological methods including (1) exposure to ionizing radiation, (2) necessity to define new, MRI specific staging systems, and (3) subjective influence of the examiner. The present work provides a theoretical background for understanding the nonlinear regression problem of biological age estimation and chronological age approximation. Based on this theoretical background, we comprehensively evaluate machine learning methods (random forests, deep convolutional neural networks) with different simplifications of the image information used as an input for learning. Trained on a large dataset of 328 MR images, we compare the performance of the different input strategies and demonstrate unprecedented results. For estimating biological age, we obtain a mean absolute error of 0.37 ± 0.51 years for the age range of the subjects ≤ 18 years, i.e. where bone ossification has not yet saturated. Finally, we validate our findings by adapting our best performing method to 2D images and applying it to a publicly available dataset of X-ray images, showing that we are in line with the state-of-the-art automatic methods for this task.}
}
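The reported 0.37 ± 0.51 years is the mean and standard deviation of the absolute regression errors. A one-screen sketch of that statistic on synthetic ages (the error model is an assumption for illustration):

import numpy as np

rng = np.random.default_rng(3)
true_age = rng.uniform(3.0, 18.0, size=240)           # ossification not saturated
pred_age = true_age + rng.normal(0.0, 0.6, size=240)  # hypothetical estimator

abs_err = np.abs(pred_age - true_age)
print(f"MAE = {abs_err.mean():.2f} ± {abs_err.std():.2f} years")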
Segmenting and tracking cell instances with cosine embeddings and recurrent hourglass networks.
Payer, C.; Štern, D.; Feiner, M.; Bischof, H.; and Urschler, M.
Medical Image Analysis, 57: 106-119. October 2019.
@article{Payer2019a,
  title = {Segmenting and tracking cell instances with cosine embeddings and recurrent hourglass networks},
  author = {Payer, Christian and Štern, Darko and Feiner, Marlies and Bischof, Horst and Urschler, Martin},
  journal = {Medical Image Analysis},
  year = {2019},
  volume = {57},
  pages = {106-119},
  month = {10},
  doi = {10.1016/j.media.2019.06.015},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S136184151930057X},
  keywords = {Cell, Embeddings, Instances, Recurrent, Segmentation, Tracking, Video},
  abstract = {In contrast to semantic segmentation, instance segmentation assigns unique labels to each individual instance of the same object class. In this work, we propose a novel recurrent fully convolutional network architecture for tracking such instance segmentations over time, which is highly relevant, e.g., in biomedical applications involving cell growth and migration. Our network architecture incorporates convolutional gated recurrent units (ConvGRU) into a stacked hourglass network to utilize temporal information, e.g., from microscopy videos. Moreover, we train our network with a novel embedding loss based on cosine similarities, such that the network predicts unique embeddings for every instance throughout videos, even in the presence of dynamic structural changes due to mitosis of cells. To create the final tracked instance segmentations, the pixel-wise embeddings are clustered among subsequent video frames by using the mean shift algorithm. After showing the performance of the instance segmentation on a static in-house dataset of muscle fibers from H&E-stained microscopy images, we also evaluate our proposed recurrent stacked hourglass network regarding instance segmentation and tracking performance on six datasets from the ISBI cell tracking challenge, where it delivers state-of-the-art results.}
}
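The tracked instances are obtained by clustering pixel-wise embeddings with mean shift; with L2-normalized vectors, Euclidean distances vary monotonically with cosine similarity, which is a common way to approximate cosine-based clustering. A toy sketch under those assumptions (synthetic embeddings, not the paper's network outputs):

import numpy as np
from sklearn.cluster import MeanShift

# Two synthetic 'instances', each a cloud of 3-D pixel embeddings.
rng = np.random.default_rng(4)
emb = np.vstack([
    rng.normal([1.0, 0.0, 0.0], 0.05, size=(200, 3)),
    rng.normal([0.0, 1.0, 0.0], 0.05, size=(200, 3)),
])
emb /= np.linalg.norm(emb, axis=1, keepdims=True)  # unit length -> cosine geometry

labels = MeanShift(bandwidth=0.5).fit_predict(emb)
print("instances found:", len(np.unique(labels)))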
Integrating spatial configuration into heatmap regression based CNNs for landmark localization.
Payer, C.; Štern, D.; Bischof, H.; and Urschler, M.
Medical Image Analysis, 54: 207-219. May 2019.
@article{Payer2019MIAHeatmap,
  title = {Integrating spatial configuration into heatmap regression based CNNs for landmark localization},
  author = {Payer, Christian and Štern, Darko and Bischof, Horst and Urschler, Martin},
  journal = {Medical Image Analysis},
  year = {2019},
  volume = {54},
  pages = {207-219},
  month = {5},
  doi = {10.1016/j.media.2019.03.007},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841518305784},
  keywords = {Anatomical landmarks, Fully convolutional networks, Heatmap regression, Localization},
  abstract = {In many medical image analysis applications, only a limited amount of training data is available due to the costs of image acquisition and the large manual annotation effort required from experts. Training recent state-of-the-art machine learning methods like convolutional neural networks (CNNs) from small datasets is a challenging task. In this work on anatomical landmark localization, we propose a CNN architecture that learns to split the localization task into two simpler sub-problems, reducing the overall need for large training datasets. Our fully convolutional SpatialConfiguration-Net (SCN) learns this simplification due to multiplying the heatmap predictions of its two components and by training the network in an end-to-end manner. Thus, the SCN dedicates one component to locally accurate but ambiguous candidate predictions, while the other component improves robustness to ambiguities by incorporating the spatial configuration of landmarks. In our extensive experimental evaluation, we show that the proposed SCN outperforms related methods in terms of landmark localization error on a variety of size-limited 2D and 3D landmark localization datasets, i.e., hand radiographs, lateral cephalograms, hand MRIs, and spine CTs.}
}
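The core mechanism described here is the element-wise product of two heatmap volumes, so a landmark fires only where a locally accurate candidate and the spatial-configuration response agree. A conceptual sketch of that combination step (random heatmaps stand in for the two CNN components; not the authors' implementation):

import numpy as np

def scn_combine(local_heatmaps, spatial_heatmaps):
    # Gate sharp but ambiguous candidates with the coarse configuration
    # response, then read out one position per landmark channel.
    combined = local_heatmaps * spatial_heatmaps
    coords = [np.unravel_index(np.argmax(h), h.shape) for h in combined]
    return combined, coords

rng = np.random.default_rng(5)
local = rng.random((4, 64, 64))    # 4 landmarks, H x W candidate heatmaps
spatial = rng.random((4, 64, 64))  # spatial-configuration heatmaps
_, coords = scn_combine(local, spatial)
print(coords)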
Matwo-CapsNet: A Multi-label Semantic Segmentation Capsules Network.
Bonheur, S.; Štern, D.; Payer, C.; Pienn, M.; Olschewski, H.; and Urschler, M.
In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 664-672, 2019.
@inproceedings{Bonheur2019,
  title = {Matwo-CapsNet: A Multi-label Semantic Segmentation Capsules Network},
  author = {Bonheur, Savinien and Štern, Darko and Payer, Christian and Pienn, Michael and Olschewski, Horst and Urschler, Martin},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  year = {2019},
  pages = {664-672},
  doi = {10.1007/978-3-030-32254-0_74},
  url = {http://link.springer.com/10.1007/978-3-030-32254-0_74},
  keywords = {capsules network, chest X-ray, convolutional neural network, multi-label, semantic segmentation},
  abstract = {Despite some design limitations, CNNs have been largely adopted by the computer vision community due to their efficacy and versatility. Introduced by Sabour et al. to circumvent some limitations of CNNs, capsules replace scalars with vectors to encode appearance feature representation, allowing better preservation of spatial relationships between whole objects and their parts. They also introduced the dynamic routing mechanism, which allows to weight the contributions of parts to a whole object differently at each inference step. Recently, Hinton et al. have proposed to solely encode pose information to model such part-whole relationships. Additionally, they used a matrix instead of a vector encoding in the capsules framework. In this work, we introduce several improvements to the capsules framework, allowing it to be applied for multi-label semantic segmentation. More specifically, we combine pose and appearance information encoded as matrices into a new type of capsule, i.e. Matwo-Caps. Additionally, we propose a novel routing mechanism, i.e. Dual Routing, which effectively combines these two kinds of information. We evaluate our resulting Matwo-CapsNet on the JSRT chest X-ray dataset by comparing it to SegCaps, a capsule based network for binary segmentation, as well as to other CNN based state-of-the-art segmentation methods, where we show that our Matwo-CapsNet achieves competitive results, while requiring only a fraction of the parameters of other previously proposed methods.}
}
Evaluating Spatial Configuration Constrained CNNs for Localizing Facial and Body Pose Landmarks.
Payer, C.; Štern, D.; and Urschler, M.
In International Conference Image and Vision Computing New Zealand, 2019.
@inproceedings{Payer2019b,
  title = {Evaluating Spatial Configuration Constrained CNNs for Localizing Facial and Body Pose Landmarks},
  author = {Payer, Christian and Štern, Darko and Urschler, Martin},
  booktitle = {International Conference Image and Vision Computing New Zealand},
  year = {2019},
  doi = {10.1109/IVCNZ48456.2019.8961000},
  keywords = {anatomical landmark localization, convolutional neural network, facial image analysis, human pose estimation},
  abstract = {Landmark localization is a widely used task required in medical image analysis and computer vision applications. Formulated in a heatmap regression framework, we have recently proposed a CNN architecture that learns on its own to split the localization task into two simpler sub-problems, dedicating one component to locally accurate but ambiguous predictions, while the other component improves robustness by incorporating the spatial configuration of landmarks to remove ambiguities. We learn this simplification in our SpatialConfiguration-Net (SCN) by multiplying the heatmap predictions of its two components and by training the network in an end-to-end manner, thus achieving regularization similar to e.g. a hand-crafted Markov Random Field model. While we have previously shown localization results solely on data from 2D and 3D medical imaging modalities, in this work our aim is to study the generalization capabilities of our SpatialConfiguration-Net to computer vision problems. Therefore, we evaluate our performance both in terms of accuracy and robustness on a facial alignment task, where we improve upon the state-of-the-art methods, as well as on a human body pose estimation task, where we demonstrate results in line with the recent state-of-the-art.}
}
2018 (10)
Integrating geometric configuration and appearance information into a unified framework for anatomical landmark localization.
Urschler, M.; Ebner, T.; and Štern, D.
Medical Image Analysis, 43(1): 23-36. January 2018.
@article{Urschler2018,
  title = {Integrating geometric configuration and appearance information into a unified framework for anatomical landmark localization},
  author = {Urschler, Martin and Ebner, Thomas and Štern, Darko},
  journal = {Medical Image Analysis},
  year = {2018},
  volume = {43},
  number = {1},
  pages = {23-36},
  month = {1},
  doi = {10.1016/j.media.2017.09.003},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841517301342},
  keywords = {Anatomical landmarks, Coordinate descent, Localization, Random regression forest},
  abstract = {In approaches for automatic localization of multiple anatomical landmarks, disambiguation of locally similar structures as obtained by locally accurate candidate generation is often performed by solely including high level knowledge about geometric landmark configuration. In our novel localization approach, we propose to combine both image appearance information and geometric landmark configuration into a unified random forest framework integrated into an optimization procedure that iteratively refines joint landmark predictions by using the coordinate descent algorithm. Depending on how strong multiple landmarks are correlated in a specific localization task, this integration has the benefit that it remains flexible in deciding whether appearance information or the geometric configuration of multiple landmarks is the stronger cue for solving a localization problem both accurately and robustly. Furthermore, no preliminary choice on how to encode a graphical model describing landmark configuration has to be made. In an extensive evaluation on five challenging datasets involving different 2D and 3D imaging modalities, we show that our proposed method is widely applicable and delivers state-of-the-art results when compared to various other related methods.}
}
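The optimization described here alternates over landmarks, re-selecting each candidate while all others stay fixed. A schematic version of such a coordinate-descent loop (the score interfaces app_score and geom_score are hypothetical placeholders for the forest outputs, not the paper's API):

import numpy as np

def coordinate_descent(candidates, app_score, geom_score, iters=10):
    # candidates[i]: (K, 2) positions for landmark i; app_score[i]: (K,)
    # appearance scores; geom_score(i, pos, others): geometric agreement.
    pick = [int(np.argmax(s)) for s in app_score]  # appearance-only init
    for _ in range(iters):
        for i, (cand, score) in enumerate(zip(candidates, app_score)):
            others = [candidates[j][pick[j]]
                      for j in range(len(candidates)) if j != i]
            total = [score[k] + geom_score(i, cand[k], others)
                     for k in range(len(cand))]
            pick[i] = int(np.argmax(total))  # best choice with others fixed
    return [candidates[i][pick[i]] for i in range(len(candidates))]

Each sweep can only increase the joint score, so the loop converges to a local optimum of the combined appearance-plus-configuration objective.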
Multi-label Whole Heart Segmentation Using CNNs and Anatomical Label Configurations.
Payer, C.; Štern, D.; Bischof, H.; and Urschler, M.
In Pop, M., editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 10663 LNCS, pages 190-198. Springer, Cham, 2018.
@inbook{Payer2018MMWHS,
  chapter = {Multi-label Whole Heart Segmentation Using CNNs and Anatomical Label Configurations},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  author = {Payer, Christian and Štern, Darko and Bischof, Horst and Urschler, Martin},
  editor = {Pop, Mihaela},
  publisher = {Springer, Cham},
  year = {2018},
  volume = {10663 LNCS},
  pages = {190-198},
  doi = {10.1007/978-3-319-75541-0_20},
  url = {http://link.springer.com/10.1007/978-3-319-75541-0_20},
  keywords = {Anatomical label configurations, Convolutional neural network, Heart, Multi-label, Segmentation},
  abstract = {We propose a pipeline of two fully convolutional networks for automatic multi-label whole heart segmentation from CT and MRI volumes. At first, a convolutional neural network (CNN) localizes the center of the bounding box around all heart structures, such that the subsequent segmentation CNN can focus on this region. Trained in an end-to-end manner, the segmentation CNN transforms intermediate label predictions to positions of other labels. Thus, the network learns from the relative positions among labels and focuses on anatomically feasible configurations. Results on the MICCAI 2017 Multi-Modality Whole Heart Segmentation (MM-WHS) challenge show that the proposed architecture performs well on the provided CT and MRI training volumes, delivering in a three-fold cross validation an average Dice Similarity Coefficient over all heart substructures of 88.9% and 79.0%, respectively. Moreover, on the MM-WHS challenge test data we rank first for CT and second for MRI with a whole heart segmentation Dice score of 90.8% and 87%, respectively, leading to an overall first ranking among all participants.}
}
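The two-stage pipeline first regresses the center of a bounding box around the heart, then segments a fixed-size crop around it. A minimal sketch of that cropping step (the sizes and the center are made-up values for illustration):

import numpy as np

def crop_around_center(volume, center, size):
    # Crop a fixed-size region around a predicted bounding-box center so the
    # segmentation network can focus on the heart; clamp to the volume bounds.
    starts = [int(np.clip(c - s // 2, 0, dim - s))
              for c, s, dim in zip(center, size, volume.shape)]
    slices = tuple(slice(st, st + s) for st, s in zip(starts, size))
    return volume[slices]

ct = np.zeros((128, 160, 160), dtype=np.float32)
roi = crop_around_center(ct, center=(60, 80, 84), size=(96, 128, 128))
print(roi.shape)  # (96, 128, 128)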
Pulmonary Lobe Segmentation in CT Images using Alpha-Expansion.
Giuliani, N.; Payer, C.; Pienn, M.; Olschewski, H.; and Urschler, M.
In Proceedings of the 13th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, volume 4, pages 387-394, 2018. SCITEPRESS - Science and Technology Publications.
@inproceedings{Giuliani2018,
  title = {Pulmonary Lobe Segmentation in CT Images using Alpha-Expansion},
  author = {Giuliani, Nicola and Payer, Christian and Pienn, Michael and Olschewski, Horst and Urschler, Martin},
  booktitle = {Proceedings of the 13th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
  year = {2018},
  volume = {4},
  pages = {387-394},
  publisher = {SCITEPRESS - Science and Technology Publications},
  address = {Funchal},
  doi = {10.5220/0006624103870394},
  url = {http://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0006624103870394},
  keywords = {Alpha-expansion, Discrete optimization, Graph cuts, Lung lobe segmentation},
  abstract = {Fully-automatic lung lobe segmentation in pathological lungs is still a challenging task. A new approach for automatic lung lobe segmentation is presented based on airways, vessels, fissures and prior knowledge on lobar shape. The anatomical information and prior knowledge are combined into an energy equation, which is minimized via graph cuts to yield an optimal segmentation. The algorithm is quantitatively validated on an in-house dataset of 25 scans and on the LObe and Lung Analysis 2011 (LOLA11) dataset, which contains a range of different challenging lungs (total of 55) with respect to lobe segmentation. Both experiments achieved solid results including a median absolute distance from manually set fissure markers of 1.04 mm (interquartile range: 0.88-1.09 mm) on the in-house dataset and a score of 0.866 on the LOLA11 dataset. We conclude that our proposed method is robust even in case of pathologies.}
}
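The segmentation is phrased as minimizing an energy of unary (anatomical evidence) and pairwise (smoothness) terms over lobe labels, which alpha-expansion graph cuts can optimize. A deliberately tiny sketch of evaluating such an energy (the Potts smoothness term and the toy numbers are our assumptions, not the paper's exact terms):

import numpy as np

def labeling_energy(labels, unary, neighbors, lam=1.0):
    # Data terms plus a Potts penalty for neighboring voxels that disagree;
    # alpha-expansion searches for label moves that lower this energy.
    data = sum(unary[i][labels[i]] for i in range(len(labels)))
    smooth = sum(labels[i] != labels[j] for i, j in neighbors)
    return data + lam * smooth

unary = np.array([[0.1, 0.9], [0.2, 0.8], [0.7, 0.3], [0.9, 0.1]])
neighbors = [(0, 1), (1, 2), (2, 3)]  # a chain of 4 voxels, 2 lobe labels
print(labeling_energy([0, 0, 1, 1], unary, neighbors))  # -> 1.7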
Reducing acquisition time for MRI-based forensic age estimation.
Neumayer, B.; Schloegl, M.; Payer, C.; Widek, T.; Tschauner, S.; Ehammer, T.; Stollberger, R.; and Urschler, M.
Scientific Reports, 8(1): 2063. December 2018.
@article{Neumayer2018,
  title = {Reducing acquisition time for MRI-based forensic age estimation},
  author = {Neumayer, Bernhard and Schloegl, Matthias and Payer, Christian and Widek, Thomas and Tschauner, Sebastian and Ehammer, Thomas and Stollberger, Rudolf and Urschler, Martin},
  journal = {Scientific Reports},
  year = {2018},
  volume = {8},
  number = {1},
  pages = {2063},
  month = {12},
  doi = {10.1038/s41598-018-20475-1},
  url = {http://www.nature.com/articles/s41598-018-20475-1},
  abstract = {Radiology-based estimation of a living person's unknown age has recently attracted increasing attention due to large numbers of undocumented immigrants entering Europe. To avoid the application of X-ray-based imaging techniques, magnetic resonance imaging (MRI) has been suggested as an alternative imaging modality. Unfortunately, MRI requires prolonged acquisition times, which potentially represents an additional stressor for young refugees. To eliminate this shortcoming, we investigated the degree of reduction in acquisition time that still led to reliable age estimates. Two radiologists randomly assessed original images and two sets of retrospectively undersampled data of 15 volunteers (N = 45 data sets) applying an established radiological age estimation method to images of the hand and wrist. Additionally, a neural network-based age estimation method analyzed four sets of further undersampled images from the 15 volunteers (N = 105 data sets). Furthermore, we compared retrospectively undersampled and acquired undersampled data for three volunteers. To assess reliability with increasing degree of undersampling, intra-rater and inter-rater agreement were analyzed computing signed differences and intra-class correlation. While our findings have to be confirmed by a larger prospective study, the results from both radiological and automatic age estimation showed that reliable age estimation was still possible for acquisition times of 15 seconds.}
}
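Retrospective undersampling, as used above to simulate shorter scans, can be illustrated by keeping only central k-space lines and reconstructing. A generic sketch with NumPy FFTs (the keep-fraction and sampling pattern are illustrative, not the study's protocol):

import numpy as np

def retrospective_undersample(image, keep_fraction=0.25):
    # Keep only the central fraction of phase-encoding lines in k-space,
    # mimicking a proportionally shorter acquisition, then reconstruct.
    k = np.fft.fftshift(np.fft.fft2(image))
    ny = image.shape[0]
    keep = int(ny * keep_fraction)
    lo = (ny - keep) // 2
    mask = np.zeros_like(k)
    mask[lo:lo + keep, :] = 1.0
    return np.abs(np.fft.ifft2(np.fft.ifftshift(k * mask)))

image = np.random.default_rng(6).random((64, 64))
print(retrospective_undersample(image).shape)  # (64, 64), low-pass blurred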
Altersschätzung im Strafverfahren?! [Age estimation in criminal proceedings?!].
Pfeifer, M.; Urschler, M.; Kerbacher, S.; and Riener-Hofer, R.
Journal für Strafrecht, 2018(2): 124-128. 2018.
@article{Pfeifer2018,
  title = {Altersschätzung im Strafverfahren?!},
  author = {Pfeifer, Michael and Urschler, Martin and Kerbacher, Sophie and Riener-Hofer, Reingard},
  journal = {Journal für Strafrecht},
  year = {2018},
  volume = {2018},
  number = {2},
  pages = {124-128}
}
Healthy Lung Vessel Morphology Derived From Thoracic Computed Tomography.
Pienn, M.; Burgard, C.; Payer, C.; Avian, A.; Urschler, M.; Stollberger, R.; Olschewski, A.; Olschewski, H.; Johnson, T.; Meinel, F. G.; and Bálint, Z.
Frontiers in Physiology, 9(APR): 346. April 2018.
@article{Pienn2018,
  title = {Healthy Lung Vessel Morphology Derived From Thoracic Computed Tomography},
  author = {Pienn, Michael and Burgard, Caroline and Payer, Christian and Avian, Alexander and Urschler, Martin and Stollberger, Rudolf and Olschewski, Andrea and Olschewski, Horst and Johnson, Thorsten and Meinel, Felix G. and Bálint, Zoltán},
  journal = {Frontiers in Physiology},
  year = {2018},
  volume = {9},
  number = {APR},
  pages = {346},
  month = {4},
  doi = {10.3389/fphys.2018.00346},
  url = {http://journal.frontiersin.org/article/10.3389/fphys.2018.00346/full},
  keywords = {Artery/vein separation, Automated image analysis, Computed tomography, Healthy reference values, Morphology, Pulmonary circulation},
  abstract = {Knowledge of the lung vessel morphology in healthy subjects is necessary to improve our understanding about the functional network of the lung and to recognize pathologic deviations beyond the normal inter-subject variation. Established values of normal lung morphology have been derived from necropsy material of only very few subjects. In order to determine morphologic readouts from a large number of healthy subjects, computed tomography pulmonary angiography (CTPA) datasets, negative for pulmonary embolism, and other thoracic pathologies, were analyzed using a fully-automatic, in-house developed artery/vein separation algorithm. The number, volume, and tortuosity of the vessels in a diameter range between 2 and 10 mm were determined. Visual inspection of all datasets was used to exclude subjects with poor image quality or inadequate artery/vein separation from the analysis. Validation of the algorithm was performed manually by a radiologist on randomly selected subjects. In 123 subjects (men/women: 55/68), aged 59 ± 17 years, the median overlap between visual inspection and fully-automatic segmentation was 94.6% (69.2-99.9%). The median number of vessel segments in the ranges of 8-10, 6-8, 4-6, and 2-4 mm diameter was 9, 34, 134, and 797, respectively. Number of vessel segments divided by the subject's lung volume was 206 vessels/L with arteries and veins contributing almost equally. In women this vessel density was about 15% higher than in men. Median arterial and venous volumes were 1.52 and 1.54% of the lung volume, respectively. Tortuosity was best described with the sum-of-angles metric and was 142.1 rad/m (138.3-144.5 rad/m). In conclusion, our fully-automatic artery/vein separation algorithm provided reliable measures of pulmonary arteries and veins with respect to age and gender. There was a large variation between subjects in all readouts. No relevant dependence on age, gender, or vessel type was observed. These data may provide reference values for morphometric analysis of lung vessels.}
}
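Among the readouts above, the sum-of-angles metric has a compact definition: the total turning angle between consecutive centerline segments divided by the centerline length. A generic implementation of that definition (the toy centerline is ours, not study data):

import numpy as np

def sum_of_angles(points):
    # Total turning angle (rad) between consecutive segments, divided by
    # the path length (m), for a vessel centerline given in metres.
    p = np.asarray(points, dtype=float)
    seg = np.diff(p, axis=0)
    length = np.linalg.norm(seg, axis=1)
    u = seg / length[:, None]
    cosang = np.clip((u[:-1] * u[1:]).sum(axis=1), -1.0, 1.0)
    return np.arccos(cosang).sum() / length.sum()

# Toy centerline: an arc of radius 5 cm, so tortuosity ≈ 1/r = 20 rad/m.
t = np.linspace(0.0, np.pi / 4, 20)
centerline = np.c_[0.05 * np.cos(t), 0.05 * np.sin(t), np.zeros_like(t)]
print(f"{sum_of_angles(centerline):.1f} rad/m")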
\n
\n\n\n
\n Knowledge of the lung vessel morphology in healthy subjects is necessary to improve our understanding about the functional network of the lung and to recognize pathologic deviations beyond the normal inter-subject variation. Established values of normal lung morphology have been derived from necropsy material of only very few subjects. In order to determine morphologic readouts from a large number of healthy subjects, computed tomography pulmonary angiography (CTPA) datasets, negative for pulmonary embolism, and other thoracic pathologies, were analyzed using a fully-automatic, in-house developed artery/vein separation algorithm. The number, volume, and tortuosity of the vessels in a diameter range between 2 and 10 mm were determined. Visual inspection of all datasets was used to exclude subjects with poor image quality or inadequate artery/vein separation from the analysis. Validation of the algorithm was performed manually by a radiologist on randomly selected subjects. In 123 subjects (men/women: 55/68), aged 59 ± 17 years, the median overlap between visual inspection and fully-automatic segmentation was 94.6% (69.2-99.9%). The median number of vessel segments in the ranges of 8-10, 6-8, 4-6, and 2-4 mm diameter was 9, 34, 134, and 797, respectively. Number of vessel segments divided by the subject's lung volume was 206 vessels/L with arteries and veins contributing almost equally. In women this vessel density was about 15% higher than in men. Median arterial and venous volumes were 1.52 and 1.54% of the lung volume, respectively. Tortuosity was best described with the sum-of-angles metric and was 142.1 rad/m (138.3-144.5 rad/m). In conclusion, our fully-automatic artery/vein separation algorithm provided reliable measures of pulmonary arteries and veins with respect to age and gender. There was a large variation between subjects in all readouts. No relevant dependence on age, gender, or vessel type was observed. These data may provide reference values for morphometric analysis of lung vessels.\n

Pulmonary lobe segmentation in CT images using alpha-expansion.
Giuliani, N.; Payer, C.; Pienn, M.; Olschewski, H.; and Urschler, M.
In VISIGRAPP 2018 - Proceedings of the 13th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, volume 4, pages 387-394, 2018.

@inproceedings{Giuliani2018a,
  title = {Pulmonary lobe segmentation in CT images using alpha-expansion},
  author = {Giuliani, Nicola and Payer, Christian and Pienn, Michael and Olschewski, Horst and Urschler, Martin},
  booktitle = {VISIGRAPP 2018 - Proceedings of the 13th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
  year = {2018},
  volume = {4},
  pages = {387-394},
  doi = {10.5220/0006624103870394},
  keywords = {Alpha-expansion, Discrete optimization, Graph cuts, Lung lobe segmentation}
}

Fully-automatic lung lobe segmentation in pathological lungs is still a challenging task. A new approach for automatic lung lobe segmentation is presented based on airways, vessels, fissures and prior knowledge on lobar shape. The anatomical information and prior knowledge are combined into an energy equation, which is minimized via graph cuts to yield an optimal segmentation. The algorithm is quantitatively validated on an in-house dataset of 25 scans and on the LObe and Lung Analysis 2011 (LOLA11) dataset, which contains a range of different challenging lungs (total of 55) with respect to lobe segmentation. Both experiments achieved solid results including a median absolute distance from manually set fissure markers of 1.04mm (interquartile range: 0.88-1.09mm) on the in-house dataset and a score of 0.866 on the LOLA11 dataset. We conclude that our proposed method is robust even in case of pathologies.
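
Each alpha-expansion step reduces the multi-label lobe labelling problem to a binary s-t min-cut. Below is a simplified sketch of one such move for a Potts-style energy, assuming the PyMaxflow package (`maxflow`); it is not the authors' code, and it omits the auxiliary nodes that the exact construction uses between neighbours whose current labels differ:

import numpy as np
import maxflow  # PyMaxflow, assumed installed

def alpha_expansion_move(labels, unary, alpha, smoothness=1.0):
    """One alpha-expansion move for a Potts energy on a 2D grid.

    Every pixel either keeps its current label or switches to `alpha`,
    decided by a single binary min-cut.

    labels: (H, W) int array, current labelling
    unary:  (H, W, L) array of per-pixel data costs
    """
    h, w = labels.shape
    g = maxflow.Graph[float]()
    nodes = g.add_grid_nodes((h, w))
    g.add_grid_edges(nodes, smoothness)            # 4-neighbour Potts term
    keep_cost = np.take_along_axis(unary, labels[..., None], axis=2)[..., 0]
    keep_cost = np.where(labels == alpha, 1e9, keep_cost)  # alpha pixels stay alpha
    switch_cost = unary[:, :, alpha]
    # sink-side nodes pay the source capacity, i.e. the cost of taking alpha
    g.add_grid_tedges(nodes, switch_cost, keep_cost)
    g.maxflow()
    out = labels.copy()
    out[g.get_grid_segments(nodes)] = alpha
    return out

Sweeping such moves over all labels until no move lowers the energy yields the full alpha-expansion optimization.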

Integrated computer-aided forensic case analysis, presentation, and documentation based on multimodal 3D data.
Bornik, A.; Urschler, M.; Schmalstieg, D.; Bischof, H.; Krauskopf, A.; Schwark, T.; Scheurer, E.; and Yen, K.
Forensic Science International, 287: 12-24. 6 2018.

@article{Bornik2018,
  title = {Integrated computer-aided forensic case analysis, presentation, and documentation based on multimodal 3D data},
  author = {Bornik, Alexander and Urschler, Martin and Schmalstieg, Dieter and Bischof, Horst and Krauskopf, Astrid and Schwark, Thorsten and Scheurer, Eva and Yen, Kathrin},
  journal = {Forensic Science International},
  year = {2018},
  volume = {287},
  pages = {12-24},
  month = {6},
  doi = {10.1016/j.forsciint.2018.03.031},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0379073818301282},
  keywords = {3D visualization, Case illustration, Forensic case analysis, Forensic imaging, Forensigraphy, Software tool}
}

Three-dimensional (3D) crime scene documentation using 3D scanners and medical imaging modalities like computed tomography (CT) and magnetic resonance imaging (MRI) are increasingly applied in forensic casework. Together with digital photography, these modalities enable comprehensive and non-invasive recording of forensically relevant information regarding injuries/pathologies inside the body and on its surface. Furthermore, it is possible to capture traces and items at crime scenes. Such digitally secured evidence has the potential to similarly increase case understanding by forensic experts and non-experts in court. Unlike photographs and 3D surface models, images from CT and MRI are not self-explanatory. Their interpretation and understanding requires radiological knowledge. Findings in tomography data must not only be revealed, but should also be jointly studied with all the 2D and 3D data available in order to clarify spatial interrelations and to optimally exploit the data at hand. This is technically challenging due to the heterogeneous data representations including volumetric data, polygonal 3D models, and images. This paper presents a novel computer-aided forensic toolbox providing tools to support the analysis, documentation, annotation, and illustration of forensic cases using heterogeneous digital data. Conjoint visualization of data from different modalities in their native form and efficient tools to visually extract and emphasize findings help experts to reveal unrecognized correlations and thereby enhance their case understanding. Moreover, the 3D case illustrations created for case analysis represent an efficient means to convey the insights gained from case analysis to forensic non-experts involved in court proceedings like jurists and laymen. The capability of the presented approach in the context of case analysis, its potential to speed up legal procedures and to ultimately enhance legal certainty is demonstrated by introducing a number of representative forensic cases.

Instance Segmentation and Tracking with Cosine Embeddings and Recurrent Hourglass Networks.
Payer, C.; Štern, D.; Neff, T.; Bischof, H.; and Urschler, M.
Volume 11071 LNCS. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 3-11. Springer, Cham, 2018.

@inbook{Payer2018MICCAI,
  chapter = {Instance Segmentation and Tracking with Cosine Embeddings and Recurrent Hourglass Networks},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  author = {Payer, Christian and Štern, Darko and Neff, Thomas and Bischof, Horst and Urschler, Martin},
  publisher = {Springer, Cham},
  year = {2018},
  volume = {11071 LNCS},
  pages = {3-11},
  doi = {10.1007/978-3-030-00934-2_1},
  url = {http://link.springer.com/10.1007/978-3-030-00934-2_1},
  keywords = {Cell, Embeddings, Instances, Recurrent, Segmentation, Tracking, Video}
}

Different to semantic segmentation, instance segmentation assigns unique labels to each individual instance of the same class. In this work, we propose a novel recurrent fully convolutional network architecture for tracking such instance segmentations over time. The network architecture incorporates convolutional gated recurrent units (ConvGRU) into a stacked hourglass network to utilize temporal video information. Furthermore, we train the network with a novel embedding loss based on cosine similarities, such that the network predicts unique embeddings for every instance throughout videos. Afterwards, these embeddings are clustered among subsequent video frames to create the final tracked instance segmentations. We evaluate the recurrent hourglass network by segmenting left ventricles in MR videos of the heart, where it outperforms a network that does not incorporate video information. Furthermore, we show applicability of the cosine embedding loss for segmenting leaf instances on still images of plants. Finally, we evaluate the framework for instance segmentation and tracking on six datasets of the ISBI celltracking challenge, where it shows state-of-the-art performance.
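
The core of the embedding loss can be sketched in a few lines of NumPy: pixels are attracted to their instance's mean embedding, and the means of different instances are pushed towards orthogonality. This illustrates the idea described above, not the authors' exact formulation:

import numpy as np

def cosine_embedding_loss(emb, inst):
    """emb:  (H, W, D) pixel embeddings
    inst: (H, W) integer instance ids, 0 = background (ignored here)
    """
    def unit(v):
        return v / (np.linalg.norm(v, axis=-1, keepdims=True) + 1e-8)

    e = unit(emb)
    ids = [i for i in np.unique(inst) if i != 0]
    means = unit(np.stack([e[inst == i].mean(axis=0) for i in ids]))
    # attraction: 1 - cos(pixel, own instance mean), averaged over pixels
    attract = np.concatenate(
        [1.0 - e[inst == i] @ means[k] for k, i in enumerate(ids)]).mean()
    # repulsion: |cos| between the means of distinct instances
    sim = means @ means.T
    off_diag = sim[~np.eye(len(ids), dtype=bool)]
    repel = np.abs(off_diag).mean() if len(ids) > 1 else 0.0
    return attract + repel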

Sparse-View CT Reconstruction Using Wasserstein GANs.
Thaler, F.; Hammernik, K.; Payer, C.; Urschler, M.; and Štern, D.
Volume 11074 LNCS. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 75-82. Springer, Cham, 2018.

@inbook{Thaler2018,
  chapter = {Sparse-View CT Reconstruction Using Wasserstein GANs},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  author = {Thaler, Franz and Hammernik, Kerstin and Payer, Christian and Urschler, Martin and Štern, Darko},
  publisher = {Springer, Cham},
  year = {2018},
  volume = {11074 LNCS},
  pages = {75-82},
  doi = {10.1007/978-3-030-00129-2_9},
  url = {http://link.springer.com/10.1007/978-3-030-00129-2_9},
  keywords = {Computed tomography, Convolutional neural networks, Generative adversarial networks, L1 loss, Sparse-view reconstruction}
}

We propose a 2D computed tomography (CT) slice image reconstruction method from a limited number of projection images using Wasserstein generative adversarial networks (wGAN). Our wGAN optimizes the 2D CT image reconstruction by utilizing an adversarial loss to improve the perceived image quality as well as an L1 content loss to enforce structural similarity to the target image. We evaluate our wGANs using different weight factors between the two loss functions and compare to a convolutional neural network (CNN) optimized on L1 and the Filtered Backprojection (FBP) method. The evaluation shows that the results generated by the machine learning based approaches are substantially better than those from the FBP method. In contrast to the blurrier looking images generated by the CNNs trained on L1, the wGANs results appear sharper and seem to contain more structural information. We show that a certain amount of projection data is needed to get a correct representation of the anatomical correspondences.
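
The generator objective described above combines an L1 content term with a Wasserstein adversarial term. A hedged PyTorch sketch (tensor names and the default weight are illustrative, not taken from the paper):

import torch
import torch.nn.functional as F

def generator_objective(recon, target, critic_score, adv_weight=1e-3):
    """recon:        generator output (reconstructed slice), (B, 1, H, W)
    target:       full-view reference slice, same shape
    critic_score: critic output for `recon`, (B, 1); higher = more real
    adv_weight:   trade-off between content and adversarial loss
    """
    content = F.l1_loss(recon, target)        # structural similarity to target
    adversarial = -critic_score.mean()        # generator ascends the critic score
    return content + adv_weight * adversarial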

Generative Adversarial Networks to Synthetically Augment Data for Deep Learning based Image Segmentation.
Neff, T.; Payer, C.; Štern, D.; and Urschler, M.
In Proceedings of the OAGM Workshop 2018: Medical Image Analysis, Hall/Tyrol, Austria, pages 22-29, 2018.

@inproceedings{Neff2018,
  title = {Generative Adversarial Networks to Synthetically Augment Data for Deep Learning based Image Segmentation},
  author = {Neff, Thomas and Payer, Christian and Štern, Darko and Urschler, Martin},
  booktitle = {Proceedings of the OAGM Workshop 2018: Medical Image Analysis, Hall/Tyrol, Austria},
  year = {2018},
  pages = {22-29},
  doi = {10.3217/978-3-85125-603-1-07}
}

In recent years, deep learning based methods achieved state-of-the-art performance in many computer vision tasks. However, these methods are typically supervised, and require large amounts of annotated data to train. Acquisition of annotated data can be a costly endeavor, especially for methods requiring pixel-wise annotations such as image segmentation. To circumvent these costs and train on smaller datasets, data augmentation is commonly used to synthetically generate additional training data. A major downside of standard data augmentation methods is that they require knowledge of the underlying task in order to perform well, and introduce additional hyperparameters into the deep learning setup. With the goal to alleviate these issues, we evaluate a data augmentation strategy utilizing Generative Adversarial Networks (GANs). While GANs have shown potential for image synthesis when trained on large datasets, their potential given small, annotated datasets (as is common in e.g. medical image analysis) has not been analyzed in much detail yet. We want to evaluate if GAN-based data augmentation using state-of-the-art methods, such as the Wasserstein GAN with gradient penalty, is a viable strategy for small datasets. We extensively evaluate our method on two image segmentation tasks: medical image segmentation of the left lung of the SCR Lung Database and semantic segmentation of the Cityscapes dataset. For the medical segmentation task, we show that our GAN-based augmentation performs as well as standard data augmentation, and training on purely synthetic data outperforms previously reported results. For the more challenging Cityscapes evaluation, we report that our GAN-based augmentation scheme is competitive with standard data augmentation methods.
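
The "Wasserstein GAN with gradient penalty" named above trains the critic with the following loss. A generic PyTorch sketch of WGAN-GP, not the authors' training code:

import torch

def critic_loss_wgan_gp(critic, real, fake, gp_weight=10.0):
    """`critic` maps a batch of images to one score per sample; all names
    are illustrative."""
    # interpolate between real and generated samples
    eps = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    mix = (eps * real + (1.0 - eps) * fake.detach()).requires_grad_(True)
    # penalize deviation of the critic's gradient norm from 1
    grad = torch.autograd.grad(critic(mix).sum(), mix, create_graph=True)[0]
    penalty = ((grad.flatten(1).norm(2, dim=1) - 1.0) ** 2).mean()
    # Wasserstein estimate plus gradient penalty
    return critic(fake).mean() - critic(real).mean() + gp_weight * penalty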

2017 (8)

Forensische Altersdiagnostik mit Fokus auf den Lebenden [Forensic age estimation with a focus on the living].
Urschler, M.; Pfeifer, M.; Štern, D.; and Widek, T.
In Bergauer, C.; Riener-Hofer, R.; Schwark, T.; and Staudegger, E., editors, Forensigraphie - Möglichkeiten und Grenzen IT-gestützter klinisch-forensischer Bildgebung [Forensigraphy - possibilities and limits of IT-supported clinical forensic imaging], pages 189-221. Jan Sramek Verlag Wien, 2017.

@inbook{Urschler2017,
  chapter = {Forensische Altersdiagnostik mit Fokus auf den Lebenden},
  title = {Forensigraphie - Möglichkeiten und Grenzen IT-gestützter klinisch-forensischer Bildgebung},
  author = {Urschler, Martin and Pfeifer, Michael and Štern, Darko and Widek, Thomas},
  editor = {Bergauer, C. and Riener-Hofer, R. and Schwark, T. and Staudegger, E.},
  publisher = {Jan Sramek Verlag Wien},
  year = {2017},
  pages = {189-221}
}

Segmentation and classification of colon glands with deep convolutional neural networks and total variation regularization.
Kainz, P.; Pfeiffer, M.; and Urschler, M.
PeerJ, 5(10): e3874. 10 2017.

@article{Kainz2017,
  title = {Segmentation and classification of colon glands with deep convolutional neural networks and total variation regularization},
  author = {Kainz, Philipp and Pfeiffer, Michael and Urschler, Martin},
  journal = {PeerJ},
  year = {2017},
  volume = {5},
  number = {10},
  pages = {e3874},
  month = {10},
  doi = {10.7717/peerj.3874},
  url = {https://peerj.com/articles/3874},
  keywords = {Colon glands, Deep learning, Malignancy classification, Segmentation}
}

Segmentation of histopathology sections is a necessary preprocessing step for digital pathology. Due to the large variability of biological tissue, machine learning techniques have shown superior performance over conventional image processing methods. Here we present our deep neural network-based approach for segmentation and classification of glands in tissue of benign and malignant colorectal cancer, which was developed to participate in the GlaS@MICCAI2015 colon gland segmentation challenge. We use two distinct deep convolutional neural networks (CNN) for pixel-wise classification of Hematoxylin-Eosin stained images. While the first classifier separates glands from background, the second classifier identifies gland-separating structures. In a subsequent step, a figure-ground segmentation based on weighted total variation produces the final segmentation result by regularizing the CNN predictions. We present both quantitative and qualitative segmentation results on the recently released and publicly available Warwick-QU colon adenocarcinoma dataset associated with the GlaS@MICCAI2015 challenge and compare our approach to the simultaneously developed other approaches that participated in the same challenge. On two test sets, we demonstrate our segmentation performance and show that we achieve a tissue classification accuracy of 98% and 95%, making use of the inherent capability of our system to distinguish between benign and malignant tissue. Our results show that deep learning approaches can yield highly accurate and reproducible results for biomedical image analysis, with the potential to significantly improve the quality and speed of medical diagnoses.
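
As a stand-in for the weighted total variation step described above, the CNN's foreground probability map can be regularized with plain TV denoising before thresholding. A sketch assuming scikit-image; this is a simplification of the weighted TV figure-ground model, not the authors' implementation:

import numpy as np
from skimage.restoration import denoise_tv_chambolle

def regularize_probability_map(prob, weight=0.2, threshold=0.5):
    """Smooth a CNN foreground probability map with (unweighted) total
    variation, then threshold to a binary gland mask."""
    prob = np.clip(prob, 0.0, 1.0)
    smoothed = denoise_tv_chambolle(prob, weight=weight)
    return smoothed >= threshold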

Detection and volume estimation of artificial hematomas in the subcutaneous fatty tissue: comparison of different MR sequences at 3.0 T.
Ogris, K.; Petrovic, A.; Scheicher, S.; Sprenger, H.; Urschler, M.; Hassler, E. M.; Yen, K.; and Scheurer, E.
Forensic Science, Medicine, and Pathology, 13(2): 135-144. 6 2017.

@article{Ogris2017,
  title = {Detection and volume estimation of artificial hematomas in the subcutaneous fatty tissue: comparison of different MR sequences at 3.0 T},
  author = {Ogris, Kathrin and Petrovic, Andreas and Scheicher, Sylvia and Sprenger, Hanna and Urschler, Martin and Hassler, Eva Maria and Yen, Kathrin and Scheurer, Eva},
  journal = {Forensic Science, Medicine, and Pathology},
  year = {2017},
  volume = {13},
  number = {2},
  pages = {135-144},
  month = {6},
  doi = {10.1007/s12024-017-9847-8},
  url = {http://link.springer.com/10.1007/s12024-017-9847-8},
  keywords = {3.0 T MRI, Forensic medicine, Hematoma, Porcine tissue model, Subcutaneous fatty tissue, Volume measurement}
}

In legal medicine, reliable localization and analysis of hematomas in subcutaneous fatty tissue is required for forensic reconstruction. Due to the absence of ionizing radiation, magnetic resonance imaging (MRI) is particularly suited to examining living persons with forensically relevant injuries. However, there is limited experience regarding MRI signal properties of hemorrhage in soft tissue. The aim of this study was to evaluate MR sequences with respect to their ability to show high contrast between hematomas and subcutaneous fatty tissue as well as to reliably determine the volume of artificial hematomas. Porcine tissue models were prepared by injecting blood into the subcutaneous fatty tissue to create artificial hematomas. MR images were acquired at 3T and four blinded observers conducted manual segmentation of the hematomas. To assess segmentability, the agreement of measured volume with the known volume of injected blood was statistically analyzed. A physically motivated normalization taking into account partial volume effect was applied to the data to ensure comparable results among differently sized hematomas. The inversion recovery sequence exhibited the best segmentability rate, whereas the T1T2w turbo spin echo sequence showed the most accurate results regarding volume estimation. Both sequences led to reproducible volume estimations. This study demonstrates that MRI is a promising forensic tool to assess and visualize even very small amounts of blood in soft tissue. The presented results enable the improvement of protocols for detection and volume determination of hemorrhage in forensically relevant cases and also provide fundamental knowledge for future in-vivo examinations.
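
Volume estimation from a segmentation reduces to voxel counting times voxel volume, optionally summing fractional occupancies at the boundary as a crude partial-volume correction. A NumPy sketch; the normalization used in the paper is physically motivated and more involved:

import numpy as np

def mask_volume_ml(mask, voxel_size_mm):
    """Volume of a binary segmentation in millilitres.

    mask: boolean array of segmented hematoma voxels
    voxel_size_mm: (dz, dy, dx) voxel spacing in mm
    """
    voxel_ml = np.prod(voxel_size_mm) / 1000.0    # mm^3 -> ml
    return mask.sum() * voxel_ml

def soft_mask_volume_ml(partial_volume, voxel_size_mm):
    """Same, but summing fractional voxel occupancies in [0, 1] to
    account roughly for partial volume effects at the boundary."""
    voxel_ml = np.prod(voxel_size_mm) / 1000.0
    return partial_volume.sum() * voxel_ml

# e.g. 1 ml of injected blood segmented on a 0.5 x 0.5 x 2.0 mm grid
mask = np.zeros((40, 40, 10), dtype=bool)
mask[10:30, 10:30, 2:7] = True
print(mask_volume_ml(mask, (0.5, 0.5, 2.0)))      # -> 1.0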

Gland segmentation in colon histology images: The GlaS challenge contest.
Sirinukunwattana, K.; Pluim, J. P.; Chen, H.; Qi, X.; Heng, P.; Guo, Y. B.; Wang, L. Y.; Matuszewski, B. J.; Bruni, E.; Sanchez, U.; Böhm, A.; Ronneberger, O.; Cheikh, B. B.; Racoceanu, D.; Kainz, P.; Pfeiffer, M.; Urschler, M.; Snead, D. R.; and Rajpoot, N. M.
Medical Image Analysis, 35(1): 489-502. 1 2017.

@article{Sirinukunwattana2017,
  title = {Gland segmentation in colon histology images: The GlaS challenge contest},
  author = {Sirinukunwattana, Korsuk and Pluim, Josien P. W. and Chen, Hao and Qi, Xiaojuan and Heng, Pheng-Ann and Guo, Yun Bo and Wang, Li Yang and Matuszewski, Bogdan J. and Bruni, Elia and Sanchez, Urko and Böhm, Anton and Ronneberger, Olaf and Cheikh, Bassem Ben and Racoceanu, Daniel and Kainz, Philipp and Pfeiffer, Michael and Urschler, Martin and Snead, David R. J. and Rajpoot, Nasir M.},
  journal = {Medical Image Analysis},
  year = {2017},
  volume = {35},
  number = {1},
  pages = {489-502},
  month = {1},
  doi = {10.1016/j.media.2016.08.008},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841516301542},
  keywords = {Colon cancer, Digital pathology, Histology image analysis, Intestinal gland, Segmentation}
}

Colorectal adenocarcinoma originating in intestinal glandular structures is the most common form of colon cancer. In clinical practice, the morphology of intestinal glands, including architectural appearance and glandular formation, is used by pathologists to inform prognosis and plan the treatment of individual patients. However, achieving good inter-observer as well as intra-observer reproducibility of cancer grading is still a major challenge in modern pathology. An automated approach which quantifies the morphology of glands is a solution to the problem. This paper provides an overview to the Gland Segmentation in Colon Histology Images Challenge Contest (GlaS) held at MICCAI’2015. Details of the challenge, including organization, dataset and evaluation criteria, are presented, along with the method descriptions and evaluation results from the top performing methods.

Evaluation and comparison of 3D intervertebral disc localization and segmentation methods for 3D T2 MR data: A grand challenge.
Zheng, G.; Chu, C.; Belavý, D. L.; Ibragimov, B.; Korez, R.; Vrtovec, T.; Hutt, H.; Everson, R.; Meakin, J.; Andrade, I. L.; Glocker, B.; Chen, H.; Dou, Q.; Heng, P.; Wang, C.; Forsberg, D.; Neubert, A.; Fripp, J.; Urschler, M.; Stern, D.; Wimmer, M.; Novikov, A. A.; Cheng, H.; Armbrecht, G.; Felsenberg, D.; and Li, S.
Medical Image Analysis, 35(1): 327-344. 1 2017.

@article{Zheng2017,
  title = {Evaluation and comparison of 3D intervertebral disc localization and segmentation methods for 3D T2 MR data: A grand challenge},
  author = {Zheng, Guoyan and Chu, Chengwen and Belavý, Daniel L. and Ibragimov, Bulat and Korez, Robert and Vrtovec, Tomaž and Hutt, Hugo and Everson, Richard and Meakin, Judith and Andrade, Isabel López and Glocker, Ben and Chen, Hao and Dou, Qi and Heng, Pheng-Ann and Wang, Chunliang and Forsberg, Daniel and Neubert, Aleš and Fripp, Jurgen and Urschler, Martin and Stern, Darko and Wimmer, Maria and Novikov, Alexey A. and Cheng, Hui and Armbrecht, Gabriele and Felsenberg, Dieter and Li, Shuo},
  journal = {Medical Image Analysis},
  year = {2017},
  volume = {35},
  number = {1},
  pages = {327-344},
  month = {1},
  doi = {10.1016/j.media.2016.08.005},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841516301530},
  keywords = {Challenge, Evaluation, Intervertebral disc, Localization, MRI, Segmentation}
}

The evaluation of changes in Intervertebral Discs (IVDs) with 3D Magnetic Resonance (MR) Imaging (MRI) can be of interest for many clinical applications. This paper presents the evaluation of both IVD localization and IVD segmentation methods submitted to the Automatic 3D MRI IVD Localization and Segmentation challenge, held at the 2015 International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI2015) with an on-site competition. With the construction of a manually annotated reference data set composed of 25 3D T2-weighted MR images acquired from two different studies and the establishment of a standard validation framework, quantitative evaluation was performed to compare the results of methods submitted to the challenge. Experimental results show that overall the best localization method achieves a mean localization distance of 0.8 mm and the best segmentation method achieves a mean Dice of 91.8%, a mean average absolute distance of 1.1 mm and a mean Hausdorff distance of 4.3 mm, respectively. The strengths and drawbacks of each method are discussed, which provides insights into the performance of different IVD localization and segmentation methods.
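
Metrics like the Dice and Hausdorff values quoted above can be computed from binary masks as follows; a sketch using SciPy distance transforms so that anisotropic voxel spacing is respected (mask arrays and the spacing tuple are assumptions):

import numpy as np
from scipy.ndimage import distance_transform_edt

def dice(a, b):
    """Dice overlap between two binary segmentations."""
    a, b = a.astype(bool), b.astype(bool)
    return 2.0 * (a & b).sum() / (a.sum() + b.sum())

def hausdorff_mm(a, b, spacing):
    """Symmetric Hausdorff distance (in mm) between the voxel sets of two
    binary masks, via Euclidean distance transforms of the complements."""
    a, b = a.astype(bool), b.astype(bool)
    dist_to_a = distance_transform_edt(~a, sampling=spacing)
    dist_to_b = distance_transform_edt(~b, sampling=spacing)
    return max(dist_to_a[b].max(), dist_to_b[a].max())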

Generative Adversarial Network based Synthesis for Supervised Medical Image Segmentation.
Neff, T.; Payer, C.; Štern, D.; and Urschler, M.
In Proceedings of the OAGM&ARW Joint Workshop 2017: Vision, Automation and Robotics, pages 140-145, 2017. Verlag der TU Graz.

@inproceedings{Neff2017,
  title = {Generative Adversarial Network based Synthesis for Supervised Medical Image Segmentation},
  author = {Neff, Thomas and Payer, Christian and Štern, Darko and Urschler, Martin},
  booktitle = {Proceedings of the OAGM\&ARW Joint Workshop 2017: Vision, Automation and Robotics},
  year = {2017},
  pages = {140-145},
  publisher = {Verlag der TU Graz},
  address = {Vienna},
  doi = {10.3217/978-3-85125-524-9-30},
  note = {Oral, Best Paper Award}
}

Modern deep learning methods achieve state-of-the-art results in many computer vision tasks. While these methods perform well when trained on large datasets, deep learning methods suffer from overfitting and lack of generalization given smaller datasets. Especially in medical image analysis, acquisition of both imaging data and corresponding ground-truth annotations (e.g. pixel-wise segmentation masks) as required for supervised tasks, is time consuming and costly, since experts are needed to manually annotate data. In this work we study this problem by proposing a new variant of Generative Adversarial Networks (GANs), which, in addition to synthesized medical images, also generates segmentation masks for the use in supervised medical image analysis applications. We evaluate our approach on a lung segmentation task involving thorax X-ray images, and show that GANs have the potential to be used for synthesizing training data in this specific application.
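
The key idea above is a generator whose output carries both an image channel and a mask channel, so that sampled pairs can supervise a segmentation network. A DCGAN-style PyTorch sketch with illustrative layer sizes, not the authors' architecture:

import torch
import torch.nn as nn

class PairGenerator(nn.Module):
    """Generator that synthesizes an (image, mask) pair as a two-channel
    output, so sampled pairs can be used as labelled training data."""
    def __init__(self, z_dim=100):
        super().__init__()
        self.net = nn.Sequential(
            nn.ConvTranspose2d(z_dim, 256, 4, 1, 0), nn.BatchNorm2d(256), nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1), nn.BatchNorm2d(128), nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2, 1), nn.BatchNorm2d(64), nn.ReLU(True),
            nn.ConvTranspose2d(64, 2, 4, 2, 1),   # channel 0: image, channel 1: mask
        )

    def forward(self, z):
        out = self.net(z)                          # (B, 2, 32, 32)
        image = torch.tanh(out[:, :1])             # synthetic X-ray patch
        mask = torch.sigmoid(out[:, 1:])           # matching soft mask
        return image, mask

z = torch.randn(4, 100, 1, 1)
img, msk = PairGenerator()(z)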

Forensic age estimation by morphometric analysis of the manubrium from 3D MR images.
Martínez Vera, N. P.; Höller, J.; Widek, T.; Neumayer, B.; Ehammer, T.; and Urschler, M.
Forensic Science International, 277: 21-29. 8 2017.

@article{MartinezVera2017,
  title = {Forensic age estimation by morphometric analysis of the manubrium from 3D MR images},
  author = {Martínez Vera, Naira P. and Höller, Johannes and Widek, Thomas and Neumayer, Bernhard and Ehammer, Thomas and Urschler, Martin},
  journal = {Forensic Science International},
  year = {2017},
  volume = {277},
  pages = {21-29},
  month = {8},
  doi = {10.1016/j.forsciint.2017.05.005},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0379073817301767},
  keywords = {Forensic age estimation, MRI, Manubrium, Morphometry, Principal component analysis}
}

Forensic age estimation research based on skeletal structures focuses on patterns of growth and development using different bones. In this work, our aim was to study growth-related evolution of the manubrium in living adolescents and young adults using magnetic resonance imaging (MRI), which is an image acquisition modality that does not involve ionizing radiation. In a first step, individual manubrium and subject features were correlated with age, which confirmed a statistically significant change of manubrium volume (Mvol: p < 0.01, R̄² = 0.50) and surface area (Msur: p < 0.01, R̄² = 0.53) for the studied age range. Additionally, shapes of the manubria were for the first time investigated using principal component analysis. The decomposition of the data in principal components allowed to analyse the contribution of each component to total shape variation. With 13 principal components, ~96% of shape variation could be described (Mshp: p < 0.01, R̄² = 0.60). Multiple linear regression analysis modelled the relationship between the statistically best correlated variables and age. Models including manubrium shape, volume or surface area divided by the height of the subject (Y ~ Mshp + Msur/Sh: p < 0.01, R̄² = 0.71; Y ~ Mshp + Mvol/Sh: p < 0.01, R̄² = 0.72) presented a standard error of estimate of two years. In order to estimate the accuracy of these two manubrium-based age estimation models, cross validation experiments predicting age on held-out test sets were performed. Median absolute difference of predicted and known chronological age was 1.18 years for the best performing model (Y ~ Mshp + Msur/Sh: p < 0.01, R²_p = 0.67). In conclusion, despite limitations in determining legal majority age, manubrium morphometry analysis presented statistically significant results for skeletal age estimation, which indicates that this bone structure may be considered as a new candidate in multi-factorial MRI-based age estimation.
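
The PCA-plus-regression pipeline described above is easy to reproduce in outline with scikit-learn; the data below are random stand-ins, and only the structure of the analysis follows the abstract:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import make_pipeline

# X: flattened manubrium shape coordinates per subject (random stand-in);
# y: chronological age in years.
rng = np.random.default_rng(0)
X = rng.normal(size=(120, 300))
y = rng.uniform(13, 25, size=120)

# 13 principal components described ~96% of shape variation above;
# regress age on the component scores and cross-validate.
model = make_pipeline(PCA(n_components=13), LinearRegression())
pred = cross_val_predict(model, X, y, cv=5)
print("median absolute error [years]:", np.median(np.abs(pred - y)))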

Multi-factorial Age Estimation from Skeletal and Dental MRI Volumes.
Štern, D.; Kainz, P.; Payer, C.; and Urschler, M.
Volume 10541 LNCS. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 61-69. Wang, Q.; Shi, Y.; Suk, H.; and Suzuki, K., editors. Springer, Cham, 2017.

@inbook{Stern2017MLMI,
  chapter = {Multi-factorial Age Estimation from Skeletal and Dental MRI Volumes},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  author = {Štern, Darko and Kainz, Philipp and Payer, Christian and Urschler, Martin},
  editor = {Wang, Q. and Shi, Y. and Suk, H.-I. and Suzuki, K.},
  publisher = {Springer, Cham},
  address = {Quebec City},
  year = {2017},
  volume = {10541 LNCS},
  pages = {61-69},
  doi = {10.1007/978-3-319-67389-9_8},
  url = {http://link.springer.com/10.1007/978-3-319-67389-9_8},
  note = {Oral},
  keywords = {Convolutional neural network, Forensic age estimation, Information fusion, Multi-factorial method, Random forest}
}

Age estimation from radiologic data is an important topic in forensic medicine to assess chronological age or to discriminate minors from adults, e.g. asylum seekers lacking valid identification documents. In this work we propose automatic multi-factorial age estimation methods based on MRI data to extend the maximal age range from 19 years, as commonly used for age assessment based on hand bones, up to 25 years, when combined with wisdom teeth and clavicles. Mimicking how radiologists perform age estimation, our proposed method based on deep convolutional neural networks achieves a result of 1.14 ± 0.96 years of mean absolute error in predicting chronological age. Further, when fine-tuning the same network for majority age classification, we show an improvement in sensitivity of the multi-factorial system compared to solely relying on the hand.
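
A multi-factorial estimator of this kind can be sketched as late fusion of per-anatomy features into a single regression head. An illustrative PyTorch skeleton; the branch encoders stand in for the paper's CNNs and all sizes are assumptions:

import torch
import torch.nn as nn

class MultiFactorialAgeNet(nn.Module):
    """Fuse per-anatomy feature vectors (hand, teeth, clavicle) into one
    age estimate by concatenation and a shared regression head."""
    def __init__(self, feat_dim=64):
        super().__init__()
        self.branches = nn.ModuleList(
            [nn.Sequential(nn.Linear(128, feat_dim), nn.ReLU()) for _ in range(3)]
        )
        self.head = nn.Linear(3 * feat_dim, 1)     # fused regression head

    def forward(self, hand, teeth, clavicle):
        feats = [b(x) for b, x in zip(self.branches, (hand, teeth, clavicle))]
        return self.head(torch.cat(feats, dim=1)).squeeze(1)  # age in years

age = MultiFactorialAgeNet()(torch.randn(2, 128), torch.randn(2, 128), torch.randn(2, 128))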

2016 (10)

Optimizing the 3D-reconstruction technique for serial block-face scanning electron microscopy.
Wernitznig, S.; Sele, M.; Urschler, M.; Zankel, A.; Pölt, P.; Rind, F. C.; and Leitinger, G.
Journal of Neuroscience Methods, 264: 16-24. 5 2016.

@article{Wernitznig2016,
  title = {Optimizing the 3D-reconstruction technique for serial block-face scanning electron microscopy},
  author = {Wernitznig, Stefan and Sele, Mariella and Urschler, Martin and Zankel, Armin and Pölt, Peter and Rind, F. Claire and Leitinger, Gerd},
  journal = {Journal of Neuroscience Methods},
  year = {2016},
  volume = {264},
  pages = {16-24},
  month = {5},
  doi = {10.1016/j.jneumeth.2016.02.019},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0165027016000777},
  keywords = {3D-reconstruction, Locust, Semi-automatic segmentation, Serial block-face scanning electron microscopy}
}
Background: Elucidating the anatomy of neuronal circuits and localizing the synaptic connections between neurons can give us important insights into how the neuronal circuits work. We are using serial block-face scanning electron microscopy (SBEM) to investigate the anatomy of a collision detection circuit including the Lobula Giant Movement Detector (LGMD) neuron in the locust, Locusta migratoria. For this, thousands of serial electron micrographs are produced that allow us to trace the neuronal branching pattern. New method: The reconstruction of neurons was previously done manually by drawing cell outlines of each cell in each image separately. This approach was very time-consuming and troublesome. To make the process more efficient, a new interactive software tool was developed. It uses the contrast between the neuron under investigation and its surroundings for semi-automatic segmentation. Results: For segmentation, the user sets starting regions manually and the algorithm automatically selects a volume within the neuron until the edges corresponding to the neuronal outline are reached. Internally, the algorithm optimizes a 3D active contour segmentation model formulated as a cost function that takes the SEM image edges into account. This reduced the reconstruction time while staying close to the manual reference segmentation result. Comparison with existing methods: Our algorithm is easy to use for a fast segmentation process; unlike previous methods it requires neither image training nor extended computing capacity. Conclusion: Our semi-automatic segmentation algorithm led to a dramatic reduction in processing time for the 3D-reconstruction of identified neurons.
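A rough analogue of such seeded, contrast-driven segmentation can be sketched with scikit-image's morphological active contours. This is not the authors' tool; the seed placement and parameters below are invented for illustration.

```python
import numpy as np
from skimage.segmentation import morphological_chan_vese

def segment_from_seed(volume, seed_zyx, radius=3, iterations=100):
    """Grow a contrast-based segmentation outward from a user-set seed box."""
    init = np.zeros(volume.shape, dtype=np.int8)
    z, y, x = seed_zyx
    init[z-radius:z+radius, y-radius:y+radius, x-radius:x+radius] = 1
    # Morphological Chan-Vese evolves the seed region towards a contour
    # separating the (darker) neuron interior from its surroundings.
    return morphological_chan_vese(volume, iterations, init_level_set=init)

stack = np.random.rand(32, 64, 64)           # stand-in for an SBEM image stack
neuron_mask = segment_from_seed(stack, (16, 32, 32))
```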
From individual hand bone age estimates to fully automated age estimation via learning-based information fusion. Stern, D.; and Urschler, M. In 2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI), volume 2016-June, pages 150-154, April 2016. IEEE.
@inproceedings{\n title = {From individual hand bone age estimates to fully automated age estimation via learning-based information fusion},\n type = {inproceedings},\n year = {2016},\n keywords = {age estimation,hand bones,information fusion,magnetic resonance (MR),regression random forest},\n pages = {150-154},\n volume = {2016-June},\n websites = {https://ieeexplore.ieee.org/document/7493232/},\n month = {4},\n publisher = {IEEE},\n city = {Prague},\n id = {d9c946ca-acdc-3982-918c-e562f37c0885},\n created = {2016-06-13T17:12:00.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:53.455Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Stern2016c},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Increasingly important for both clinical and forensic medicine, radiological age estimation is performed by fusing independent bone age estimates from hand images. In this work, we show that the artificial separation into bone independent age estimates as used in established fusion techniques can be overcome. Thus, we treat aging as a global developmental process, by implicitly fusing developmental information from different bones in a dedicated regression algorithm. With 0.82 ± 0.56 years absolute deviation from chronological age on a database of 132 3D MR hand images, the results of this novel automatic algorithm are inline with radiologists performing visual examinations.},\n bibtype = {inproceedings},\n author = {Stern, Darko and Urschler, Martin},\n doi = {10.1109/ISBI.2016.7493232},\n booktitle = {2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI)}\n}
Increasingly important for both clinical and forensic medicine, radiological age estimation is performed by fusing independent bone age estimates from hand images. In this work, we show that the artificial separation into bone-independent age estimates as used in established fusion techniques can be overcome. Thus, we treat aging as a global developmental process, implicitly fusing developmental information from different bones in a dedicated regression algorithm. With 0.82 ± 0.56 years absolute deviation from chronological age on a database of 132 3D MR hand images, the results of this novel automatic algorithm are in line with radiologists performing visual examinations.
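The underlying idea, implicit fusion by training a single regressor on features from all bones jointly, could look as follows with scikit-learn. The feature matrix is random stand-in data, not the paper's image features, and the bone/feature counts are assumptions.

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score

# Stand-in data: 132 subjects, appearance features from 13 bones each,
# concatenated so that one forest fuses all bones implicitly.
rng = np.random.default_rng(0)
X = rng.normal(size=(132, 13 * 20))
y = rng.uniform(13.0, 20.0, size=132)        # chronological ages in years

forest = RandomForestRegressor(n_estimators=200, random_state=0)
mae = -cross_val_score(forest, X, y, cv=5,
                       scoring="neg_mean_absolute_error").mean()
print(f"cross-validated MAE: {mae:.2f} years")
```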
Automatic Intervertebral Disc Localization and Segmentation in 3D MR Images Based on Regression Forests and Active Contours. Urschler, M.; Hammernik, K.; Ebner, T.; and Štern, D. In Lecture Notes in Computer Science, volume 9402, pages 130-140. Vrtovec, T.; Yao, J.; Glocker, B.; Klinder, T.; Frangi, A. F.; Zheng, G.; and Li, S., editors. Springer, Cham, 2016.
@inbook{\n type = {inbook},\n year = {2016},\n pages = {130-140},\n volume = {9402},\n websites = {http://link.springer.com/10.1007/978-3-319-41827-8_13},\n publisher = {Springer, Cham},\n city = {Munich},\n id = {52fd89f9-1a10-3869-9d3d-f12b24e860a6},\n created = {2017-12-02T19:49:53.005Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:45.161Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2016a},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {We introduce a fully automatic localization and segmentation pipeline for three-dimensional (3D) intervertebral discs (IVDs), consisting of a regression-based prediction of vertebral bodies and IVD positions as well as a 3D geodesic active contour segmentation delineating the IVDs. The approach was evaluated on the data set of the challenge in conjunction with the 3rd MICCAI Workshop & Challenge on Computational Methods and Clinical Applications for Spine Imaging -MICCAI– CSI2015, that consists of 15 magnetic resonance images of the lumbar spine with given ground truth segmentations. Based on a localization accuracy of 3.9±1.6 mm, we achieve segmentation results in terms of the Dice similarity coefficient of 89.1 ±2.9% averaged over the whole data set.},\n bibtype = {inbook},\n author = {Urschler, Martin and Hammernik, Kerstin and Ebner, Thomas and Štern, Darko},\n editor = {Vrtovec, Tomaž and Yao, Jianhua and Glocker, Ben and Klinder, Tobias and Frangi, Alejandro F and Zheng, Guoyan and Li, Shuo},\n doi = {10.1007/978-3-319-41827-8_13},\n chapter = {Automatic Intervertebral Disc Localization and Segmentation in 3D MR Images Based on Regression Forests and Active Contours},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
We introduce a fully automatic localization and segmentation pipeline for three-dimensional (3D) intervertebral discs (IVDs), consisting of a regression-based prediction of vertebral body and IVD positions as well as a 3D geodesic active contour segmentation delineating the IVDs. The approach was evaluated on the data set of the challenge held in conjunction with the 3rd MICCAI Workshop & Challenge on Computational Methods and Clinical Applications for Spine Imaging (MICCAI CSI 2015), which consists of 15 magnetic resonance images of the lumbar spine with given ground truth segmentations. Based on a localization accuracy of 3.9 ± 1.6 mm, we achieve segmentation results in terms of the Dice similarity coefficient of 89.1 ± 2.9% averaged over the whole data set.
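The geodesic active contour stage can be approximated with scikit-image's morphological implementation; the centre point, box radius and parameters below are illustrative assumptions, not the paper's settings.

```python
import numpy as np
from skimage.segmentation import (inverse_gaussian_gradient,
                                  morphological_geodesic_active_contour)

def segment_around(volume, centre_zyx, radius=4, iterations=80):
    """Grow a geodesic active contour from a box around a predicted centre;
    the contour is attracted to strong image edges."""
    gimage = inverse_gaussian_gradient(volume, alpha=100.0, sigma=2.0)
    init = np.zeros(volume.shape, dtype=np.int8)
    z, y, x = centre_zyx
    init[z-radius:z+radius, y-radius:y+radius, x-radius:x+radius] = 1
    return morphological_geodesic_active_contour(
        gimage, iterations, init_level_set=init, balloon=1)

vol = np.random.rand(32, 64, 64)             # stand-in for a spine MR volume
mask = segment_around(vol, (16, 32, 32))     # mask around a predicted IVD centre
```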
Automated integer programming based separation of arteries and veins from thoracic CT images. Payer, C.; Pienn, M.; Bálint, Z.; Shekhovtsov, A.; Talakic, E.; Nagy, E.; Olschewski, A.; Olschewski, H.; and Urschler, M. Medical Image Analysis, 34: 109-122, December 2016.
@article{\n title = {Automated integer programming based separation of arteries and veins from thoracic CT images},\n type = {article},\n year = {2016},\n keywords = {Artery-vein separation,Computed tomography,Integer program,Lung,Vascular tree reconstruction},\n pages = {109-122},\n volume = {34},\n websites = {https://linkinghub.elsevier.com/retrieve/pii/S1361841516300317},\n month = {12},\n id = {17d81127-d11b-32fe-8301-41cc9b93236a},\n created = {2017-12-02T19:49:53.363Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:52.804Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Payer2016MIA},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Automated computer-aided analysis of lung vessels has shown to yield promising results for non-invasive diagnosis of lung diseases. To detect vascular changes which affect pulmonary arteries and veins differently, both compartments need to be identified. We present a novel, fully automatic method that separates arteries and veins in thoracic computed tomography images, by combining local as well as global properties of pulmonary vessels. We split the problem into two parts: the extraction of multiple distinct vessel subtrees, and their subsequent labeling into arteries and veins. Subtree extraction is performed with an integer program (IP), based on local vessel geometry. As naively solving this IP is time-consuming, we show how to drastically reduce computational effort by reformulating it as a Markov Random Field. Afterwards, each subtree is labeled as either arterial or venous by a second IP, using two anatomical properties of pulmonary vessels: the uniform distribution of arteries and veins, and the parallel configuration and close proximity of arteries and bronchi. We evaluate algorithm performance by comparing the results with 25 voxel-based manual reference segmentations. On this dataset, we show good performance of the subtree extraction, consisting of very few non-vascular structures (median value: 0.9%) and merged subtrees (median value: 0.6%). The resulting separation of arteries and veins achieves a median voxel-based overlap of 96.3% with the manual reference segmentations, outperforming a state-of-the-art interactive method. In conclusion, our novel approach provides an opportunity to become an integral part of computer aided pulmonary diagnosis, where artery/vein separation is important.},\n bibtype = {article},\n author = {Payer, Christian and Pienn, Michael and Bálint, Zoltán and Shekhovtsov, Alexander and Talakic, Emina and Nagy, Eszter and Olschewski, Andrea and Olschewski, Horst and Urschler, Martin},\n doi = {10.1016/j.media.2016.05.002},\n journal = {Medical Image Analysis},\n number = {12}\n}
Automated computer-aided analysis of lung vessels has been shown to yield promising results for non-invasive diagnosis of lung diseases. To detect vascular changes which affect pulmonary arteries and veins differently, both compartments need to be identified. We present a novel, fully automatic method that separates arteries and veins in thoracic computed tomography images by combining local as well as global properties of pulmonary vessels. We split the problem into two parts: the extraction of multiple distinct vessel subtrees, and their subsequent labeling into arteries and veins. Subtree extraction is performed with an integer program (IP), based on local vessel geometry. As naively solving this IP is time-consuming, we show how to drastically reduce the computational effort by reformulating it as a Markov Random Field. Afterwards, each subtree is labeled as either arterial or venous by a second IP, using two anatomical properties of pulmonary vessels: the uniform distribution of arteries and veins, and the parallel configuration and close proximity of arteries and bronchi. We evaluate algorithm performance by comparing the results with 25 voxel-based manual reference segmentations. On this dataset, we show good performance of the subtree extraction, with very few non-vascular structures (median value: 0.9%) and merged subtrees (median value: 0.6%). The resulting separation of arteries and veins achieves a median voxel-based overlap of 96.3% with the manual reference segmentations, outperforming a state-of-the-art interactive method. In conclusion, our novel approach has the potential to become an integral part of computer-aided pulmonary diagnosis, where artery/vein separation is important.
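A toy version of the artery/vein labeling step can be written as an integer program, for instance with the PuLP package (an assumption; the paper does not prescribe a solver). The subtree scores and volumes are invented, and the uniform-distribution prior is expressed here as a simple volume-balance constraint.

```python
import pulp  # assumed available; any ILP solver would do

subtrees = ["t1", "t2", "t3", "t4"]
bronchus_score = {"t1": 0.9, "t2": 0.2, "t3": 0.7, "t4": 0.1}  # invented
volume = {"t1": 40.0, "t2": 35.0, "t3": 15.0, "t4": 10.0}      # invented

prob = pulp.LpProblem("artery_vein_labeling", pulp.LpMaximize)
is_artery = pulp.LpVariable.dicts("artery", subtrees, cat="Binary")

# Unary term: arteries tend to run close to accompanying bronchi.
prob += pulp.lpSum(bronchus_score[t] * is_artery[t]
                   + (1 - bronchus_score[t]) * (1 - is_artery[t])
                   for t in subtrees)

# Global prior: arterial and venous compartments have similar total volume.
total = sum(volume.values())
prob += pulp.lpSum(volume[t] * is_artery[t] for t in subtrees) >= 0.4 * total
prob += pulp.lpSum(volume[t] * is_artery[t] for t in subtrees) <= 0.6 * total

prob.solve(pulp.PULP_CBC_CMD(msg=False))
print({t: "artery" if is_artery[t].value() == 1 else "vein" for t in subtrees})
```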
A multi-center milestone study of clinical vertebral CT segmentation. Yao, J.; Burns, J. E.; Forsberg, D.; Seitel, A.; Rasoulian, A.; Abolmaesumi, P.; Hammernik, K.; Urschler, M.; Ibragimov, B.; Korez, R.; Vrtovec, T.; Castro-Mateos, I.; Pozo, J. M.; Frangi, A. F.; Summers, R. M.; and Li, S. Computerized Medical Imaging and Graphics, 49: 16-28, April 2016.
@article{\n title = {A multi-center milestone study of clinical vertebral CT segmentation},\n type = {article},\n year = {2016},\n pages = {16-28},\n volume = {49},\n websites = {https://linkinghub.elsevier.com/retrieve/pii/S0895611115001937},\n month = {4},\n id = {6fd86b65-9c66-3b47-bf05-117a22eddaf3},\n created = {2017-12-02T19:49:53.592Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:54.087Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Yao2016},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {A multiple center milestone study of clinical vertebra segmentation is presented in this paper. Vertebra segmentation is a fundamental step for spinal image analysis and intervention. The first half of the study was conducted in the spine segmentation challenge in 2014 International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) Workshop on Computational Spine Imaging (CSI 2014). The objective was to evaluate the performance of several state-of-the-art vertebra segmentation algorithms on computed tomography (CT) scans using ten training and five testing dataset, all healthy cases; the second half of the study was conducted after the challenge, where additional 5 abnormal cases are used for testing to evaluate the performance under abnormal cases. Dice coefficients and absolute surface distances were used as evaluation metrics. Segmentation of each vertebra as a single geometric unit, as well as separate segmentation of vertebra substructures, was evaluated. Five teams participated in the comparative study. The top performers in the study achieved Dice coefficient of 0.93 in the upper thoracic, 0.95 in the lower thoracic and 0.96 in the lumbar spine for healthy cases, and 0.88 in the upper thoracic, 0.89 in the lower thoracic and 0.92 in the lumbar spine for osteoporotic and fractured cases. The strengths and weaknesses of each method as well as future suggestion for improvement are discussed. This is the first multi-center comparative study for vertebra segmentation methods, which will provide an up-to-date performance milestone for the fast growing spinal image analysis and intervention.},\n bibtype = {article},\n author = {Yao, Jianhua and Burns, Joseph E. and Forsberg, Daniel and Seitel, Alexander and Rasoulian, Abtin and Abolmaesumi, Purang and Hammernik, Kerstin and Urschler, Martin and Ibragimov, Bulat and Korez, Robert and Vrtovec, Tomaž and Castro-Mateos, Isaac and Pozo, Jose M. and Frangi, Alejandro F. and Summers, Ronald M. and Li, Shuo},\n doi = {10.1016/j.compmedimag.2015.12.006},\n journal = {Computerized Medical Imaging and Graphics}\n}
A multi-center milestone study of clinical vertebra segmentation is presented in this paper. Vertebra segmentation is a fundamental step for spinal image analysis and intervention. The first half of the study was conducted as the spine segmentation challenge at the 2014 International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) Workshop on Computational Spine Imaging (CSI 2014). The objective was to evaluate the performance of several state-of-the-art vertebra segmentation algorithms on computed tomography (CT) scans using ten training and five testing datasets, all healthy cases; the second half of the study was conducted after the challenge, where an additional five abnormal cases were used for testing to evaluate performance on abnormal cases. Dice coefficients and absolute surface distances were used as evaluation metrics. Segmentation of each vertebra as a single geometric unit, as well as separate segmentation of vertebra substructures, was evaluated. Five teams participated in the comparative study. The top performers in the study achieved Dice coefficients of 0.93 in the upper thoracic, 0.95 in the lower thoracic and 0.96 in the lumbar spine for healthy cases, and 0.88 in the upper thoracic, 0.89 in the lower thoracic and 0.92 in the lumbar spine for osteoporotic and fractured cases. The strengths and weaknesses of each method as well as future suggestions for improvement are discussed. This is the first multi-center comparative study of vertebra segmentation methods, providing an up-to-date performance milestone for the fast-growing field of spinal image analysis and intervention.
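The two evaluation metrics used here are easy to state in code. A small NumPy/SciPy sketch, assuming binary masks and an optional voxel spacing:

```python
import numpy as np
from scipy.ndimage import binary_erosion, distance_transform_edt

def dice(a, b):
    """Dice similarity coefficient of two binary masks."""
    a, b = a.astype(bool), b.astype(bool)
    return 2.0 * np.logical_and(a, b).sum() / (a.sum() + b.sum())

def mean_surface_distance(a, b, spacing=(1.0, 1.0, 1.0)):
    """Symmetric mean absolute surface distance in physical units."""
    a, b = a.astype(bool), b.astype(bool)
    surf_a = a ^ binary_erosion(a)            # boundary voxels of each mask
    surf_b = b ^ binary_erosion(b)
    dist_to_b = distance_transform_edt(~surf_b, sampling=spacing)
    dist_to_a = distance_transform_edt(~surf_a, sampling=spacing)
    return 0.5 * (dist_to_b[surf_a].mean() + dist_to_a[surf_b].mean())
```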
From Local to Global Random Regression Forests: Exploring Anatomical Landmark Localization. Štern, D.; Ebner, T.; and Urschler, M. In Lecture Notes in Computer Science, volume 9901, pages 221-229. Ourselin, S.; Joskowicz, L.; Sabuncu, M.; Unal, G.; and Wells, W., editors. Springer, Cham, 2016.
@inbook{\n type = {inbook},\n year = {2016},\n pages = {221-229},\n volume = {9901 LNCS},\n websites = {http://link.springer.com/10.1007/978-3-319-46723-8_26},\n publisher = {Springer, Cham},\n city = {Athens},\n id = {0016eb39-7138-3956-9a68-a6770ec892ba},\n created = {2018-02-18T20:51:32.701Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:55.432Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Stern2016a},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {State of the art anatomical landmark localization algorithms pair local Random Forest (RF) detection with disambiguation of locally similar structures by including high level knowledge about relative landmark locations. In this work we pursue the question,how much high-level knowledge is needed in addition to a single landmark localization RF to implicitly model the global configuration of multiple,potentially ambiguous landmarks. We further propose a novel RF localization algorithm that distinguishes locally similar structures by automatically identifying them,exploring the back-projection of the response from accurate local RF predictions. In our experiments we show that this approach achieves competitive results in single and multi-landmark localization when applied to 2D hand radiographic and 3D teethMRI data sets. Additionally,when combined with a simple Markov Random Field model,we are able to outperform state of the art methods.},\n bibtype = {inbook},\n author = {Štern, Darko and Ebner, Thomas and Urschler, Martin},\n editor = {Ourselin, S. and Joskowicz, L. and Sabuncu, M. and Unal, G. and Wells, W.},\n doi = {10.1007/978-3-319-46723-8_26},\n chapter = {From Local to Global Random Regression Forests: Exploring Anatomical Landmark Localization},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
State-of-the-art anatomical landmark localization algorithms pair local Random Forest (RF) detection with disambiguation of locally similar structures by including high-level knowledge about relative landmark locations. In this work we pursue the question of how much high-level knowledge is needed, in addition to a single landmark localization RF, to implicitly model the global configuration of multiple, potentially ambiguous landmarks. We further propose a novel RF localization algorithm that distinguishes locally similar structures by automatically identifying them, exploring the back-projection of the response from accurate local RF predictions. In our experiments we show that this approach achieves competitive results in single- and multi-landmark localization when applied to 2D hand radiographic and 3D teeth MRI data sets. Additionally, when combined with a simple Markov Random Field model, we are able to outperform state-of-the-art methods.
Automated Age Estimation from Hand MRI Volumes Using Deep Learning. Štern, D.; Payer, C.; Lepetit, V.; and Urschler, M. In Lecture Notes in Computer Science, volume 9901, pages 194-202. Ourselin, S.; Joskowicz, L.; Sabuncu, M.; Unal, G.; and Wells, W., editors. Springer, Cham, 2016.
@inbook{\n type = {inbook},\n year = {2016},\n pages = {194-202},\n volume = {9901 LNCS},\n websites = {http://link.springer.com/10.1007/978-3-319-46723-8_23},\n publisher = {Springer, Cham},\n city = {Athens},\n id = {894b581c-1758-392e-8ec7-d0254ab9382e},\n created = {2018-02-18T20:51:32.756Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:45.801Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Stern2016},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Biological age (BA) estimation from radiologic data is an important topic in clinical medicine,e.g. in determining endocrinological diseases or planning paediatric orthopaedic surgeries,while in legal medicine it is employed to approximate chronological age. In this work,we propose the use of deep convolutional neural networks (DCNN) for automatic BA estimation from hand MRI volumes,inspired by the way radiologists visually perform age estimation using established staging schemes that follow physical maturation. In our results we outperform the state of the art automatic BA estimation method,achieving a mean error between estimated and ground truth BA of 0.36 ± 0.30 years,which is in line with radiologists doing visual BA estimation.},\n bibtype = {inbook},\n author = {Štern, Darko and Payer, Christian and Lepetit, Vincent and Urschler, Martin},\n editor = {Ourselin, S. and Joskowicz, L. and Sabuncu, M. and Unal, G. and Wells, W.},\n doi = {10.1007/978-3-319-46723-8_23},\n chapter = {Automated Age Estimation from Hand MRI Volumes Using Deep Learning},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
Biological age (BA) estimation from radiologic data is an important topic in clinical medicine, e.g. in determining endocrinological diseases or planning paediatric orthopaedic surgeries, while in legal medicine it is employed to approximate chronological age. In this work, we propose the use of deep convolutional neural networks (DCNN) for automatic BA estimation from hand MRI volumes, inspired by the way radiologists visually perform age estimation using established staging schemes that follow physical maturation. In our results we outperform the state-of-the-art automatic BA estimation method, achieving a mean error between estimated and ground truth BA of 0.36 ± 0.30 years, which is in line with radiologists doing visual BA estimation.
Applicability of Greulich–Pyle and Tanner–Whitehouse grading methods to MRI when assessing hand bone age in forensic age estimation: A pilot study. Urschler, M.; Krauskopf, A.; Widek, T.; Sorantin, E.; Ehammer, T.; Borkenstein, M.; Yen, K.; and Scheurer, E. Forensic Science International, 266: 281-288, September 2016.
@article{\n title = {Applicability of Greulich–Pyle and Tanner–Whitehouse grading methods to MRI when assessing hand bone age in forensic age estimation: A pilot study},\n type = {article},\n year = {2016},\n keywords = {Forensic age estimation,Greulich–Pyle,Hand–wrist,MRI,Tanner–Whitehouse,X-ray},\n pages = {281-288},\n volume = {266},\n websites = {https://linkinghub.elsevier.com/retrieve/pii/S0379073816302687},\n month = {9},\n id = {bf4e4657-92fe-38d8-ae44-3bf2ad5226a2},\n created = {2018-02-18T20:51:32.827Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:50.255Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2016},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Determination of skeletal development is a key pillar in forensic age estimation of living persons. Radiological assessment of hand bone age is widely used until the age of about 17–18 years, applying visual grading techniques to hand radiographs. This study investigated whether Greulich–Pyle (GP) and Tanner–Whitehouse (TW2) grading can be equally used for magnetic resonance imaging (MRI) data, which would offer the huge benefit of avoiding ionizing radiation. In 18 subjects aged between 7 and 17 years a radiograph and an MRI scan of the hand were performed. Epiphyseal ossification of hand bones was rated by two blinded radiologists with both GP and TW2. Correlation between hand MRIs and radiographs was analyzed by linear regression and inter-observer agreement was assessed. Correlation between age estimates from MRI and radiographs was high for both GP (r2 = 0.98) and TW2 (r2 = 0.93). MRI showed a tendency to estimate age slightly lower for 14–18 year-olds, which would be favorable regarding majority age determination in case this result could be reproduced using a currently not existing reference estimation method based on MRI data. Inter-observer agreement was similar for GP in radiographs and MRI, while for TW2, agreement in MRI was lower than in radiographs. In spite of limitations regarding sample size and recruited subjects, our results indicate that the use of GP and TW2 on MRI data offers the possibility of hand bone age estimation without the need for ionizing radiation.},\n bibtype = {article},\n author = {Urschler, Martin and Krauskopf, Astrid and Widek, Thomas and Sorantin, Erich and Ehammer, Thomas and Borkenstein, Martin and Yen, Kathrin and Scheurer, Eva},\n doi = {10.1016/j.forsciint.2016.06.016},\n journal = {Forensic Science International}\n}
Determination of skeletal development is a key pillar in forensic age estimation of living persons. Radiological assessment of hand bone age is widely used until the age of about 17–18 years, applying visual grading techniques to hand radiographs. This study investigated whether Greulich–Pyle (GP) and Tanner–Whitehouse (TW2) grading can be equally applied to magnetic resonance imaging (MRI) data, which would offer the huge benefit of avoiding ionizing radiation. In 18 subjects aged between 7 and 17 years, a radiograph and an MRI scan of the hand were performed. Epiphyseal ossification of hand bones was rated by two blinded radiologists with both GP and TW2. Correlation between hand MRIs and radiographs was analyzed by linear regression, and inter-observer agreement was assessed. Correlation between age estimates from MRI and radiographs was high for both GP (r² = 0.98) and TW2 (r² = 0.93). MRI showed a tendency to estimate age slightly lower for 14–18 year-olds, which would be favorable for majority age determination if this result could be reproduced with an MRI-based reference estimation method, which does not yet exist. Inter-observer agreement was similar for GP in radiographs and MRI, while for TW2, agreement in MRI was lower than in radiographs. In spite of limitations regarding sample size and recruited subjects, our results indicate that the use of GP and TW2 on MRI data offers the possibility of hand bone age estimation without the need for ionizing radiation.
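The reported correlation analysis amounts to a linear regression between paired estimates from the two modalities; a small sketch with SciPy (the numbers below are invented, not study data):

```python
import numpy as np
from scipy.stats import linregress

# Invented paired bone-age estimates (years) for the same subjects.
age_xray = np.array([8.1, 10.4, 12.0, 13.7, 15.2, 16.8, 17.5])
age_mri = np.array([8.0, 10.6, 11.7, 13.5, 14.8, 16.3, 17.0])

fit = linregress(age_xray, age_mri)
print(f"slope = {fit.slope:.2f}, r^2 = {fit.rvalue ** 2:.2f}")
```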
Regressing Heatmaps for Multiple Landmark Localization Using CNNs. Payer, C.; Štern, D.; Bischof, H.; and Urschler, M. In Lecture Notes in Computer Science, volume 9901, pages 230-238. Ourselin, S.; Joskowicz, L.; Sabuncu, M.; Unal, G.; and Wells, W., editors. Springer, Cham, 2016.
@inbook{\n type = {inbook},\n year = {2016},\n pages = {230-238},\n volume = {9901 LNCS},\n websites = {http://link.springer.com/10.1007/978-3-319-46723-8_27},\n publisher = {Springer, Cham},\n city = {Athens},\n id = {c537787e-2933-36cc-958e-189e68891292},\n created = {2018-02-18T20:51:33.053Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:47.711Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Payer2016MICCAI},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {We explore the applicability of deep convolutional neural networks (CNNs) for multiple landmark localization in medical image data. Exploiting the idea of regressing heatmaps for individual landmark locations,we investigate several fully convolutional 2D and 3D CNN architectures by training them in an end-to-end manner. We further propose a novel SpatialConfiguration-Net architecture that effectively combines accurate local appearance responses with spatial landmark configurations that model anatomical variation. Evaluation of our different architectures on 2D and 3D hand image datasets show that heatmap regression based on CNNs achieves state-of-the-art landmark localization performance,with SpatialConfiguration-Net being robust even in case of limited amounts of training data.},\n bibtype = {inbook},\n author = {Payer, Christian and Štern, Darko and Bischof, Horst and Urschler, Martin},\n editor = {Ourselin, S. and Joskowicz, L. and Sabuncu, M. and Unal, G. and Wells, W.},\n doi = {10.1007/978-3-319-46723-8_27},\n chapter = {Regressing Heatmaps for Multiple Landmark Localization Using CNNs},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
We explore the applicability of deep convolutional neural networks (CNNs) for multiple landmark localization in medical image data. Exploiting the idea of regressing heatmaps for individual landmark locations, we investigate several fully convolutional 2D and 3D CNN architectures by training them in an end-to-end manner. We further propose a novel SpatialConfiguration-Net architecture that effectively combines accurate local appearance responses with spatial landmark configurations that model anatomical variation. Evaluation of our different architectures on 2D and 3D hand image datasets shows that heatmap regression based on CNNs achieves state-of-the-art landmark localization performance, with SpatialConfiguration-Net being robust even in the case of limited amounts of training data.
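The core of heatmap regression is the target representation: one Gaussian-blob channel per landmark, regressed by a fully convolutional network and decoded by a per-channel argmax. A small NumPy sketch of target generation and decoding (image sizes and coordinates are illustrative):

```python
import numpy as np

def landmark_heatmap(shape, centre_yx, sigma=3.0):
    """Target for one landmark: an isotropic Gaussian at the true location."""
    yy, xx = np.mgrid[:shape[0], :shape[1]]
    d2 = (yy - centre_yx[0]) ** 2 + (xx - centre_yx[1]) ** 2
    return np.exp(-d2 / (2.0 * sigma ** 2))

# One channel per landmark; a fully convolutional net regresses this stack.
targets = np.stack([landmark_heatmap((128, 128), c)
                    for c in [(40, 50), (70, 90)]])

# At test time, each landmark is decoded as the argmax of its channel.
pred_yx = np.unravel_index(targets[0].argmax(), targets[0].shape)  # (40, 50)
```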
Automatic localization of locally similar structures based on the scale-widening random regression forest. Stern, D.; Ebner, T.; and Urschler, M. In 2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI), volume 2016-June, pages 1422-1425, April 2016. IEEE.
@inproceedings{\n title = {Automatic localization of locally similar structures based on the scale-widening random regression forest},\n type = {inproceedings},\n year = {2016},\n keywords = {anatomical landmark localization,hand X-ray,random regression forest,scale range of features},\n pages = {1422-1425},\n volume = {2016-June},\n websites = {http://ieeexplore.ieee.org/document/7493534/},\n month = {4},\n publisher = {IEEE},\n city = {Prague},\n id = {ae8d4669-9b1b-33f1-8907-5a05bc95f8db},\n created = {2018-02-18T20:51:33.093Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:48.999Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Stern2016b},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Selection of set of training pixels and feature range show to be critical scale-related parameters with high impact on results in localization methods based on random regression forests (RRF). Trained on pixels randomly selected from images with long range features, RRF captures the variation in landmark location but often without reaching satisfying accuracy. Conversely, training an RRF with short range features in a landmark's close surroundings enables accurate localization, but at the cost of ambiguous localization results in the presence of locally similar structures. We present a scale-widening RRF method that effectively handles such ambiguities. On a challenging hand radiography image data set, we achieve median and 90th percentile localization errors of 0.81 and 2.64mm, respectively, outperforming related state-of-the-art methods.},\n bibtype = {inproceedings},\n author = {Stern, Darko and Ebner, Thomas and Urschler, Martin},\n doi = {10.1109/ISBI.2016.7493534},\n booktitle = {2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI)}\n}
The selection of training pixels and the feature scale range prove to be critical scale-related parameters with a high impact on results in localization methods based on random regression forests (RRF). Trained on pixels randomly selected from images with long-range features, an RRF captures the variation in landmark location but often without reaching satisfying accuracy. Conversely, training an RRF with short-range features in a landmark's close surroundings enables accurate localization, but at the cost of ambiguous localization results in the presence of locally similar structures. We present a scale-widening RRF method that effectively handles such ambiguities. On a challenging hand radiography image data set, we achieve median and 90th percentile localization errors of 0.81 and 2.64 mm, respectively, outperforming related state-of-the-art methods.
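The reported summary statistics are plain percentiles over per-landmark localization errors; for example (error values below are invented):

```python
import numpy as np

errors_mm = np.array([0.4, 0.6, 0.8, 0.9, 1.1, 1.9, 2.7, 3.3])  # per landmark
print(f"median: {np.median(errors_mm):.2f} mm, "
      f"90th percentile: {np.percentile(errors_mm, 90):.2f} mm")
```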
2015 (10)
Assessment of fiducial markers to enable the co-registration of photographs and MRI data. Webb, B. A.; Petrovic, A.; Urschler, M.; and Scheurer, E. Forensic Science International, 248: 148-153, March 2015.
@article{\n title = {Assessment of fiducial markers to enable the co-registration of photographs and MRI data},\n type = {article},\n year = {2015},\n keywords = {Fiducial markers,Forensic medicine,MRI,Multi-modal co-registration,Soft tissue injuries},\n pages = {148-153},\n volume = {248},\n websites = {https://linkinghub.elsevier.com/retrieve/pii/S037907381500002X},\n month = {3},\n id = {fd747ccf-5dcb-3739-a42f-774417cf0d10},\n created = {2015-03-13T13:28:17.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:07.490Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Webb2015},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Purpose: To investigate the visualisation of novel external fiducial skin markers in photography and MRI. To co-register photographs and MR images, and additionally assess the spatial accuracy of these co-registrations with the view of future application in the investigation of forensically relevant soft tissue lesions. Methods and materials: Strand-shaped fiducial markers were secured externally over hematomas on the thigh of 10 volunteers. The region of interest was photographed and examined using MRI at 3 T in oblique and transversal orientations and the visibility of the markers assessed. Markers provided 'control points' in both sets of images, enabling the computation of an affine transform to register oblique MR images to photographs. The fiducial registration error was evaluated by calculating the root-mean-square error of nine corresponding evaluation points visible in both modalities. Results: Fiducial markers were clearly visualised in both photography and MRI. The co-registration of photographs and oblique MR images was achieved for all participants. The overall root-mean-square error for registrations was 1.18. mm (TIRM) and 1.46. mm (TSE2D with SPAIR fat-suppression). Conclusions: The proposed approach led to the successful visualisation of non-invasive fiducial markers using photography and MRI (TIRM and TSE2D (SPAIR) sequences). This visualisation, combined with an affine transformation process provided a simple, cost-effective way to accurately co-register photographs and MR images of subcutaneous hematomas located on the thigh. Further investigation of the novel markers and the proposed co-visualisation approach holds potential to improve not only the forensic documentation of soft tissue lesions, but to also improve certain clinical applications, including the area of dermatology.},\n bibtype = {article},\n author = {Webb, Bridgette A. and Petrovic, Andreas and Urschler, Martin and Scheurer, Eva},\n doi = {10.1016/j.forsciint.2014.12.027},\n journal = {Forensic Science International}\n}
Purpose: To investigate the visualisation of novel external fiducial skin markers in photography and MRI, to co-register photographs and MR images, and additionally to assess the spatial accuracy of these co-registrations with a view to future application in the investigation of forensically relevant soft tissue lesions. Methods and materials: Strand-shaped fiducial markers were secured externally over hematomas on the thigh of 10 volunteers. The region of interest was photographed and examined using MRI at 3 T in oblique and transversal orientations, and the visibility of the markers was assessed. Markers provided 'control points' in both sets of images, enabling the computation of an affine transform to register oblique MR images to photographs. The fiducial registration error was evaluated by calculating the root-mean-square error of nine corresponding evaluation points visible in both modalities. Results: Fiducial markers were clearly visualised in both photography and MRI. The co-registration of photographs and oblique MR images was achieved for all participants. The overall root-mean-square error for registrations was 1.18 mm (TIRM) and 1.46 mm (TSE2D with SPAIR fat-suppression). Conclusions: The proposed approach led to the successful visualisation of non-invasive fiducial markers using photography and MRI (TIRM and TSE2D (SPAIR) sequences). This visualisation, combined with an affine transformation process, provided a simple, cost-effective way to accurately co-register photographs and MR images of subcutaneous hematomas located on the thigh. Further investigation of the novel markers and the proposed co-visualisation approach holds potential to improve not only the forensic documentation of soft tissue lesions, but also certain clinical applications, including the area of dermatology.
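The registration step, a least-squares affine transform estimated from marker control points, together with the root-mean-square fiducial registration error, can be sketched in a few lines of NumPy (the point coordinates below are invented):

```python
import numpy as np

def fit_affine(src, dst):
    """Least-squares 2D affine transform mapping src -> dst (N x 2 arrays)."""
    A = np.hstack([src, np.ones((len(src), 1))])      # N x 3 design matrix
    coeffs, *_ = np.linalg.lstsq(A, dst, rcond=None)  # 3 x 2 affine matrix
    return coeffs

def rmse(src, dst, coeffs):
    """Root-mean-square registration error at evaluation points."""
    mapped = np.hstack([src, np.ones((len(src), 1))]) @ coeffs
    return np.sqrt(((mapped - dst) ** 2).sum(axis=1).mean())

# Marker 'control points' picked in both modalities (hypothetical values, mm).
mr_pts = np.array([[10, 12], [40, 15], [35, 50], [12, 45], [25, 30.]])
photo_pts = np.array([[11, 13], [41, 14], [34, 52], [13, 44], [26, 31.]])
T = fit_affine(mr_pts, photo_pts)
print(f"FRE (RMSE): {rmse(mr_pts, photo_pts, T):.2f} mm")
```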
Vertebrae Segmentation in 3D CT Images Based on a Variational Framework. Hammernik, K.; Ebner, T.; Stern, D.; Urschler, M.; and Pock, T. In Lecture Notes in Computational Vision and Biomechanics, volume 20, pages 227-233. Yao, J.; Glocker, B.; Klinder, T.; and Li, S., editors. Springer, Cham, 2015.
@inbook{\n type = {inbook},\n year = {2015},\n pages = {227-233},\n volume = {20},\n websites = {http://link.springer.com/10.1007/978-3-319-14148-0_20},\n publisher = {Springer, Cham},\n city = {Boston},\n series = {Lecture Notes in Computational Vision and Biomechanics},\n id = {d36cc99a-552d-326e-8289-264d0ef0b8ac},\n created = {2015-04-01T09:05:05.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:08.758Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Hammernik2015},\n notes = {Oral, Honourable Mention Award},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Automatic segmentation of 3D vertebrae is a challenging task in medical imaging. In this paper, we introduce a total variation (TV) based framework that incorporates an a priori model, i.e., a vertebral mean shape, image intensity and edge information. The algorithm was evaluated using leave-one-out cross validation on a data set containing ten computed tomography scans and ground truth segmentations provided for the CSI MICCAI 2014 spine and vertebrae segmentation challenge. We achieve promising results in terms of the Dice Similarity Coefficient (DSC) of 0.93 ± 0.04 averaged over the whole data set.},\n bibtype = {inbook},\n author = {Hammernik, Kerstin and Ebner, Thomas and Stern, Darko and Urschler, Martin and Pock, Thomas},\n editor = {Yao, J. and Glocker, B. and Klinder, T. and Li, S.},\n doi = {10.1007/978-3-319-14148-0_20},\n chapter = {Vertebrae Segmentation in 3D CT Images Based on a Variational Framework},\n title = {Lecture Notes in Computational Vision and Biomechanics}\n}
\n Automatic segmentation of 3D vertebrae is a challenging task in medical imaging. In this paper, we introduce a total variation (TV) based framework that incorporates an a priori model, i.e., a vertebral mean shape, image intensity and edge information. The algorithm was evaluated using leave-one-out cross validation on a data set containing ten computed tomography scans and ground truth segmentations provided for the CSI MICCAI 2014 spine and vertebrae segmentation challenge. We achieve promising results in terms of the Dice Similarity Coefficient (DSC) of 0.93 ± 0.04 averaged over the whole data set.\n
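The total-variation building block of such variational frameworks can be demonstrated with scikit-image's Chambolle solver; note that this shows only the TV regularizer, not the paper's full energy with its mean-shape prior.

```python
import numpy as np
from skimage.restoration import denoise_tv_chambolle

# Stand-in for a CT sub-volume around one vertebra (values are arbitrary).
volume = np.random.rand(16, 64, 64)

# TV regularisation smooths homogeneous regions while preserving edges;
# 'weight' trades data fidelity against smoothness.
tv_smoothed = denoise_tv_chambolle(volume, weight=0.1)

# A (crude) segmentation could then threshold the TV-smoothed volume.
mask = tv_smoothed > tv_smoothed.mean()
```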
Automatic Artery-Vein Separation from Thoracic CT Images Using Integer Programming. Payer, C.; Pienn, M.; Bálint, Z.; Olschewski, A.; Olschewski, H.; and Urschler, M. In Lecture Notes in Computer Science, volume 9350, pages 36-43. Navab, N.; Hornegger, J.; Wells, W.; and Frangi, A. F., editors. Springer, Cham, 2015.
@inbook{\n type = {inbook},\n year = {2015},\n pages = {36-43},\n volume = {9350},\n websites = {http://link.springer.com/10.1007/978-3-319-24571-3_5},\n publisher = {Springer, Cham},\n city = {Munich},\n id = {6be9c49d-854a-3afe-9a18-76536d303470},\n created = {2016-05-05T11:33:36.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:06.231Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Payer2015},\n notes = {Oral, Short-listed for MICCAI Young Scientist Award},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Automated computer-aided analysis of lung vessels has shown to yield promising results for non-invasive diagnosis of lung diseases. In order to detect vascular changes affecting arteries and veins differently, an algorithm capable of identifying these two compartments is needed. We propose a fully automatic algorithm that separates arteries and veins in thoracic computed tomography (CT) images based on two integer programs. The first extracts multiple subtrees inside a graph of vessel paths. The second labels each tree as either artery or vein by maximizing both, the contact surface in their Voronoi diagram, and a measure based on closeness to accompanying bronchi. We evaluate the performance of our automatic algorithm on 10 manual segmentations of arterial and venous trees from patients with and without pulmonary vascular disease, achieving an average voxel based overlap of 94.1% (range: 85.0% – 98.7%), outperforming a recent state-of-the-art interactive method.},\n bibtype = {inbook},\n author = {Payer, Christian and Pienn, Michael and Bálint, Zoltán and Olschewski, Andrea and Olschewski, Horst and Urschler, Martin},\n editor = {Navab, N. and Hornegger, J. and Wells, W. and Frangi, A. F.},\n doi = {10.1007/978-3-319-24571-3_5},\n chapter = {Automatic Artery-Vein Separation from Thoracic CT Images Using Integer Programming},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
Automated computer-aided analysis of lung vessels has been shown to yield promising results for non-invasive diagnosis of lung diseases. In order to detect vascular changes affecting arteries and veins differently, an algorithm capable of identifying these two compartments is needed. We propose a fully automatic algorithm that separates arteries and veins in thoracic computed tomography (CT) images based on two integer programs. The first extracts multiple subtrees inside a graph of vessel paths. The second labels each tree as either artery or vein by maximizing both the contact surface in their Voronoi diagram and a measure based on closeness to accompanying bronchi. We evaluate the performance of our automatic algorithm on 10 manual segmentations of arterial and venous trees from patients with and without pulmonary vascular disease, achieving an average voxel-based overlap of 94.1% (range: 85.0% – 98.7%), outperforming a recent state-of-the-art interactive method.
You Should Use Regression to Detect Cells. Kainz, P.; Urschler, M.; Schulter, S.; Wohlhart, P.; and Lepetit, V. In Lecture Notes in Computer Science, volume 9351, pages 276-283. Navab, N.; Hornegger, J.; Wells, W.; and Frangi, A. F., editors. 2015.
@inbook{\n type = {inbook},\n year = {2015},\n pages = {276-283},\n volume = {9351},\n websites = {http://link.springer.com/10.1007/978-3-319-24574-4_33},\n id = {7a8627ff-0d32-3615-b314-58c075620f92},\n created = {2016-05-05T11:33:36.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:03.067Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Kainz2015},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Automated cell detection in histopathology images is a hard problem due to the large variance of cell shape and appearance. We show that cells can be detected reliably in images by predicting, for each pixel location, a monotonous function of the distance to the center of the closest cell. Cell centers can then be identified by extracting local extremums of the predicted values. This approach results in a very simple method, which is easy to implement. We show on two challenging microscopy image datasets that our approach outperforms state-of-the-art methods in terms of accuracy, reliability, and speed. We also introduce a new dataset that we will make publicly available.},\n bibtype = {inbook},\n author = {Kainz, Philipp and Urschler, Martin and Schulter, Samuel and Wohlhart, Paul and Lepetit, Vincent},\n editor = {Navab, N. and Hornegger, J. and Wells, W. and Frangi, A. F.},\n doi = {10.1007/978-3-319-24574-4_33},\n chapter = {You Should Use Regression to Detect Cells},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
Automated cell detection in histopathology images is a hard problem due to the large variance of cell shape and appearance. We show that cells can be detected reliably in images by predicting, for each pixel location, a monotonic function of the distance to the center of the closest cell. Cell centers can then be identified by extracting local extrema of the predicted values. This approach results in a very simple method, which is easy to implement. We show on two challenging microscopy image datasets that our approach outperforms state-of-the-art methods in terms of accuracy, reliability, and speed. We also introduce a new dataset that we will make publicly available.
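The detection-by-regression recipe, predicting a value that decays monotonically with distance to the nearest cell centre and then taking local maxima, can be sketched as follows. The exponential target function and all parameters are illustrative assumptions, not the paper's exact choices.

```python
import numpy as np
from scipy.ndimage import distance_transform_edt, gaussian_filter
from skimage.feature import peak_local_max

def proximity_target(shape, centres_yx, alpha=0.2):
    """Regression target: 1 at a cell centre, decaying monotonically
    (here exponentially) with distance to the nearest centre."""
    no_centre = np.ones(shape, dtype=bool)
    for y, x in centres_yx:
        no_centre[y, x] = False
    return np.exp(-alpha * distance_transform_edt(no_centre))

target = proximity_target((100, 100), [(30, 30), (30, 38), (70, 60)])
prediction = gaussian_filter(target, 1.0)     # stand-in for a learned output

# Detected centres = local maxima of the predicted map above a threshold.
centres = peak_local_max(prediction, min_distance=3, threshold_abs=0.5)
```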
Automatic third molar localization from 3D MRI using random regression forests. Unterpirker, W.; Ebner, T.; Štern, D.; and Urschler, M. In 19th International Conference on Medical Image Understanding and Analysis (MIUA), pages 195-200, 2015. BMVA.
@inproceedings{\n title = {Automatic third molar localization from 3D MRI using random regression forests},\n type = {inproceedings},\n year = {2015},\n pages = {195-200},\n publisher = {BMVA},\n city = {Lincoln, UK},\n id = {409a7896-26cc-3afe-ab31-51fa91eb114e},\n created = {2016-06-13T17:12:00.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:03.709Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Unterpirker2015},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Unterpirker, Walter and Ebner, Thomas and Štern, Darko and Urschler, Martin},\n booktitle = {19th International Conference on Medical Image Understanding and Analysis (MIUA)}\n}
What automated age estimation of hand and wrist MRI data tells us about skeletal maturation in male adolescents. Urschler, M.; Grassegger, S.; and Štern, D. Annals of Human Biology, 42(4): 358-367, July 2015.
@article{\n title = {What automated age estimation of hand and wrist MRI data tells us about skeletal maturation in male adolescents},\n type = {article},\n year = {2015},\n keywords = {Automatic software,MRI,forensic age estimation,hand and wrist},\n pages = {358-367},\n volume = {42},\n websites = {http://www.tandfonline.com/doi/full/10.3109/03014460.2015.1043945},\n month = {7},\n day = {4},\n id = {1554f6c3-760a-3e2b-8c92-04bd7acd7aa9},\n created = {2018-02-18T20:51:32.594Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:59.235Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2015a},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Background: Age estimation of individuals is important in human biology and has various medical and forensic applications. Recent interest in MR-based methods aims to investigate alternatives for established methods involving ionising radiation. Automatic, software-based methods additionally promise improved estimation objectivity.Aim: To investigate how informative automatically selected image features are regarding their ability to discriminate age, by exploring a recently proposed software-based age estimation method for MR images of the left hand and wrist.Subjects and methods: One hundred and two MR datasets of left hand images are used to evaluate age estimation performance, consisting of bone and epiphyseal gap volume localisation, computation of one age regression model per bone mapping image features to age and fusion of individual bone age predictions to a final age estimate.Results: Quantitative results of the software-based method show an age estimation performance with a mean absolute difference of 0.85 years (SD = 0.58 years) to chronological age, as determined by a cross-validation experiment. Qualitatively, it is demonstrated how feature selection works and which image features of skeletal maturation are automatically chosen to model the non-linear regression function.Conclusion: Feasibility of automatic age estimation based on MRI data is shown and selected image features are found to be informative for describing anatomical changes during physical maturation in male adolescents.},\n bibtype = {article},\n author = {Urschler, Martin and Grassegger, Sabine and Štern, Darko},\n doi = {10.3109/03014460.2015.1043945},\n journal = {Annals of Human Biology},\n number = {4}\n}
Background: Age estimation of individuals is important in human biology and has various medical and forensic applications. Recent interest in MR-based methods aims to investigate alternatives for established methods involving ionising radiation. Automatic, software-based methods additionally promise improved estimation objectivity. Aim: To investigate how informative automatically selected image features are regarding their ability to discriminate age, by exploring a recently proposed software-based age estimation method for MR images of the left hand and wrist. Subjects and methods: One hundred and two MR datasets of left hand images are used to evaluate age estimation performance, consisting of bone and epiphyseal gap volume localisation, computation of one age regression model per bone mapping image features to age, and fusion of individual bone age predictions to a final age estimate. Results: Quantitative results of the software-based method show an age estimation performance with a mean absolute difference of 0.85 years (SD = 0.58 years) to chronological age, as determined by a cross-validation experiment. Qualitatively, it is demonstrated how feature selection works and which image features of skeletal maturation are automatically chosen to model the non-linear regression function. Conclusion: Feasibility of automatic age estimation based on MRI data is shown, and the selected image features are found to be informative for describing anatomical changes during physical maturation in male adolescents.
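The described pipeline, one regression model per bone with subsequent fusion of the individual predictions, can be mimicked in a cross-validation sketch with scikit-learn. All data below are random stand-ins, and simple averaging is assumed as the fusion rule.

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold

rng = np.random.default_rng(0)
n_subjects, n_bones, n_feat = 102, 11, 15
X = rng.normal(size=(n_subjects, n_bones, n_feat))   # per-bone feature vectors
y = rng.uniform(13.0, 20.0, size=n_subjects)         # chronological age (years)

abs_errors = []
for train, test in KFold(n_splits=5, shuffle=True, random_state=0).split(X):
    per_bone = []
    for b in range(n_bones):                         # one regressor per bone
        model = RandomForestRegressor(n_estimators=100, random_state=0)
        model.fit(X[train, b], y[train])
        per_bone.append(model.predict(X[test, b]))
    fused = np.mean(per_bone, axis=0)                # fuse individual estimates
    abs_errors.extend(np.abs(fused - y[test]))
print(f"mean absolute difference: {np.mean(abs_errors):.2f} years")
```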
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dental age estimation of living persons: Comparison of MRI with OPG.\n \n \n \n \n\n\n \n Baumann, P.; Widek, T.; Merkens, H.; Boldt, J.; Petrovic, A.; Urschler, M.; Kirnbauer, B.; Jakse, N.; and Scheurer, E.\n\n\n \n\n\n\n Forensic Science International, 253: 76-80. 8 2015.\n \n\n\n\n
\n\n\n\n \n \n \"DentalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Dental age estimation of living persons: Comparison of MRI with OPG},\n type = {article},\n year = {2015},\n keywords = {Age estimation,Eruption,Forensic,MRI,Mineralization,Molar},\n pages = {76-80},\n volume = {253},\n websites = {https://linkinghub.elsevier.com/retrieve/pii/S0379073815002364},\n month = {8},\n id = {0ac73bab-d51c-3061-8672-ce8c127c083c},\n created = {2018-02-18T20:51:32.611Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:02.421Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Baumann2015},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {The need for forensic age estimations in living adolescents is high mainly due to migration, particularly from countries where birth dates are not reliably documented. To date, the gold standard of dental age estimation is the evaluation of the mineralization and eruption stages of the third molars using an orthopantomogram (OPG). However, the use of ionizing radiation without medical indication is ethically controversial and not permitted in many countries. Thus, the aim of this study was to investigate if dental MRI can be used for the assessment of dental age with equally good results as when using an OPG. 27 healthy volunteers (19 ♀, 8 ♂, age range 13.6-23.1 years, median 18.9 years) underwent an MRI scan of the jaw after a clinically indicated OPG. Mineralization and eruption stages of the molars were independently analyzed on OPGs and MRI by two blinded dentists according to the staging system established by Demirjian and Olze, respectively. The results of OPG and MRI were compared and inter-rater agreement was determined. The developmental stages of the 262 evaluated molars could be clearly differentiated in MRI. For both, mineralization and eruption, there was a good correlation between MRI and OPG. Overall MRI tended to yield slightly lower stages than the OPG. Inter-rater agreement was moderate for mineralization and good regarding eruption. Although a validation of these results using modality-specific reference values is needed, dental MRI seems to be suitable for use in dental age estimation.},\n bibtype = {article},\n author = {Baumann, Pia and Widek, Thomas and Merkens, Heiko and Boldt, Julian and Petrovic, Andreas and Urschler, Martin and Kirnbauer, Barbara and Jakse, Norbert and Scheurer, Eva},\n doi = {10.1016/j.forsciint.2015.06.001},\n journal = {Forensic Science International}\n}
\n
\n\n\n
\n The need for forensic age estimations in living adolescents is high mainly due to migration, particularly from countries where birth dates are not reliably documented. To date, the gold standard of dental age estimation is the evaluation of the mineralization and eruption stages of the third molars using an orthopantomogram (OPG). However, the use of ionizing radiation without medical indication is ethically controversial and not permitted in many countries. Thus, the aim of this study was to investigate if dental MRI can be used for the assessment of dental age with equally good results as when using an OPG. 27 healthy volunteers (19 ♀, 8 ♂, age range 13.6-23.1 years, median 18.9 years) underwent an MRI scan of the jaw after a clinically indicated OPG. Mineralization and eruption stages of the molars were independently analyzed on OPGs and MRI by two blinded dentists according to the staging system established by Demirjian and Olze, respectively. The results of OPG and MRI were compared and inter-rater agreement was determined. The developmental stages of the 262 evaluated molars could be clearly differentiated in MRI. For both, mineralization and eruption, there was a good correlation between MRI and OPG. Overall MRI tended to yield slightly lower stages than the OPG. Inter-rater agreement was moderate for mineralization and good regarding eruption. Although a validation of these results using modality-specific reference values is needed, dental MRI seems to be suitable for use in dental age estimation.\n
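Inter-rater agreement of the kind reported here is commonly quantified with Cohen's kappa. A minimal sketch, assuming the two blinded raters' stage assignments are available as integer arrays; the toy values below are hypothetical, not data from the study.

import numpy as np
from sklearn.metrics import cohen_kappa_score

# Hypothetical Demirjian-style stage assignments from two blinded raters
# for the same set of molars.
rater_a = np.array([4, 5, 7, 6, 3, 7, 5])
rater_b = np.array([4, 5, 6, 6, 3, 7, 4])

# Unweighted kappa; for ordinal stages a weighted variant is often preferred.
print(cohen_kappa_score(rater_a, rater_b))
print(cohen_kappa_score(rater_a, rater_b, weights="linear"))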
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Anatomical Landmark Detection in Medical Applications Driven by Synthetic Data.\n \n \n \n \n\n\n \n Riegler, G.; Urschler, M.; Ruther, M.; Bischof, H.; and Stern, D.\n\n\n \n\n\n\n In 2015 IEEE International Conference on Computer Vision Workshop (ICCVW), volume 2015-Febru, pages 85-89, 12 2015. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"AnatomicalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Anatomical Landmark Detection in Medical Applications Driven by Synthetic Data},\n type = {inproceedings},\n year = {2015},\n keywords = {Biomedical imaging,Computed tomography,Shape,Three-dimensional displays,Training,Training data},\n pages = {85-89},\n volume = {2015-Febru},\n websites = {http://ieeexplore.ieee.org/document/7406370/},\n month = {12},\n publisher = {IEEE},\n city = {Santiago de Chile},\n id = {f5a1ae8e-149a-3bcc-8596-a83760e1fead},\n created = {2018-02-18T20:51:33.016Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:01.783Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Riegler2015},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {An important initial step in many medical image analysis applications is the accurate detection of anatomical landmarks. Most successful methods for this task rely on data-driven machine learning algorithms. However, modern machine learning techniques, e.g. convolutional neural networks, need a large corpus of training data, which is often an unrealistic setting for medical datasets. In this work, we investigate how to adapt synthetic image datasets from other computer vision tasks to overcome the under-representation of the anatomical pose and shape variations in medical image datasets. We transform both data domains to a common one in such a way that a convolutional neural network can be trained on the larger synthetic image dataset and fine-tuned on the smaller medical image dataset. Our evaluations on data of MR hand and whole body CT images demonstrate that this approach improves the detection results compared to training a convolutional neural network only on the medical data. The proposed approach may also be usable in other medical applications, where training data is scarce.},\n bibtype = {inproceedings},\n author = {Riegler, Gernot and Urschler, Martin and Ruther, Matthias and Bischof, Horst and Stern, Darko},\n doi = {10.1109/ICCVW.2015.21},\n booktitle = {2015 IEEE International Conference on Computer Vision Workshop (ICCVW)}\n}
\n
\n\n\n
\n An important initial step in many medical image analysis applications is the accurate detection of anatomical landmarks. Most successful methods for this task rely on data-driven machine learning algorithms. However, modern machine learning techniques, e.g. convolutional neural networks, need a large corpus of training data, which is often an unrealistic setting for medical datasets. In this work, we investigate how to adapt synthetic image datasets from other computer vision tasks to overcome the under-representation of the anatomical pose and shape variations in medical image datasets. We transform both data domains to a common one in such a way that a convolutional neural network can be trained on the larger synthetic image dataset and fine-tuned on the smaller medical image dataset. Our evaluations on data of MR hand and whole body CT images demonstrate that this approach improves the detection results compared to training a convolutional neural network only on the medical data. The proposed approach may also be usable in other medical applications, where training data is scarce.\n
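The transfer strategy described here, pre-train on a large synthetic dataset and then fine-tune on the small medical one, can be illustrated with a modern deep-learning framework. This is a generic sketch, not the paper's network or training setup; LandmarkNet is a toy architecture, and synthetic_loader and medical_loader are assumed, hypothetical data loaders.

import torch
import torch.nn as nn

# Tiny CNN regressing 2D landmark coordinates from an image.
class LandmarkNet(nn.Module):
    def __init__(self, n_landmarks):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1))
        self.head = nn.Linear(32, 2 * n_landmarks)

    def forward(self, x):
        return self.head(self.features(x).flatten(1))

def train(model, loader, epochs, lr):
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.MSELoss()
    for _ in range(epochs):
        for images, targets in loader:
            opt.zero_grad()
            loss = loss_fn(model(images), targets)
            loss.backward()
            opt.step()

model = LandmarkNet(n_landmarks=37)
# Pre-train on the large synthetic set, then fine-tune on the small
# medical set with a lower learning rate (hypothetical loaders):
# train(model, synthetic_loader, epochs=20, lr=1e-3)
# train(model, medical_loader, epochs=5, lr=1e-4)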
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Increased tortuosity of pulmonary arteries in patients with pulmonary hypertension.\n \n \n \n\n\n \n Pienn, M.; Payer, C.; Olschewski, A.; Olschewski, H.; Urschler, M.; and Balint, Z.\n\n\n \n\n\n\n In 19th International Conference on Medical Image Understanding and Analysis (MIUA), pages 86-91, 2015. BMVA\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Increased tortuosity of pulmonary arteries in patients with pulmonary hypertension},\n type = {inproceedings},\n year = {2015},\n pages = {86-91},\n publisher = {BMVA},\n city = {Lincoln, UK},\n id = {94c5161e-bee6-3f86-925c-bf38e5e53e30},\n created = {2018-02-22T08:05:49.276Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:00.504Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Pienn2015},\n notes = {Oral, Best Oral Presentation Award},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Pienn, Michael and Payer, Christian and Olschewski, Andrea and Olschewski, Horst and Urschler, Martin and Balint, Zoltan},\n booktitle = {19th International Conference on Medical Image Understanding and Analysis (MIUA)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Automatic high-speed video glottis segmentation using salient regions and 3D geodesic active contours.\n \n \n \n\n\n \n Schenk, F.; Aichinger, P.; Roesner, I.; and Urschler, M.\n\n\n \n\n\n\n Annals of the BMVA, 2015(3): 1-15. 2015.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Automatic high-speed video glottis segmentation using salient regions and 3D geodesic active contours},\n type = {article},\n year = {2015},\n pages = {1-15},\n volume = {2015},\n id = {b7ea65a9-886c-3ee4-aec6-c6a11d5229e6},\n created = {2018-02-22T08:05:50.392Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:39:58.602Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Schenk2015},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {article},\n author = {Schenk, Fabian and Aichinger, Philipp and Roesner, Imme and Urschler, Martin},\n journal = {Annals of the BMVA},\n number = {3}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Intuitive presentation of clinical forensic data using anonymous and person-specific 3D reference manikins.\n \n \n \n \n\n\n \n Urschler, M.; Höller, J.; Bornik, A.; Paul, T.; Giretzlehner, M.; Bischof, H.; Yen, K.; and Scheurer, E.\n\n\n \n\n\n\n Forensic Science International, 241: 155-166. 8 2014.\n \n\n\n\n
\n\n\n\n \n \n \"IntuitiveWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Intuitive presentation of clinical forensic data using anonymous and person-specific 3D reference manikins},\n type = {article},\n year = {2014},\n keywords = {3D imaging,3D reconstruction,Case presentation,Visualization},\n pages = {155-166},\n volume = {241},\n websites = {http://www.sciencedirect.com/science/article/pii/S0379073814002242},\n month = {8},\n publisher = {Elsevier Ireland Ltd},\n id = {9b924686-169c-33be-8c88-cfbbe184a8bf},\n created = {2015-02-18T08:30:19.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:12.544Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2014},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {The increasing use of CT/MR devices in forensic analysis motivates the need to present forensic findings from different sources in an intuitive reference visualization, with the aim of combining 3D volumetric images along with digital photographs of external findings into a 3D computer graphics model. This model allows a comprehensive presentation of forensic findings in court and enables comparative evaluation studies correlating data sources. The goal of this work was to investigate different methods to generate anonymous and patient-specific 3D models which may be used as reference visualizations. The issue of registering 3D volumetric as well as 2D photographic data to such 3D models is addressed to provide an intuitive context for injury documentation from arbitrary modalities. We present an image processing and visualization work-flow, discuss the major parts of this work-flow, compare the different investigated reference models, and show a number of case studies that underline the suitability of the proposed work-flow for presenting forensically relevant information in 3D visualizations. © 2014 Elsevier Ireland Ltd.},\n bibtype = {article},\n author = {Urschler, Martin and Höller, Johannes and Bornik, Alexander and Paul, Tobias and Giretzlehner, Michael and Bischof, Horst and Yen, Kathrin and Scheurer, Eva},\n doi = {10.1016/j.forsciint.2014.05.017},\n journal = {Forensic Science International}\n}
\n
\n\n\n
\n The increasing use of CT/MR devices in forensic analysis motivates the need to present forensic findings from different sources in an intuitive reference visualization, with the aim of combining 3D volumetric images along with digital photographs of external findings into a 3D computer graphics model. This model allows a comprehensive presentation of forensic findings in court and enables comparative evaluation studies correlating data sources. The goal of this work was to investigate different methods to generate anonymous and patient-specific 3D models which may be used as reference visualizations. The issue of registering 3D volumetric as well as 2D photographic data to such 3D models is addressed to provide an intuitive context for injury documentation from arbitrary modalities. We present an image processing and visualization work-flow, discuss the major parts of this work-flow, compare the different investigated reference models, and show a number of case studies that underline the suitability of the proposed work-flow for presenting forensically relevant information in 3D visualizations. © 2014 Elsevier Ireland Ltd.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quantification of Tortuosity and Fractal Dimension of the Lung Vessels in Pulmonary Hypertension Patients.\n \n \n \n \n\n\n \n Helmberger, M.; Pienn, M.; Urschler, M.; Kullnig, P.; Stollberger, R.; Kovacs, G.; Olschewski, A.; Olschewski, H.; and Bálint, Z.\n\n\n \n\n\n\n PLoS ONE, 9(1): e87515. 1 2014.\n \n\n\n\n
\n\n\n\n \n \n \"QuantificationWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Quantification of Tortuosity and Fractal Dimension of the Lung Vessels in Pulmonary Hypertension Patients},\n type = {article},\n year = {2014},\n pages = {e87515},\n volume = {9},\n websites = {https://dx.plos.org/10.1371/journal.pone.0087515},\n month = {1},\n publisher = {Public Library of Science},\n day = {31},\n id = {f434ddae-6ec6-3474-a6f3-ade3100de06e},\n created = {2015-02-18T08:30:19.000Z},\n file_attached = {true},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:15.127Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Helmberger2014},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Pulmonary hypertension (PH) can result in vascular pruning and increased tortuosity of the blood vessels. In this study we examined whether automatic extraction of lung vessels from contrast-enhanced thoracic computed tomography (CT) scans and calculation of tortuosity as well as 3D fractal dimension of the segmented lung vessels results in measures associated with PH. In this pilot study, 24 patients (18 with and 6 without PH) were examined with thorax CT following their diagnostic or follow-up right-sided heart catheterisation (RHC). Images of the whole thorax were acquired with a 128-slice dual-energy CT scanner. After lung identification, a vessel enhancement filter was used to estimate the lung vessel centerlines. From these, the vascular trees were generated. For each vessel segment the tortuosity was calculated using the distance metric. Fractal dimension was computed using 3D box counting. Hemodynamic data from RHC was used for correlation analysis. The distance metric, the readout of vessel tortuosity, correlated with mean pulmonary arterial pressure (Spearman correlation coefficient: ρ = 0.60) and other relevant parameters, like pulmonary vascular resistance (ρ = 0.59), arterio-venous difference in oxygen (ρ = 0.54), arterial (ρ = -0.54) and venous oxygen saturation (ρ = -0.68). Moreover, the distance metric increased with increasing WHO functional class. In contrast, 3D fractal dimension was only significantly correlated with arterial oxygen saturation (ρ = 0.47). Automatic detection of the lung vascular tree can provide clinically relevant measures of blood vessel morphology. Non-invasive quantification of pulmonary vessel tortuosity may provide a tool to evaluate the severity of pulmonary hypertension. © 2014 Helmberger et al.},\n bibtype = {article},\n author = {Helmberger, Michael and Pienn, Michael and Urschler, Martin and Kullnig, Peter and Stollberger, Rudolf and Kovacs, Gabor and Olschewski, Andrea and Olschewski, Horst and Bálint, Zoltán},\n editor = {Frati, Giacomo},\n doi = {10.1371/journal.pone.0087515},\n journal = {PLoS ONE},\n number = {1}\n}
\n
\n\n\n
\n Pulmonary hypertension (PH) can result in vascular pruning and increased tortuosity of the blood vessels. In this study we examined whether automatic extraction of lung vessels from contrast-enhanced thoracic computed tomography (CT) scans and calculation of tortuosity as well as 3D fractal dimension of the segmented lung vessels results in measures associated with PH. In this pilot study, 24 patients (18 with and 6 without PH) were examined with thorax CT following their diagnostic or follow-up right-sided heart catheterisation (RHC). Images of the whole thorax were acquired with a 128-slice dual-energy CT scanner. After lung identification, a vessel enhancement filter was used to estimate the lung vessel centerlines. From these, the vascular trees were generated. For each vessel segment the tortuosity was calculated using the distance metric. Fractal dimension was computed using 3D box counting. Hemodynamic data from RHC was used for correlation analysis. The distance metric, the readout of vessel tortuosity, correlated with mean pulmonary arterial pressure (Spearman correlation coefficient: ρ = 0.60) and other relevant parameters, like pulmonary vascular resistance (ρ = 0.59), arterio-venous difference in oxygen (ρ = 0.54), arterial (ρ = -0.54) and venous oxygen saturation (ρ = -0.68). Moreover, the distance metric increased with increasing WHO functional class. In contrast, 3D fractal dimension was only significantly correlated with arterial oxygen saturation (ρ = 0.47). Automatic detection of the lung vascular tree can provide clinically relevant measures of blood vessel morphology. Non-invasive quantification of pulmonary vessel tortuosity may provide a tool to evaluate the severity of pulmonary hypertension. © 2014 Helmberger et al.\n
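Both measures used in this study have compact definitions. Below is a minimal sketch of the distance-metric tortuosity (path length of a centerline divided by its chord length) and a plain 3D box-counting estimate of fractal dimension; it assumes an ordered centerline point array and a non-empty binary vessel mask, and is an illustration rather than the paper's exact pipeline.

import numpy as np

def distance_metric(centerline):
    # Tortuosity: path length / straight-line (chord) distance.
    # centerline: (n_points, 3) array of ordered centerline coordinates.
    path = np.linalg.norm(np.diff(centerline, axis=0), axis=1).sum()
    chord = np.linalg.norm(centerline[-1] - centerline[0])
    return path / chord

def fractal_dimension_3d(mask, box_sizes=(2, 4, 8, 16, 32)):
    # Box-counting estimate for a binary 3D mask; box sizes are assumed
    # to be no larger than the mask dimensions.
    counts = []
    for s in box_sizes:
        # Crop to a multiple of the box size, then count occupied boxes.
        d = [(dim // s) * s for dim in mask.shape]
        m = mask[:d[0], :d[1], :d[2]].reshape(d[0]//s, s, d[1]//s, s, d[2]//s, s)
        counts.append(m.any(axis=(1, 3, 5)).sum())
    # Slope of log N(s) versus log(1/s) gives the dimension estimate.
    slope, _ = np.polyfit(np.log(1.0 / np.array(box_sizes)), np.log(counts), 1)
    return slope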
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards Automatic Bone Age Estimation from MRI: Localization of 3D Anatomical Landmarks.\n \n \n \n \n\n\n \n Ebner, T.; Stern, D.; Donner, R.; Bischof, H.; and Urschler, M.\n\n\n \n\n\n\n Volume 17 . Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 421-428. Golland, P.; Hata, N.; Barillot, C.; Hornegger, J.; and Howe, R., editor(s). Springer, Cham, 2014.\n \n\n\n\n
\n\n\n\n \n \n \"MedicalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2014},\n pages = {421-428},\n volume = {17},\n websites = {http://link.springer.com/10.1007/978-3-319-10470-6_53},\n publisher = {Springer, Cham},\n city = {Boston},\n id = {fb447509-87be-3c9a-b846-24b48fad474a},\n created = {2015-02-18T08:46:00.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:14.497Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Ebner2014},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Bone age estimation (BAE) is an important procedure in forensic practice which recently has seen a shift in attention from X-ray to MRI based imaging. To automate BAE from MRI, localization of the joints between hand bones is a crucial first step, which is challenging due to anatomical variations, different poses and repeating structures within the hand. We propose a landmark localization algorithm using multiple random regression forests, first analyzing the shape of the hand from information of the whole image, thus implicitly modeling the global landmark configuration, followed by a refinement based on more local information to increase prediction accuracy. We are able to clearly outperform related approaches on our dataset of 60 T1-weighted MR images, achieving a mean landmark localization error of 1.4 ± 1.5mm, while having only 0.25% outliers with an error greater than 10mm.},\n bibtype = {inbook},\n author = {Ebner, Thomas and Stern, Darko and Donner, Rene and Bischof, Horst and Urschler, Martin},\n editor = {Golland, P. and Hata, N. and Barillot, C. and Hornegger, J. and Howe, R.},\n doi = {10.1007/978-3-319-10470-6_53},\n chapter = {Towards Automatic Bone Age Estimation from MRI: Localization of 3D Anatomical Landmarks},\n title = {Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention}\n}
\n
\n\n\n
\n Bone age estimation (BAE) is an important procedure in forensic practice which recently has seen a shift in attention from X-ray to MRI based imaging. To automate BAE from MRI, localization of the joints between hand bones is a crucial first step, which is challenging due to anatomical variations, different poses and repeating structures within the hand. We propose a landmark localization algorithm using multiple random regression forests, first analyzing the shape of the hand from information of the whole image, thus implicitly modeling the global landmark configuration, followed by a refinement based on more local information to increase prediction accuracy. We are able to clearly outperform related approaches on our dataset of 60 T1-weighted MR images, achieving a mean landmark localization error of 1.4 ± 1.5mm, while having only 0.25% outliers with an error greater than 10mm.\n
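The coarse-to-fine idea, a global forest capturing the overall landmark configuration followed by a local refinement, can be sketched with off-the-shelf regressors. This is a simplified illustration under assumptions, not the paper's formulation: feature extraction (global_feats from the whole image, local_feats from a patch around the coarse estimate) is presumed to happen elsewhere, and the refinement forest simply regresses the residual of the coarse stage.

import numpy as np
from sklearn.ensemble import RandomForestRegressor

def fit_coarse_to_fine(global_feats, local_feats, landmarks):
    # global_feats: (n, d_g) whole-image features
    # local_feats:  (n, d_l) features sampled around the true landmark
    # landmarks:    (n, 3) ground-truth positions (one landmark shown here)
    coarse = RandomForestRegressor(n_estimators=100, random_state=0).fit(global_feats, landmarks)
    # The refinement forest regresses the residual left by the coarse stage.
    residual = landmarks - coarse.predict(global_feats)
    fine = RandomForestRegressor(n_estimators=100, random_state=0).fit(local_feats, residual)
    return coarse, fine

def localize(coarse, fine, g, l):
    # Coarse prediction plus local correction.
    return coarse.predict(g.reshape(1, -1))[0] + fine.predict(l.reshape(1, -1))[0]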
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparing algorithms for automated vessel segmentation in computed tomography scans of the lung: the VESSEL12 study.\n \n \n \n \n\n\n \n Rudyanto, R., D.; Kerkstra, S.; van Rikxoort, E., M.; Fetita, C.; Brillet, P.; Lefevre, C.; Xue, W.; Zhu, X.; Liang, J.; Öksüz, İ.; Ünay, D.; Kadipaşaogˇlu, K.; Estépar, R., S., J.; Ross, J., C.; Washko, G., R.; Prieto, J.; Hoyos, M., H.; Orkisz, M.; Meine, H.; Hüllebrand, M.; Stöcker, C.; Mir, F., L.; Naranjo, V.; Villanueva, E.; Staring, M.; Xiao, C.; Stoel, B., C.; Fabijanska, A.; Smistad, E.; Elster, A., C.; Lindseth, F.; Foruzan, A., H.; Kiros, R.; Popuri, K.; Cobzas, D.; Jimenez-Carretero, D.; Santos, A.; Ledesma-Carbayo, M., J.; Helmberger, M.; Urschler, M.; Pienn, M.; Bosboom, D., G.; Campo, A.; Prokop, M.; de Jong, P., A.; Ortiz-de-Solorzano, C.; Muñoz-Barrutia, A.; and van Ginneken, B.\n\n\n \n\n\n\n Medical Image Analysis, 18(7): 1217-1232. 10 2014.\n \n\n\n\n
\n\n\n\n \n \n \"ComparingWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Comparing algorithms for automated vessel segmentation in computed tomography scans of the lung: the VESSEL12 study},\n type = {article},\n year = {2014},\n keywords = {Algorithm comparison,Lung vessels,Thoracic computed tomography},\n pages = {1217-1232},\n volume = {18},\n websites = {http://www.sciencedirect.com/science/article/pii/S136184151400111X},\n month = {10},\n id = {bd5e5f35-1a04-3b6e-a5a8-b6a4c5b4ef24},\n created = {2015-03-13T13:27:46.000Z},\n accessed = {2015-03-13},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:13.180Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Rudyanto2014},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {The VESSEL12 (VESsel SEgmentation in the Lung) challenge objectively compares the performance of different algorithms to identify vessels in thoracic computed tomography (CT) scans. Vessel segmentation is fundamental in computer aided processing of data generated by 3D imaging modalities. As manual vessel segmentation is prohibitively time consuming, any real world application requires some form of automation. Several approaches exist for automated vessel segmentation, but judging their relative merits is difficult due to a lack of standardized evaluation. We present an annotated reference dataset containing 20 CT scans and propose nine categories to perform a comprehensive evaluation of vessel segmentation algorithms from both academia and industry. Twenty algorithms participated in the VESSEL12 challenge, held at International Symposium on Biomedical Imaging (ISBI) 2012. All results have been published at the VESSEL12 website http://vessel12.grand-challenge.org. The challenge remains ongoing and open to new participants. Our three contributions are: (1) an annotated reference dataset available online for evaluation of new algorithms; (2) a quantitative scoring system for objective comparison of algorithms; and (3) performance analysis of the strengths and weaknesses of the various vessel segmentation methods in the presence of various lung diseases. © 2014 .},\n bibtype = {article},\n author = {Rudyanto, Rina D. and Kerkstra, Sjoerd and van Rikxoort, Eva M. and Fetita, Catalin and Brillet, Pierre-Yves and Lefevre, Christophe and Xue, Wenzhe and Zhu, Xiangjun and Liang, Jianming and Öksüz, İlkay and Ünay, Devrim and Kadipaşaogˇlu, Kamuran and Estépar, Raúl San José and Ross, James C. and Washko, George R. and Prieto, Juan-Carlos and Hoyos, Marcela Hernández and Orkisz, Maciej and Meine, Hans and Hüllebrand, Markus and Stöcker, Christina and Mir, Fernando Lopez and Naranjo, Valery and Villanueva, Eliseo and Staring, Marius and Xiao, Changyan and Stoel, Berend C. and Fabijanska, Anna and Smistad, Erik and Elster, Anne C. and Lindseth, Frank and Foruzan, Amir Hossein and Kiros, Ryan and Popuri, Karteek and Cobzas, Dana and Jimenez-Carretero, Daniel and Santos, Andres and Ledesma-Carbayo, Maria J. and Helmberger, Michael and Urschler, Martin and Pienn, Michael and Bosboom, Dennis G.H. and Campo, Arantza and Prokop, Mathias and de Jong, Pim A. and Ortiz-de-Solorzano, Carlos and Muñoz-Barrutia, Arrate and van Ginneken, Bram},\n doi = {10.1016/j.media.2014.07.003},\n journal = {Medical Image Analysis},\n number = {7}\n}
\n
\n\n\n
\n The VESSEL12 (VESsel SEgmentation in the Lung) challenge objectively compares the performance of different algorithms to identify vessels in thoracic computed tomography (CT) scans. Vessel segmentation is fundamental in computer aided processing of data generated by 3D imaging modalities. As manual vessel segmentation is prohibitively time consuming, any real world application requires some form of automation. Several approaches exist for automated vessel segmentation, but judging their relative merits is difficult due to a lack of standardized evaluation. We present an annotated reference dataset containing 20 CT scans and propose nine categories to perform a comprehensive evaluation of vessel segmentation algorithms from both academia and industry. Twenty algorithms participated in the VESSEL12 challenge, held at International Symposium on Biomedical Imaging (ISBI) 2012. All results have been published at the VESSEL12 website http://vessel12.grand-challenge.org. The challenge remains ongoing and open to new participants. Our three contributions are: (1) an annotated reference dataset available online for evaluation of new algorithms; (2) a quantitative scoring system for objective comparison of algorithms; and (3) performance analysis of the strengths and weaknesses of the various vessel segmentation methods in the presence of various lung diseases. © 2014 .\n
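Comparing a candidate vessel segmentation against an annotated reference usually starts from voxel-wise overlap measures. A minimal sketch (Dice, sensitivity, specificity for binary masks) follows; the VESSEL12 protocol with its nine evaluation categories is considerably more involved than this, and non-empty masks are assumed.

import numpy as np

def overlap_scores(pred, ref):
    # Voxel-wise scores for binary masks pred and ref of the same shape;
    # assumes neither class is completely absent.
    pred, ref = pred.astype(bool), ref.astype(bool)
    tp = np.logical_and(pred, ref).sum()
    fp = np.logical_and(pred, ~ref).sum()
    fn = np.logical_and(~pred, ref).sum()
    tn = np.logical_and(~pred, ~ref).sum()
    return {
        "dice": 2 * tp / (2 * tp + fp + fn),
        "sensitivity": tp / (tp + fn),
        "specificity": tn / (tn + fp),
    }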
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Automatic glottis segmentation from laryngeal high-speed videos using 3D active contours.\n \n \n \n\n\n \n Schenk, F.; Urschler, M.; Aigner, C.; Roesner, I.; Aichinger, P.; and Bischof, H.\n\n\n \n\n\n\n In 18th International Conference on Medical Image Understanding and Analysis (MIUA), pages 111-116, 2014. BMVA\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Automatic glottis segmentation from laryngeal high-speed videos using 3D active contours},\n type = {inproceedings},\n year = {2014},\n pages = {111-116},\n publisher = {BMVA},\n city = {London, UK},\n id = {9df64002-1cd9-3c8a-bbe1-1610e49dd172},\n created = {2015-03-13T13:28:17.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:11.286Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Schenk2014},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Schenk, Fabian and Urschler, Martin and Aigner, Christoph and Roesner, Imme and Aichinger, Philipp and Bischof, Horst},\n booktitle = {18th International Conference on Medical Image Understanding and Analysis (MIUA)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Determination of legal majority age from 3D magnetic resonance images of the radius bone.\n \n \n \n \n\n\n \n Stern, D.; Ebner, T.; Bischof, H.; and Urschler, M.\n\n\n \n\n\n\n In 2014 IEEE 11th International Symposium on Biomedical Imaging (ISBI), pages 1119-1122, 4 2014. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"DeterminationWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Determination of legal majority age from 3D magnetic resonance images of the radius bone},\n type = {inproceedings},\n year = {2014},\n keywords = {Bone age estimation (BAE),Hand bones,Legal majority age,Magnetic resonance (MR),Random forest,Segmentation},\n pages = {1119-1122},\n websites = {http://ieeexplore.ieee.org/document/6868071/},\n month = {4},\n publisher = {IEEE},\n city = {Beijing},\n id = {fe87de1c-2039-339c-94b3-507b7e207a11},\n created = {2018-02-18T20:51:32.730Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:16.411Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Stern2014a},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {The determination of an individual's legal majority age is becoming increasingly important in forensic practice. Established age estimation methods are based on 2D X-rays, but suffer from problems due to projective imaging and exposure to ionizing radiation, which, without proper medical or criminal indication, is ethically questionable and legally prohibited in many countries. We propose an automatic 3D method for the determination of legal maturity from MR images based on the ossification of the radius bone. Age estimation is performed by a linear regression model of the epiphyseal gap volume over the known ground truth age of training data. Results are comparable with the established Greulich/Pyle (GP) and Tanner/Whitehouse (TW) methods, but do not involve harmful radiation.},\n bibtype = {inproceedings},\n author = {Stern, Darko and Ebner, Thomas and Bischof, Horst and Urschler, Martin},\n doi = {10.1109/ISBI.2014.6868071},\n booktitle = {2014 IEEE 11th International Symposium on Biomedical Imaging (ISBI)}\n}
\n
\n\n\n
\n The determination of an individual's legal majority age is becoming increasingly important in forensic practice. Established age estimation methods are based on 2D X-rays, but suffer from problems due to projective imaging and exposure to ionizing radiation, which, without proper medical or criminal indication, is ethically questionable and legally prohibited in many countries. We propose an automatic 3D method for the determination of legal maturity from MR images based on the ossification of the radius bone. Age estimation is performed by a linear regression model of the epiphyseal gap volume over the known ground truth age of training data. Results are comparable with the established Greulich/Pyle (GP) and Tanner/Whitehouse (TW) methods, but do not involve harmful radiation.\n
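The core of the method, a linear regression of age on the epiphyseal gap volume followed by a majority-age decision, fits in a few lines. The training values below are hypothetical placeholders, not data from the paper.

import numpy as np

# Hypothetical training data: epiphyseal gap volume of the radius (mm^3)
# and chronological age (years).
gap_volume = np.array([950.0, 620.0, 300.0, 120.0, 20.0, 0.0])
age = np.array([13.5, 14.8, 16.1, 17.2, 18.4, 19.6])

# Least-squares fit of age = a * volume + b.
a, b = np.polyfit(gap_volume, age, 1)

def estimate_age(volume):
    return a * volume + b

# Majority decision: compare the estimate against the 18-year threshold.
print(estimate_age(80.0) >= 18.0)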
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fully automatic bone age estimation from left hand MR images.\n \n \n \n \n\n\n \n Stern, D.; Ebner, T.; Bischof, H.; Grassegger, S.; Ehammer, T.; and Urschler, M.\n\n\n \n\n\n\n Volume 17 . Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 220-227. Golland, P.; Hata, N.; Barillot, C.; Hornegger, J.; and Howe, R., editor(s). Springer, Cham, 2014.\n \n\n\n\n
\n\n\n\n \n \n \"MedicalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2014},\n pages = {220-227},\n volume = {17},\n websites = {http://link.springer.com/10.1007/978-3-319-10470-6_28},\n publisher = {Springer, Cham},\n city = {Boston},\n id = {e7c01b80-9098-380f-ac21-fedda721e7fd},\n created = {2018-02-18T20:51:32.978Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:13.861Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Stern2014},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {There has recently been an increased demand in bone age estimation (BAE) of living individuals and human remains in legal medicine applications. A severe drawback of established BAE techniques based on X-ray images is radiation exposure, since many countries prohibit scanning involving ionizing radiation without diagnostic reasons. We propose a completely automated method for BAE based on volumetric hand MRI images. On our database of 56 male Caucasian subjects between 13 and 19 years, we are able to estimate the subjects' age with a mean difference of 0.85 ± 0.58 years compared to the chronological age, which is in line with radiologist results using established radiographic methods. We see this work as a promising first step towards a novel MRI based bone age estimation system, with the key benefits of lacking exposure to ionizing radiation and higher accuracy due to exploitation of volumetric data.},\n bibtype = {inbook},\n author = {Stern, Darko and Ebner, Thomas and Bischof, Horst and Grassegger, Sabine and Ehammer, Thomas and Urschler, Martin},\n editor = {Golland, P. and Hata, N. and Barillot, C. and Hornegger, J. and Howe, R.},\n doi = {10.1007/978-3-319-10470-6_28},\n chapter = {Fully automatic bone age estimation from left hand MR images},\n title = {Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention}\n}
\n
\n\n\n
\n There has recently been an increased demand in bone age estimation (BAE) of living individuals and human remains in legal medicine applications. A severe drawback of established BAE techniques based on X-ray images is radiation exposure, since many countries prohibit scanning involving ionizing radiation without diagnostic reasons. We propose a completely automated method for BAE based on volumetric hand MRI images. On our database of 56 male Caucasian subjects between 13 and 19 years, we are able to estimate the subjects' age with a mean difference of 0.85 ± 0.58 years compared to the chronological age, which is in line with radiologist results using established radiographic methods. We see this work as a promising first step towards a novel MRI based bone age estimation system, with the key benefits of lacking exposure to ionizing radiation and higher accuracy due to exploitation of volumetric data.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Memory Efficient 3D Integral Volumes.\n \n \n \n \n\n\n \n Urschler, M.; Bornik, A.; and Donoser, M.\n\n\n \n\n\n\n In 2013 IEEE International Conference on Computer Vision Workshops, pages 722-729, 12 2013. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"MemoryWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Memory Efficient 3D Integral Volumes},\n type = {inproceedings},\n year = {2013},\n keywords = {Integral volume,Memory efficient,Object detection,Random forest,Summed volume table},\n pages = {722-729},\n websites = {http://ieeexplore.ieee.org/document/6755967/},\n month = {12},\n publisher = {IEEE},\n city = {Sydney, AU},\n id = {3f5b2fa8-fb21-3bd2-a565-8a9c6fb64091},\n created = {2015-02-18T08:30:19.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:19.689Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2013},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Integral image data structures are very useful in computer vision applications that involve machine learning approaches based on ensembles of weak learners. The weak learners often are simply several regional sums of intensities subtracted from each other. In this work we present a memory efficient integral volume data structure that allows a reduction of the required RAM storage size in such a supervised learning framework using 3D training data. We evaluate our proposed data structure in terms of the tradeoff between computational effort and storage, and show an application to 3D object detection in liver CT data. © 2013 IEEE.},\n bibtype = {inproceedings},\n author = {Urschler, Martin and Bornik, Alexander and Donoser, Michael},\n doi = {10.1109/ICCVW.2013.99},\n booktitle = {2013 IEEE International Conference on Computer Vision Workshops}\n}
\n
\n\n\n
\n Integral image data structures are very useful in computer vision applications that involve machine learning approaches based on ensembles of weak learners. The weak learners often are simply several regional sums of intensities subtracted from each other. In this work we present a memory efficient integral volume data structure that allows a reduction of the required RAM storage size in such a supervised learning framework using 3D training data. We evaluate our proposed data structure in terms of the tradeoff between computational effort and storage, and show an application to 3D object detection in liver CT data. © 2013 IEEE.\n
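The box sums that such weak learners need become O(1) lookups once a summed-volume table is available. A minimal sketch of the plain (not memory-efficient) 3D integral volume and its 8-tap inclusion-exclusion query follows; the paper's contribution is precisely a less memory-hungry variant of this baseline structure.

import numpy as np

def integral_volume(vol):
    # Summed-volume table with a zero border so queries need no bounds checks.
    s = vol.cumsum(axis=0).cumsum(axis=1).cumsum(axis=2)
    return np.pad(s, ((1, 0), (1, 0), (1, 0)))

def box_sum(iv, x0, x1, y0, y1, z0, z1):
    # Sum over vol[x0:x1, y0:y1, z0:z1] in O(1) via 3D inclusion-exclusion.
    return (iv[x1, y1, z1]
            - iv[x0, y1, z1] - iv[x1, y0, z1] - iv[x1, y1, z0]
            + iv[x0, y0, z1] + iv[x0, y1, z0] + iv[x1, y0, z0]
            - iv[x0, y0, z0])

vol = np.random.rand(64, 64, 64)
iv = integral_volume(vol)
assert np.isclose(box_sum(iv, 4, 20, 8, 16, 0, 32), vol[4:20, 8:16, 0:32].sum())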
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tortuosity of Pulmonary Vessels Correlates with Pulmonary Hypertension.\n \n \n \n \n\n\n \n Helmberger, M.; Urschler, M.; Pienn, M.; Balint, Z.; Olschewski, A.; and Bischof, H.\n\n\n \n\n\n\n In 17th International Conference on Medical Image Understanding and Analysis (MIUA), pages 87-92, 2013. BMVA\n \n\n\n\n
\n\n\n\n \n \n \"TortuosityWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Tortuosity of Pulmonary Vessels Correlates with Pulmonary Hypertension},\n type = {inproceedings},\n year = {2013},\n pages = {87-92},\n websites = {http://events.cs.bham.ac.uk/miua2013/MIUAproceedings.pdf},\n publisher = {BMVA},\n city = {Birmingham, UK},\n id = {a40dc231-b2ae-3d5b-86b2-9a53ca1a8439},\n created = {2015-03-13T13:28:17.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:21.597Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Helmberger2013},\n notes = {Oral, Best Paper Award},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Helmberger, Michael and Urschler, Martin and Pienn, Michael and Balint, Zoltan and Olschewski, Andrea and Bischof, Horst},\n booktitle = {17th International Conference on Medical Image Understanding and Analysis (MIUA)}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Forensic-Case Analysis: From 3D Imaging to Interactive Visualization.\n \n \n \n \n\n\n \n Urschler, M.; Bornik, A.; Scheurer, E.; Yen, K.; Bischof, H.; and Schmalstieg, D.\n\n\n \n\n\n\n IEEE Computer Graphics and Applications, 32(4): 79-87. 7 2012.\n \n\n\n\n
\n\n\n\n \n \n \"Forensic-CaseWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Forensic-Case Analysis: From 3D Imaging to Interactive Visualization},\n type = {article},\n year = {2012},\n keywords = {3D forensics,3D rendering,3D segmentation,Computed tomography,Forensics,Interactive systems,Magnetic resonance imaging,Three dimensional displays,Visual analytics,computed tomography,computer graphics,magnetic resonance imaging},\n pages = {79-87},\n volume = {32},\n websites = {http://ieeexplore.ieee.org/document/6265061/},\n month = {7},\n id = {32e2d61a-7727-390b-9685-7e292804f95f},\n created = {2015-02-18T08:46:24.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:22.876Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2012},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {An interactive framework prepares raw computed-tomography and magnetic-resonance-imaging scans for courtroom presentations. The framework makes use of combined computer graphics and computer vision techniques to enable a forensic case analysis workflow. © 1981-2012 IEEE.},\n bibtype = {article},\n author = {Urschler, Martin and Bornik, Alexander and Scheurer, Eva and Yen, Kathrin and Bischof, Horst and Schmalstieg, Dieter},\n doi = {10.1109/MCG.2012.75},\n journal = {IEEE Computer Graphics and Applications},\n number = {4}\n}
\n
\n\n\n
\n An interactive framework prepares raw computed-tomography and magnetic-resonance-imaging scans for courtroom presentations. The framework makes use of combined computer graphics and computer vision techniques to enable a forensic case analysis workflow. © 1981-2012 IEEE.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Edge-Specific Kernel Functions For Pairwise Graph Matching.\n \n \n \n \n\n\n \n Donoser, M.; Urschler, M.; and Bischof, H.\n\n\n \n\n\n\n In Procedings of the British Machine Vision Conference 2012, pages 17.1-17.12, 2012. British Machine Vision Association\n \n\n\n\n
\n\n\n\n \n \n \"LearningWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Learning Edge-Specific Kernel Functions For Pairwise Graph Matching},\n type = {inproceedings},\n year = {2012},\n pages = {17.1-17.12},\n websites = {http://www.bmva.org/bmvc/2012/BMVC/paper017/index.html},\n publisher = {British Machine Vision Association},\n city = {Surrey, UK},\n id = {587a390f-86e9-30c2-bb4a-61db62136f65},\n created = {2015-03-13T13:28:18.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:24.768Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Donoser2012_BMVC},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {In this paper we consider the pairwise graph matching problem of finding correspondences between two point sets using unary and pairwise potentials, which analyze local descriptor similarity and geometric compatibility. Recently, it was shown that it is possible to learn optimal parameters for the features used in the potentials, which significantly improves results in supervised and unsupervised settings. It was demonstrated that even linear assignments (not considering geometry) with well learned potentials may improve over state-of-the-art quadratic assignment solutions. In this paper we extend this idea by directly learning edge-specific kernels for pairs of nodes. We define the pairwise kernel functions based on a statistical shape model that is learned from labeled training data. Assuming that the setting of graph matching is a priori known, the learned kernel functions allow to significantly improve results in comparison to general graph matching. We further demonstrate the applicability of game theory based evolutionary dynamics as effective and easy to implement approximation of the underlying graph matching optimization problem. Experiments on automatically aligning a set of faces and feature-point based localization of category instances demonstrate the value of the proposed method.},\n bibtype = {inproceedings},\n author = {Donoser, Michael and Urschler, Martin and Bischof, Horst},\n doi = {10.5244/C.26.17},\n booktitle = {Procedings of the British Machine Vision Conference 2012}\n}
\n
\n\n\n
\n In this paper we consider the pairwise graph matching problem of finding correspondences between two point sets using unary and pairwise potentials, which analyze local descriptor similarity and geometric compatibility. Recently, it was shown that it is possible to learn optimal parameters for the features used in the potentials, which significantly improves results in supervised and unsupervised settings. It was demonstrated that even linear assignments (not considering geometry) with well learned potentials may improve over state-of-the-art quadratic assignment solutions. In this paper we extend this idea by directly learning edge-specific kernels for pairs of nodes. We define the pairwise kernel functions based on a statistical shape model that is learned from labeled training data. Assuming that the setting of graph matching is a priori known, the learned kernel functions allow to significantly improve results in comparison to general graph matching. We further demonstrate the applicability of game theory based evolutionary dynamics as effective and easy to implement approximation of the underlying graph matching optimization problem. Experiments on automatically aligning a set of faces and feature-point based localization of category instances demonstrate the value of the proposed method.\n
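The evolutionary-dynamics approximation mentioned in this abstract is straightforward to implement. Below is a generic sketch of replicator dynamics over an affinity matrix of candidate correspondences; building the affinity matrix (in the paper, from learned edge-specific kernels) and enforcing one-to-one constraints during discretization are assumed to happen elsewhere.

import numpy as np

def replicator_matching(M, iters=500, eps=1e-12):
    # M: (n_candidates, n_candidates) non-negative affinity matrix, where
    # each candidate is one tentative correspondence and M[a, b] scores how
    # compatible two correspondences are.
    x = np.full(M.shape[0], 1.0 / M.shape[0])   # start at the barycenter
    for _ in range(iters):
        mx = M @ x
        x = x * mx / (x @ mx + eps)             # discrete replicator update
    return x                                    # soft assignment scores

The soft scores would then be discretized, for example by greedily accepting the strongest candidates while rejecting those that conflict with already accepted matches.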
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Highly Consistent Sequential Segmentation.\n \n \n \n \n\n\n \n Donoser, M.; Urschler, M.; Riemenschneider, H.; and Bischof, H.\n\n\n \n\n\n\n Volume 6688 LNCS . Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 48-58. Heyden, A.; and Kahl, F., editor(s). Springer, Berlin, Heidelberg, 2011.\n \n\n\n\n
\n\n\n\n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2011},\n pages = {48-58},\n volume = {6688 LNCS},\n websites = {http://link.springer.com/10.1007/978-3-642-21227-7_5},\n publisher = {Springer, Berlin, Heidelberg},\n city = {Ystad Saltsjöbad, SW},\n id = {abb321a9-c721-3ed1-a14c-f9808cc8a732},\n created = {2015-02-18T08:30:19.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:25.406Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Donoser2011},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {This paper deals with segmentation of image sequences in an unsupervised manner with the goal of getting highly consistent segmentation results from frame-to-frame. We first introduce a segmentation method that uses results of the previous frame as initialization and significantly improves consistency in comparison to a single frame based approach. We also find correspondences between the segmented regions from one frame to the next to further increase consistency. This matching step is based on a modified version of an efficient partial shape matching method which allows identification of similar parts of regions despite topology changes like merges and splits. We use the identified matched parts to define a partial matching cost which is then used as input to pairwise graph matching. Experiments demonstrate that we can achieve highly consistent segmentations for diverse image sequences, even allowing to track manually initialized moving and static objects. © 2011 Springer-Verlag.},\n bibtype = {inbook},\n author = {Donoser, Michael and Urschler, Martin and Riemenschneider, Hayko and Bischof, Horst},\n editor = {Heyden, A. and Kahl, F.},\n doi = {10.1007/978-3-642-21227-7_5},\n chapter = {Highly Consistent Sequential Segmentation},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n
\n This paper deals with segmentation of image sequences in an unsupervised manner with the goal of getting highly consistent segmentation results from frame-to-frame. We first introduce a segmentation method that uses results of the previous frame as initialization and significantly improves consistency in comparison to a single frame based approach. We also find correspondences between the segmented regions from one frame to the next to further increase consistency. This matching step is based on a modified version of an efficient partial shape matching method which allows identification of similar parts of regions despite topology changes like merges and splits. We use the identified matched parts to define a partial matching cost which is then used as input to pairwise graph matching. Experiments demonstrate that we can achieve highly consistent segmentations for diverse image sequences, even allowing to track manually initialized moving and static objects. © 2011 Springer-Verlag.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluation of Registration Methods on Thoracic CT: The EMPIRE10 Challenge.\n \n \n \n \n\n\n \n Murphy, K.; van Ginneken, B.; Reinhardt, J., M.; Kabus, S.; Ding, K.; Deng, X.; Cao, K.; Du, K.; Christensen, G., E.; Garcia, V.; Vercauteren, T.; Ayache, N.; Commowick, O.; Malandain, G.; Glocker, B.; Paragios, N.; Navab, N.; Gorbunova, V.; Sporring, J.; de Bruijne, M.; Han, X.; Heinrich, M., P.; Schnabel, J., A.; Jenkinson, M.; Lorenz, C.; Modat, M.; McClelland, J., R.; Ourselin, S.; Muenzing, S., E., A.; Viergever, M., A.; De Nigris, D.; Collins, D., L.; Arbel, T.; Peroni, M.; Li, R.; Sharp, G., C.; Schmidt-Richberg, A.; Ehrhardt, J.; Werner, R.; Smeets, D.; Loeckx, D.; Song, G.; Tustison, N.; Avants, B.; Gee, J., C.; Staring, M.; Klein, S.; Stoel, B., C.; Urschler, M.; Werlberger, M.; Vandemeulebroucke, J.; Rit, S.; Sarrut, D.; and Pluim, J., P., W.\n\n\n \n\n\n\n IEEE Transactions on Medical Imaging, 30(11): 1901-1920. 11 2011.\n \n\n\n\n
\n\n\n\n \n \n \"EvaluationWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Evaluation of Registration Methods on Thoracic CT: The EMPIRE10 Challenge},\n type = {article},\n year = {2011},\n keywords = {Chest,computed tomography,evaluation,registration},\n pages = {1901-1920},\n volume = {30},\n websites = {http://ieeexplore.ieee.org/document/5782992/},\n month = {11},\n id = {e864dfe0-8280-385c-9616-0708fb0e0d8d},\n created = {2015-02-18T08:30:20.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:27.394Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Murphy2011},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {EMPIRE10 (Evaluation of Methods for Pulmonary Image REgistration 2010) is a public platform for fair and meaningful comparison of registration algorithms which are applied to a database of intrapatient thoracic CT image pairs. Evaluation of nonrigid registration techniques is a nontrivial task. This is compounded by the fact that researchers typically test only on their own data, which varies widely. For this reason, reliable assessment and comparison of different registration algorithms has been virtually impossible in the past. In this work we present the results of the launch phase of EMPIRE10, which comprised the comprehensive evaluation and comparison of 20 individual algorithms from leading academic and industrial research groups. All algorithms are applied to the same set of 30 thoracic CT pairs. Algorithm settings and parameters are chosen by researchers expert in the configuration of their own method and the evaluation is independent, using the same criteria for all participants. All results are published on the EMPIRE10 website (http://empire10.isi.uu.nl). The challenge remains ongoing and open to new participants. Full results from 24 algorithms have been published at the time of writing. This paper details the organization of the challenge, the data and evaluation methods and the outcome of the initial launch with 20 algorithms. The gain in knowledge and future work are discussed. © 2011 IEEE.},\n bibtype = {article},\n author = {Murphy, Keelin and van Ginneken, B. and Reinhardt, Joseph M. and Kabus, Sven and Ding, Kai and Deng, Xiang and Cao, Kunlin and Du, Kaifang and Christensen, Gary E. and Garcia, Vincent and Vercauteren, Tom and Ayache, Nicholas and Commowick, Olivier and Malandain, Grégoire and Glocker, Ben and Paragios, Nikos and Navab, Nassir and Gorbunova, Vladlena and Sporring, Jon and de Bruijne, M. and Han, Xiao and Heinrich, Mattias P. and Schnabel, Julia A. and Jenkinson, Mark and Lorenz, Cristian and Modat, Marc and McClelland, Jamie R. and Ourselin, Sebastien and Muenzing, S. E. A. and Viergever, Max A. and De Nigris, Dante and Collins, D. Louis and Arbel, Tal and Peroni, Marta and Li, Rui and Sharp, Gregory C. and Schmidt-Richberg, Alexander and Ehrhardt, Jan and Werner, René and Smeets, Dirk and Loeckx, Dirk and Song, Gang and Tustison, Nicholas and Avants, Brian and Gee, James C. and Staring, Marius and Klein, Stefan and Stoel, Berend C. and Urschler, Martin and Werlberger, Manuel and Vandemeulebroucke, Jef and Rit, Simon and Sarrut, David and Pluim, J. P. W.},\n doi = {10.1109/TMI.2011.2158349},\n journal = {IEEE Transactions on Medical Imaging},\n number = {11}\n}
2010 (6)
Occlusion detection for ICAO compliant facial photographs. Storer, M.; Urschler, M.; and Bischof, H. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops, pages 122-129, June 2010. IEEE.
\n\n\n\n \n \n \"OcclusionWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Storer2010a,
  title = {Occlusion detection for ICAO compliant facial photographs},
  author = {Storer, Markus and Urschler, Martin and Bischof, Horst},
  booktitle = {2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops},
  year = {2010},
  month = {6},
  pages = {122-129},
  publisher = {IEEE},
  address = {San Francisco, US},
  doi = {10.1109/CVPRW.2010.5544616},
  url = {http://ieeexplore.ieee.org/document/5544616/},
  note = {Oral},
  abstract = {Facial image analysis is an important computer vision topic as a first step for biometric applications like face recognition/verification. The ICAO specification defines criteria to assess the suitability of facial images for later use in such tasks. This standard prohibits photographs showing occlusions, so occluded images need to be detected automatically. In this work we present a novel algorithm for occlusion detection and evaluate its performance on several databases. First, we use the publicly available AR faces database, which contains many occluded face image samples. We show a straightforward algorithm based on color space techniques which gives a very high performance on this database. We conclude that the AR faces database is too simple to evaluate occlusions and propose our own, more complex database, which includes, e.g., hands or arbitrary objects covering the face. Finally, we extend our first algorithm by an Active Shape Model in combination with a PCA reconstruction verification. We show how our novel occlusion detection algorithm outperforms the simple approach on our more complex database. © 2010 IEEE.}
}
Intensity-Based Congealing for Unsupervised Joint Image Alignment. Storer, M.; Urschler, M.; and Bischof, H. In 2010 20th International Conference on Pattern Recognition, pages 1473-1476, August 2010. IEEE.
\n\n\n\n \n \n \"Intensity-BasedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Storer2010b,
  title = {Intensity-Based Congealing for Unsupervised Joint Image Alignment},
  author = {Storer, Markus and Urschler, Martin and Bischof, Horst},
  booktitle = {2010 20th International Conference on Pattern Recognition},
  year = {2010},
  month = {8},
  pages = {1473-1476},
  publisher = {IEEE},
  address = {Istanbul},
  doi = {10.1109/ICPR.2010.364},
  url = {http://ieeexplore.ieee.org/document/5597177/},
  note = {Oral},
  abstract = {We present an approach for unsupervised alignment of an ensemble of images called congealing. Our algorithm is based on image registration using the mutual information measure as a cost function. The cost function is optimized by a standard gradient descent method in a multiresolution scheme. As opposed to other congealing methods, which use the SSD measure, the mutual information measure is better suited as a similarity measure for registering images since no prior assumptions on the relation of intensities between images are required. We present alignment results on the MNIST handwritten digit database and on facial images obtained from the CVL database. © 2010 IEEE.}
}
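The cost function named in this abstract is standard mutual information between images; the paper's exact estimator and optimizer are not reproduced here, so the following is only a minimal histogram-based sketch in Python (the binning and the function name are illustrative assumptions, not the authors' code):

import numpy as np

def mutual_information(a, b, bins=32):
    # Plug-in MI estimate from the joint histogram of two grayscale
    # images with intensities in [0, 1]; a coarse illustration only.
    joint, _, _ = np.histogram2d(a.ravel(), b.ravel(), bins=bins,
                                 range=[[0.0, 1.0], [0.0, 1.0]])
    pxy = joint / joint.sum()
    px = pxy.sum(axis=1, keepdims=True)   # marginal of a
    py = pxy.sum(axis=0, keepdims=True)   # marginal of b
    nz = pxy > 0                          # avoid log(0)
    return float(np.sum(pxy[nz] * np.log(pxy[nz] / (px @ py)[nz])))

In a congealing loop one would update each image's warp parameters by (numerical) gradient steps on such an MI cost, coarse-to-fine over a multiresolution pyramid, as the abstract describes.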
Efficient Robust Active Appearance Model Fitting. Storer, M.; Roth, P. M.; Urschler, M.; Bischof, H.; and Birchbauer, J. A. In Ranchordas, A.; Pereira, J. M.; Araujo, H. J.; and Tavares, J. M. R. S., editors, Communications in Computer and Information Science, volume 68 CCIS, pages 229-241. Springer, Berlin, Heidelberg, 2010.
\n\n\n\n \n \n \"CommunicationsWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{Storer2010,
  chapter = {Efficient Robust Active Appearance Model Fitting},
  title = {Communications in Computer and Information Science},
  author = {Storer, Markus and Roth, Peter M. and Urschler, Martin and Bischof, Horst and Birchbauer, Josef A.},
  editor = {Ranchordas, A. and Pereira, J. M. and Araujo, H. J. and Tavares, J. M. R. S.},
  year = {2010},
  volume = {68 CCIS},
  pages = {229-241},
  publisher = {Springer, Berlin, Heidelberg},
  doi = {10.1007/978-3-642-11840-1_17},
  url = {http://link.springer.com/10.1007/978-3-642-11840-1_17},
  abstract = {The Active Appearance Model (AAM) is a widely used approach for model based vision showing excellent results. But one major drawback is that the method is not robust against occlusions. Thus, if parts of the image are occluded the method converges to local minima and the obtained results are unreliable. To overcome this problem we propose a robust AAM fitting strategy. The main idea is to apply a robust PCA model to reconstruct the missing feature information and to use the thus obtained image as input for the standard AAM fitting process. Since existing methods for robust PCA reconstruction are computationally too expensive for real-time processing we applied a more efficient method: Fast-Robust PCA (FR-PCA). In fact, by using our FR-PCA the computational effort is drastically reduced. Moreover, more accurate reconstructions are obtained. In the experiments, we evaluated both the FR-PCA model on the publicly available ALOI database and the whole robust AAM fitting chain on facial images. The results clearly show the benefits of our approach in terms of accuracy and speed when processing disturbed data (i.e., images containing occlusions). © 2010 Springer-Verlag Berlin Heidelberg.}
}
Optical flow based deformable volume registration using a novel second-order regularization prior. Grbić, S.; Urschler, M.; Pock, T.; and Bischof, H. In Dawant, B. M.; and Haynor, D. R., editors, Medical Imaging 2010: Image Processing, volume 7623, pages 76232R, March 2010. SPIE.
\n\n\n\n \n \n \"OpticalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Grbic2010,
  title = {Optical flow based deformable volume registration using a novel second-order regularization prior},
  author = {Grbić, Saša and Urschler, Martin and Pock, Thomas and Bischof, Horst},
  editor = {Dawant, Benoit M. and Haynor, David R.},
  booktitle = {Medical Imaging 2010: Image Processing},
  year = {2010},
  month = {3},
  volume = {7623},
  pages = {76232R},
  publisher = {SPIE},
  address = {San Diego},
  doi = {10.1117/12.844549},
  url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.844549},
  note = {Poster},
  abstract = {Nonlinear image registration is an initial step for a large number of medical image analysis applications. Optical flow based intensity registration is often used for dealing with intra-modality applications involving motion differences. In this work we present an energy functional which uses a novel, second-order regularization prior of the displacement field. Compared to other methods our scheme is robust to non-Gaussian noise and does not penalize locally affine deformation fields in homogeneous areas. We propose an efficient and stable numerical scheme to find the minimizer of the presented energy. We implemented our algorithm using modern consumer graphics processing units and thereby increased the execution performance dramatically. We further show experimental evaluations on clinical CT thorax data sets at different breathing states and on dynamic 4D CT cardiac data sets. © 2010 Copyright SPIE - The International Society for Optical Engineering.}
}
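The abstract states two properties of the prior: robustness to non-Gaussian noise (a robust data term) and no penalty on locally affine displacement fields. The paper's exact functional is not reproduced in the abstract; a second-order prior with precisely these stated properties has the generic form

E(u) = \int_\Omega \bigl| I_1(x + u(x)) - I_0(x) \bigr| \, dx + \lambda \int_\Omega \sum_{k=1}^{3} \bigl\| \nabla^2 u_k(x) \bigr\| \, dx ,

where u = (u_1, u_2, u_3) is the displacement field, \nabla^2 u_k is the Hessian of one displacement component, and \lambda balances the two terms. Since affine fields have vanishing second derivatives, they incur no regularization cost, matching the property claimed in the abstract.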
Person Independent Head Pose Estimation by Non-Linear Regression and Manifold Embedding. Straka, M.; Urschler, M.; Storer, M.; Bischof, H.; and Birchbauer, J. A. In 34th Workshop of the Austrian Association for Pattern Recognition, 2010.
@inproceedings{Straka2010,
  title = {Person Independent Head Pose Estimation by Non-Linear Regression and Manifold Embedding},
  author = {Straka, Matthias and Urschler, Martin and Storer, Markus and Bischof, Horst and Birchbauer, Josef A.},
  booktitle = {34th Workshop of the Austrian Association for Pattern Recognition},
  year = {2010},
  address = {Zwettl, AT},
  note = {Oral}
}
Robust Optical Flow Based Deformable Registration of Thoracic CT Images. Urschler, M.; Werlberger, M.; Scheurer, E.; and Bischof, H. In MICCAI Workshop Medical Image Analysis in the Clinic: A Grand Challenge, pages 195-204, 2010.
@inproceedings{Urschler2010_MICCAIWS,
  title = {Robust Optical Flow Based Deformable Registration of Thoracic CT Images},
  author = {Urschler, Martin and Werlberger, Manuel and Scheurer, Eva and Bischof, Horst},
  booktitle = {MICCAI Workshop Medical Image Analysis in the Clinic: A Grand Challenge},
  year = {2010},
  pages = {195-204},
  address = {Beijing, CN},
  note = {Poster},
  abstract = {We present an optical flow deformable registration method which is based on robust measures for data and regularization terms. We show two specific implementations of the method, where one penalizes gradients in the displacement field in an isotropic fashion and the other one regularizes by weighting the penalization according to the image gradients anisotropically. Our data term consists of the L1-norm of the standard optical flow constraint. We show a numerical algorithm that solves the two proposed models in a primal-dual optimization setup. Our algorithm works in a multi-resolution manner and is applied to the 20 data sets of the EMPIRE10 registration challenge. Our results show room for improvement. Our rather simple model does not penalize non-diffeomorphic transformations, which leads to bad results on one of the evaluation measures, and it seems unsuited for large-deformation cases. However, our algorithm is able to perform registrations of data sets with sizes around 400^3 on the order of a few minutes using a dedicated CUDA based GPU implementation, which is very fast compared to other reported algorithms.}
}
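The two model variants named in this abstract differ only in how the displacement regularizer is weighted. With an L1 data term on the linearized optical flow constraint, a common way to write both (the constants and the particular weighting function below are assumptions, not taken from the paper) is

\min_{u} \int_\Omega g(x) \sum_{d} |\nabla u_d(x)| \, dx + \lambda \int_\Omega \bigl| \nabla I_0(x)^{\top} u(x) + I_1(x) - I_0(x) \bigr| \, dx ,

with g \equiv 1 for the isotropic variant, and an edge-dependent weight such as g(x) = \exp(-\alpha \|\nabla I_0(x)\|^{\beta}) for the anisotropic variant, so that smoothing of the displacement field is relaxed across strong image gradients.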
2009 (7)
3D-MAM: 3D morphable appearance model for efficient fine head pose estimation from still images. Storer, M.; Urschler, M.; and Bischof, H. In 2009 IEEE 12th International Conference on Computer Vision Workshops (ICCVW), pages 192-199, 2009. IEEE.
@inproceedings{Storer2009,
  title = {3D-MAM: 3D morphable appearance model for efficient fine head pose estimation from still images},
  author = {Storer, Markus and Urschler, Martin and Bischof, Horst},
  booktitle = {2009 IEEE 12th International Conference on Computer Vision Workshops (ICCVW)},
  year = {2009},
  pages = {192-199},
  publisher = {IEEE},
  address = {Kyoto, JP},
  doi = {10.1109/ICCVW.2009.5457701},
  note = {Poster},
  abstract = {Identity-invariant estimation of head pose from still images is a challenging task due to the high variability of facial appearance. We present a novel 3D head pose estimation approach, which utilizes the flexibility and expressibility of a dense generative 3D facial model in combination with a very fast fitting algorithm. The efficiency of the head pose estimation is obtained by a 2D synthesis of the facial input image. This optimization procedure drives the appearance and pose of the 3D facial model. In contrast to many other approaches we are specifically interested in the more difficult task of head pose estimation from still images, instead of tracking faces in image sequences. We evaluate our approach on two publicly available databases (FacePix and USF HumanID) and compare our method to the 3D morphable model and other state of the art approaches in terms of accuracy and speed.}
}
Saliency driven total variation segmentation. Donoser, M.; Urschler, M.; Hirzer, M.; and Bischof, H. In 2009 IEEE 12th International Conference on Computer Vision, pages 817-824, September 2009. IEEE.
\n\n\n\n \n \n \"SaliencyWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Donoser2009,
  title = {Saliency driven total variation segmentation},
  author = {Donoser, Michael and Urschler, Martin and Hirzer, Martin and Bischof, Horst},
  booktitle = {2009 IEEE 12th International Conference on Computer Vision},
  year = {2009},
  month = {9},
  pages = {817-824},
  publisher = {IEEE},
  address = {Kyoto, JP},
  doi = {10.1109/ICCV.2009.5459296},
  url = {http://ieeexplore.ieee.org/document/5459296/},
  note = {Poster},
  abstract = {This paper introduces an unsupervised color segmentation method. The underlying idea is to segment the input image several times, each time focussing on a different salient part of the image and to subsequently merge all obtained results into one composite segmentation. We identify salient parts of the image by applying affinity propagation clustering to efficiently calculated local color and texture models. Each salient region then serves as an independent initialization for a figure/ground segmentation. Segmentation is done by minimizing a convex energy functional based on weighted total variation leading to a global optimal solution. Each salient region provides an accurate figure/ground segmentation highlighting different parts of the image. These highly redundant results are combined into one composite segmentation by analyzing local segmentation certainty. Our formulation is quite general, and other salient region detection algorithms in combination with any semi-supervised figure/ground segmentation approach can be used. We demonstrate the high quality of our method on the well-known Berkeley segmentation database. Furthermore we show that our method can be used to provide good spatial support for recognition frameworks. ©2009 IEEE.}
}
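The convex figure/ground energy this abstract refers to is weighted total variation. In the formulation standard in this line of work (the paper's exact weighting and data terms are not restated in the abstract, so this is a generic form), one solves

\min_{u \in [0,1]} \int_\Omega g(x)\,|\nabla u(x)|\,dx + \lambda \int_\Omega u(x)\,f(x)\,dx ,

where g is an edge-indicator weight, f encodes the pointwise foreground/background affinity derived from the salient-region models, and thresholding the global minimizer u at any level in (0,1) yields a globally optimal binary segmentation, which is what makes the relaxation attractive.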
Fast-Robust PCA. Storer, M.; Roth, P. M.; Urschler, M.; and Bischof, H. In Salberg, A. B.; Hardeberg, J. Y.; and Jenssen, R., editors, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 5575 LNCS, pages 430-439. Springer, Berlin, Heidelberg, 2009.
\n\n\n\n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{Storer2009a,
  chapter = {Fast-Robust PCA},
  title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  author = {Storer, Markus and Roth, Peter M. and Urschler, Martin and Bischof, Horst},
  editor = {Salberg, A. B. and Hardeberg, J. Y. and Jenssen, R.},
  year = {2009},
  volume = {5575 LNCS},
  pages = {430-439},
  publisher = {Springer, Berlin, Heidelberg},
  address = {Oslo, Norway},
  doi = {10.1007/978-3-642-02230-2_44},
  url = {http://link.springer.com/10.1007/978-3-642-02230-2_44},
  note = {Oral},
  abstract = {Principal Component Analysis (PCA) is a powerful and widely used tool in Computer Vision and is applied, e.g., for dimensionality reduction. But as a drawback, it is not robust to outliers. Hence, if the input data is corrupted, an arbitrarily wrong representation is obtained. To overcome this problem, various methods have been proposed to robustly estimate the PCA coefficients, but these methods are computationally too expensive for practical applications. Thus, in this paper we propose a novel fast and robust PCA (FR-PCA), which drastically reduces the computational effort. Moreover, more accurate representations are obtained. In particular, we propose a two-stage outlier detection procedure, where in the first stage outliers are detected by analyzing a large number of smaller subspaces. In the second stage, remaining outliers are detected by a robust least-square fitting. To show these benefits, in the experiments we evaluate the FR-PCA method for the task of robust image reconstruction on the publicly available ALOI database. The results clearly show that our approach outperforms existing methods in terms of accuracy and speed when processing corrupted data. © 2009 Springer Berlin Heidelberg.}
}
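The two stages named in this abstract (outlier detection via many small subspaces, then robust coefficient estimation) can be sketched in NumPy. This is a strongly simplified illustration; all function names, subset sizes, and thresholds are assumptions, not the paper's algorithm:

import numpy as np

def fit_pca(X, k):
    # PCA on the rows of X: returns the mean and the top-k principal axes.
    mu = X.mean(axis=0)
    _, _, Vt = np.linalg.svd(X - mu, full_matrices=False)
    return mu, Vt[:k]

def outlier_mask(x, X_train, n_subsets=50, subset_size=200, k=5,
                 factor=3.0, seed=0):
    # Stage 1 (simplified): score every pixel by its reconstruction error
    # in small PCAs trained on random pixel subsets; flag high-error pixels.
    d = x.size
    rng = np.random.default_rng(seed)
    err_sum, err_cnt = np.zeros(d), np.zeros(d)
    for _ in range(n_subsets):
        idx = rng.choice(d, size=subset_size, replace=False)
        mu, V = fit_pca(X_train[:, idx], k)
        c = (x[idx] - mu) @ V.T
        err_sum[idx] += np.abs(x[idx] - (mu + c @ V))
        err_cnt[idx] += 1
    seen = err_cnt > 0
    score = err_sum / np.maximum(err_cnt, 1)
    return seen & (score > factor * np.median(score[seen]))

def robust_reconstruct(x, X_train, k=20):
    # Stage 2: estimate full-model PCA coefficients from inlier pixels only,
    # then reconstruct everywhere (occluded pixels are filled by the model).
    mu, V = fit_pca(X_train, k)
    inliers = ~outlier_mask(x, X_train)
    c, *_ = np.linalg.lstsq(V[:, inliers].T, (x - mu)[inliers], rcond=None)
    return mu + c @ V

The key design point the paper exploits is that many small sub-PCAs are far cheaper to evaluate than iteratively reweighted fitting in the full subspace, which is what makes the robust reconstruction fast enough for the AAM pipeline above.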
Active Appearance Model Fitting under Occlusion using Fast-Robust PCA. Storer, M.; Roth, P. M.; Urschler, M.; Bischof, H.; and Birchbauer, J. A. In Proceedings of the Fourth International Conference on Computer Vision Theory and Applications, pages 129-136, 2009. SciTePress - Science and Technology Publications.
\n\n\n\n \n \n \"ActiveWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Storer2009b,
  title = {Active Appearance Model Fitting under Occlusion using Fast-Robust PCA},
  author = {Storer, Markus and Roth, Peter M. and Urschler, Martin and Bischof, Horst and Birchbauer, Josef A.},
  booktitle = {Proceedings of the Fourth International Conference on Computer Vision Theory and Applications},
  year = {2009},
  pages = {129-136},
  publisher = {SciTePress - Science and Technology Publications},
  address = {Lisboa},
  doi = {10.5220/0001768701290136},
  url = {http://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0001768701290136},
  note = {Oral},
  abstract = {The Active Appearance Model (AAM) is a widely used method for model based vision showing excellent results. But one major drawback is that the method is not robust against occlusions. Thus, if parts of the image are occluded the method converges to local minima and the obtained results are unreliable. To overcome this problem we propose a robust AAM fitting strategy. The main idea is to apply a robust PCA model to reconstruct the missing feature information and to use the thus obtained image as input for the standard AAM fitting process. Since existing methods for robust PCA reconstruction are computationally too expensive for real-time processing we developed a more efficient method: fast robust PCA (FR-PCA). In fact, by using our FR-PCA the computational effort is drastically reduced. Moreover, more accurate reconstructions are obtained. In the experiments, we evaluated both the fast robust PCA model on the publicly available ALOI database and the whole robust AAM fitting chain on facial images. The results clearly show the benefits of our approach in terms of accuracy and speed when processing disturbed data (i.e., images containing occlusions).}
}
Interactive 3D Segmentation as an Example for Medical Visual Computing. Urschler, M.; Bornik, A.; Scheurer, E.; Pock, T.; and Bischof, H. Vermessung & Geoinformation, 3: 311-318. 2009.
@article{Urschler2009_VG,
  title = {Interactive 3D Segmentation as an Example for Medical Visual Computing},
  author = {Urschler, Martin and Bornik, Alexander and Scheurer, Eva and Pock, Thomas and Bischof, Horst},
  journal = {Vermessung & Geoinformation},
  year = {2009},
  volume = {3},
  pages = {311-318}
}
An automatic hybrid segmentation approach for aligned face portrait images. Hirzer, M.; Urschler, M.; Bischof, H.; and Birchbauer, J. A. In 33rd Workshop of the Austrian Association for Pattern Recognition, pages 49-60, 2009.
@inproceedings{Hirzer2009,
  title = {An automatic hybrid segmentation approach for aligned face portrait images},
  author = {Hirzer, Martin and Urschler, Martin and Bischof, Horst and Birchbauer, Josef A.},
  booktitle = {33rd Workshop of the Austrian Association for Pattern Recognition},
  year = {2009},
  pages = {49-60},
  address = {Stainz, AT},
  note = {Oral},
  abstract = {With the introduction of electronic personal documents (e.g. passports) in recent years, the analysis of suitable photographs has become an important field of research. Such photographs for machine readable travel documents have to fulfill a set of minimal quality requirements defined by the International Civil Aviation Organization (ICAO). As some of the specified requirements are related to certain image regions only, these regions must be located in advance. In this work we present an automatic segmentation method for aligned color face images. The method is based on a convex variational energy formulation which is solved using weighted total variation. We apply constraints in the form of prior knowledge about the spatial configuration of typical passport photographs in order to solve for a global energy minimum. Several experiments on face images from two different datasets are presented to evaluate the performance of our algorithm. The obtained results demonstrate that our method is fairly robust and significantly outperforms other methods targeted at the same problem, in particular an expert system and an AdaBoost classifier.}
}
Robust facial component detection for face alignment applications. Urschler, M.; Storer, M.; Bischof, H.; and Birchbauer, J. A. In 33rd Workshop of the Austrian Association for Pattern Recognition, pages 61-72, 2009.
@inproceedings{Urschler2009_OAGM,
  title = {Robust facial component detection for face alignment applications},
  author = {Urschler, Martin and Storer, Markus and Bischof, Horst and Birchbauer, Josef A.},
  booktitle = {33rd Workshop of the Austrian Association for Pattern Recognition},
  year = {2009},
  pages = {61-72},
  address = {Stainz, AT},
  note = {Oral}
}
2008 (3)
Classifier fusion for robust ICAO compliant face analysis. Storer, M.; Urschler, M.; Bischof, H.; and Birchbauer, J. A. In 2008 8th IEEE International Conference on Automatic Face & Gesture Recognition, pages 1-8, September 2008. IEEE.
\n\n\n\n \n \n \"ClassifierWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Storer2008,
  title = {Classifier fusion for robust ICAO compliant face analysis},
  author = {Storer, Markus and Urschler, Martin and Bischof, Horst and Birchbauer, Josef A.},
  booktitle = {2008 8th IEEE International Conference on Automatic Face & Gesture Recognition},
  year = {2008},
  month = {9},
  pages = {1-8},
  publisher = {IEEE},
  address = {Amsterdam},
  doi = {10.1109/AFGR.2008.4813391},
  url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4813391},
  note = {Poster},
  abstract = {Biometrics is a huge and very fast growing domain of methods for uniquely recognizing humans based on one or more intrinsic physical or behavioral traits with applications in many different areas, e.g., surveillance, person verification and identification. The International Civil Aviation Organization (ICAO) provides a number of specifications to prepare automated recognition from travel document photos. The goal of these specifications is to increase security in civil aviation on the basis of standardized biometric data. Due to this international standard, there is a high demand for automatically checking face images to assist civil service employees in decision-making. In this work, we present a face normalization and analysis system implementing several parts of the ICAO specification. Our key contribution of this analysis is the fusion of different established classifiers to boost performance of the overall system. Our results show the superior checking quality on facial images due to utilizing classifier fusion compared to a single classifier decision. © 2008 IEEE.}
}
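The abstract describes score-level fusion of several established classifiers per ICAO compliance criterion, but does not restate the fusion rule. The sketch below shows only a generic weighted sum rule in Python; the function name, weights, and threshold are illustrative assumptions:

import numpy as np

def fuse_compliance_scores(scores, weights=None, threshold=0.5):
    # scores: per-classifier probabilities that an image satisfies a criterion.
    scores = np.asarray(scores, dtype=float)
    w = np.ones_like(scores) if weights is None else np.asarray(weights, dtype=float)
    fused = float(w @ scores) / float(w.sum())   # weighted sum rule
    return fused, fused >= threshold

# Example: three classifiers judging the "mouth closed" criterion on one photo.
fused, compliant = fuse_compliance_scores([0.9, 0.7, 0.8], weights=[2.0, 1.0, 1.0])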
Face Image Normalization and Expression/Pose Validation for the Analysis of Machine Readable Travel Documents. Storer, M.; Urschler, M.; Bischof, H.; and Birchbauer, J. A. In 32nd Workshop of the Austrian Association for Pattern Recognition: Challenges in the Biosciences: Image Analysis and Pattern Recognition Aspects, pages 29-39, 2008.
@inproceedings{Storer2008b,
  title = {Face Image Normalization and Expression/Pose Validation for the Analysis of Machine Readable Travel Documents},
  author = {Storer, Markus and Urschler, Martin and Bischof, Horst and Birchbauer, Josef A.},
  booktitle = {32nd Workshop of the Austrian Association for Pattern Recognition: Challenges in the Biosciences: Image Analysis and Pattern Recognition Aspects},
  year = {2008},
  pages = {29-39},
  address = {Linz, AT},
  note = {Oral},
  abstract = {Biometrics is a huge and very fast growing domain of methods for uniquely recognizing humans based on one or more intrinsic physical or behavioral traits. Hence, there are many different applications, e.g., surveillance, person verification and identification. The International Civil Aviation Organization (ICAO) provides a number of specifications to prepare automated recognition from travel document photos. The goal of these specifications is to increase security in civil aviation on the basis of standardized biometric data. In this context we are concerned with the strict requirements regarding frontal pose, open eyes, closed mouth, and gaze direction when assessing the suitability of face images for inclusion in travel documents. Due to this international standard, there is a high demand for automatically checking face images to assist civil service employees in decision-making. In this work, we present a face normalization and analysis system implementing several parts of the ICAO specification. Our key contribution of this analysis is the fusion of different established classifiers to boost performance of the system. Our results show the superior checking quality of our methods in comparison to state-of-the-art technology of two commercial vendors.}
}
On combining classifiers for assessing portrait image compliance with ICAO/ISO standards. Storer, M.; Urschler, M.; Bischof, H.; and Birchbauer, J. A. In Biometrics and Electronic Signatures (BIOSIG), volume 137 LNI, pages 153-164, 2008.
@inproceedings{Storer2008a,
  title = {On combining classifiers for assessing portrait image compliance with ICAO/ISO standards},
  author = {Storer, Markus and Urschler, Martin and Bischof, Horst and Birchbauer, Josef A.},
  booktitle = {Biometrics and Electronic Signatures (BIOSIG)},
  year = {2008},
  volume = {137 LNI},
  pages = {153-164},
  address = {Darmstadt, GE},
  note = {Oral}
}
2007 (2)
A Framework for Comparison and Evaluation of Nonlinear Intra-Subject Image Registration Algorithms. Urschler, M.; Kluckner, S.; and Bischof, H. Insight Journal, (2007 MICCAI Open Science Workshop): 1-16. 2007.
\n\n\n\n \n \n \"AWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Urschler2007,
  title = {A Framework for Comparison and Evaluation of Nonlinear Intra-Subject Image Registration Algorithms},
  author = {Urschler, Martin and Kluckner, Stefan and Bischof, Horst},
  journal = {Insight Journal},
  year = {2007},
  number = {2007 MICCAI Open Science Workshop},
  pages = {1-16},
  url = {http://hdl.handle.net/1926/561},
  abstract = {Performance validation of nonlinear registration algorithms is a difficult problem due to the lack of a suitable ground truth in most applications. However, the ill-posed nature of the nonlinear registration problem and the large space of possible solutions makes the quantitative evaluation of algorithms extremely important. We argue that finding a standardized way of performing evaluation and comparing existing and new algorithms currently is more important than inventing novel methods. While there are already existing evaluation frameworks for nonlinear inter-subject brain registration applications, there is still a lack of protocols for intra-subject studies or soft tissue organs. In this work we present such a framework which is designed in an "open-source" and "open-data" manner around the Insight Segmentation & Registration Toolkit. The goal of our work is to provide the research community with a basic framework that should be extended by interested people in a community effort to gain importance for evaluation studies. We demonstrate our proposed framework on a sample evaluation and release its implementation and associated tools to the public domain.}
}
A Duality Based Algorithm for TV-L1-Optical-Flow Image Registration. Pock, T.; Urschler, M.; Zach, C.; Beichel, R.; and Bischof, H. In Ayache, N.; Ourselin, S.; and Maeder, A., editors, Medical Image Computing and Computer-Assisted Intervention – MICCAI 2007, volume 4792 LNCS, pages 511-518. Springer Berlin Heidelberg, 2007.
\n\n\n\n \n \n \"MedicalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{Pock2007MICCAI,
  chapter = {A Duality Based Algorithm for TV-L1-Optical-Flow Image Registration},
  title = {Medical Image Computing and Computer-Assisted Intervention – MICCAI 2007},
  author = {Pock, Thomas and Urschler, Martin and Zach, Christopher and Beichel, Reinhard and Bischof, Horst},
  editor = {Ayache, N. and Ourselin, S. and Maeder, A.},
  year = {2007},
  volume = {4792 LNCS},
  issue = {PART 2},
  pages = {511-518},
  publisher = {Springer Berlin Heidelberg},
  address = {Berlin, Heidelberg},
  doi = {10.1007/978-3-540-75759-7_62},
  url = {http://link.springer.com/10.1007/978-3-540-75759-7_62},
  note = {Poster},
  abstract = {Nonlinear image registration is a challenging task in the field of medical image analysis. In many applications discontinuities may be present in the displacement field, and intensity variations may occur. In this work we therefore utilize an energy functional which is based on Total Variation regularization and a robust data term. We propose a novel, fast and stable numerical scheme to find the minimizer of this energy. Our approach combines a fixed-point procedure derived from duality principles combined with a fast thresholding step. We show experimental results on synthetic and clinical CT lung data sets at different breathing states as well as registration results on inter-subject brain MRIs. © Springer-Verlag Berlin Heidelberg 2007.}
}
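The duality-derived fixed point and the thresholding step named in this abstract follow the TV-L1 optical-flow splitting from the same group; the sketch below is reproduced from that general TV-L1 flow literature, not from this chapter itself. The energy is decoupled with an auxiliary field v,

\min_{u,v} \sum_{d} \int_\Omega |\nabla u_d|\,dx + \frac{1}{2\theta} \int_\Omega \|u - v\|^2 dx + \lambda \int_\Omega |\rho(v)|\,dx ,
\qquad \rho(v) = I_1(x + u^0) + \nabla I_1^{\top}(v - u^0) - I_0(x) ,

and minimized by alternating a ROF-type dual fixed-point update for u with a closed-form pointwise thresholding step for v:

v = u + \begin{cases} \lambda\theta\,\nabla I_1 & \text{if } \rho(u) < -\lambda\theta\,\|\nabla I_1\|^2 ,\\ -\lambda\theta\,\nabla I_1 & \text{if } \rho(u) > \lambda\theta\,\|\nabla I_1\|^2 ,\\ -\rho(u)\,\nabla I_1 / \|\nabla I_1\|^2 & \text{otherwise.} \end{cases}

Both substeps are cheap and pointwise or locally coupled, which is what makes the scheme fast and stable as claimed.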
\n
\n\n\n
\n Nonlinear image registration is a challenging task in the field of medical image analysis. In many applications discontinuities may be present in the displacement field, and intensity variations may occur. In this work we therefore utilize an energy functional which is based on Total Variation regularization and a robust data term. We propose a novel, fast and stable numerical scheme to find the minimizer of this energy. Our approach combines a fixed-point procedure derived from duality principles with a fast thresholding step. We show experimental results on synthetic and clinical CT lung data sets at different breathing states as well as registration results on inter-subject brain MRIs. © Springer-Verlag Berlin Heidelberg 2007.\n
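\n The distinctive ingredient of this scheme is the pointwise thresholding step that alternates with a Chambolle-type dual update of the TV term. Below is a minimal sketch of that thresholding step for the standard duality-based TV-L1 optical-flow formulation, assuming the residual of the linearized data term has already been computed; variable names are illustrative and this is not the authors' reference implementation.\n

import numpy as np

def tv_l1_threshold_step(u, grad_I, rho, lam, theta):
    """Pointwise closed-form minimizer of lam*|rho(v)| + |v - u|^2 / (2*theta).

    u:      current flow estimate, shape (2, H, W)
    grad_I: spatial gradient of the warped second image, shape (2, H, W)
    rho:    residual of the linearized data term at u, shape (H, W)
    """
    grad_sq = (grad_I ** 2).sum(axis=0)          # |grad I|^2 per pixel
    step = np.zeros_like(u)
    case_neg = rho < -lam * theta * grad_sq      # step along +grad I
    case_pos = rho > lam * theta * grad_sq       # step along -grad I
    case_mid = ~(case_neg | case_pos)            # land exactly on rho = 0
    step[:, case_neg] = lam * theta * grad_I[:, case_neg]
    step[:, case_pos] = -lam * theta * grad_I[:, case_pos]
    safe = np.maximum(grad_sq, 1e-12)            # avoid division by zero
    step[:, case_mid] = (-(rho / safe)[case_mid]) * grad_I[:, case_mid]
    return u + step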
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Automatic Point Landmark Matching for Regularizing Nonlinear Intensity Registration: Application to Thoracic CT Images.\n \n \n \n \n\n\n \n Urschler, M.; Zach, C.; Ditt, H.; and Bischof, H.\n\n\n \n\n\n\n Volume 9. International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI), pages 710-717. Larsen, R.; Nielsen, M.; and Sporring, J., editor(s). Springer, Berlin, Heidelberg, 2006.\n \n\n\n\n
\n\n\n\n \n \n \"InternationalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2006},\n pages = {710-717},\n volume = {9},\n issue = {Pt 2},\n websites = {http://link.springer.com/10.1007/11866763_87,http://www.ncbi.nlm.nih.gov/pubmed/17354835},\n publisher = {Springer, Berlin, Heidelberg},\n city = {Copenhagen, DK},\n id = {851a8398-eae6-31ba-b60e-2fafd27b8e62},\n created = {2015-02-18T08:30:18.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2020-01-29T22:08:23.492Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2006MICCAI},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Nonlinear image registration is a prerequisite for a variety of medical image analysis tasks. A frequently used registration method is based on manually or automatically derived point landmarks leading to a sparse displacement field which is densified in a thin-plate spline (TPS) framework. A large problem of TPS interpolation/approximation is the requirement for evenly distributed landmark correspondences over the data set which can rarely be guaranteed by landmark matching algorithms. We propose to overcome this problem by combining the sparse correspondences with intensity-based registration in a generic nonlinear registration scheme based on the calculus of variations. Missing landmark information is compensated by a stronger intensity term, thus combining the strengths of both approaches. An explicit formulation of the generic framework is derived that constrains an intra-modality intensity data term with a regularization term from the corresponding landmarks and an anisotropic image-driven displacement regularization term. An evaluation of this algorithm is performed comparing it to an intensity- and a landmark-based method. Results on four synthetically deformed and four clinical thorax CT data sets at different breathing states are shown.},\n bibtype = {inbook},\n author = {Urschler, Martin and Zach, Christopher and Ditt, Hendrik and Bischof, Horst},\n editor = {Larsen, R. and Nielsen, M. and Sporring, J.},\n doi = {10.1007/11866763_87},\n chapter = {Automatic Point Landmark Matching for Regularizing Nonlinear Intensity Registration: Application to Thoracic CT Images},\n title = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)}\n}
\n
\n\n\n
\n Nonlinear image registration is a prerequisite for a variety of medical image analysis tasks. A frequently used registration method is based on manually or automatically derived point landmarks leading to a sparse displacement field which is densified in a thin-plate spline (TPS) framework. A large problem of TPS interpolation/approximation is the requirement for evenly distributed landmark correspondences over the data set, which can rarely be guaranteed by landmark matching algorithms. We propose to overcome this problem by combining the sparse correspondences with intensity-based registration in a generic nonlinear registration scheme based on the calculus of variations. Missing landmark information is compensated by a stronger intensity term, thus combining the strengths of both approaches. An explicit formulation of the generic framework is derived that constrains an intra-modality intensity data term with a regularization term from the corresponding landmarks and an anisotropic image-driven displacement regularization term. An evaluation of this algorithm is performed, comparing it to an intensity-based and a landmark-based method. Results on four synthetically deformed and four clinical thorax CT data sets at different breathing states are shown.\n
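\n The densification step referred to above, sparse landmark displacements interpolated to a dense field by a thin-plate spline, can be sketched with scipy's RBFInterpolator and its 'thin_plate_spline' kernel as a stand-in. This is a minimal sketch under that assumption, not the paper's variational implementation, and all names are illustrative.\n

import numpy as np
from scipy.interpolate import RBFInterpolator

def tps_dense_displacement(src_landmarks, dst_landmarks, grid_shape):
    """Interpolate sparse landmark displacements to a dense 3D field."""
    src = np.asarray(src_landmarks, dtype=float)          # (N, 3) source points
    disp = np.asarray(dst_landmarks, dtype=float) - src   # (N, 3) displacements
    tps = RBFInterpolator(src, disp, kernel='thin_plate_spline', smoothing=0.0)
    coords = np.stack(np.meshgrid(*[np.arange(s) for s in grid_shape],
                                  indexing='ij'), axis=-1).reshape(-1, 3)
    return tps(coords).reshape(*grid_shape, 3)            # (D, H, W, 3) field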
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A new registration/visualization paradigm for CT-fluoroscopy guided RF liver ablation.\n \n \n \n \n\n\n \n Micu, R.; Jakobs, T., F.; Urschler, M.; and Navab, N.\n\n\n \n\n\n\n Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention, 9(Pt 1): 882-90. 2006.\n \n\n\n\n
\n\n\n\n \n \n \"AWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {A new registration/visualization paradigm for CT-fluoroscopy guided RF liver ablation.},\n type = {article},\n year = {2006},\n pages = {882-90},\n volume = {9},\n websites = {http://link.springer.com/10.1007/11866565_108,http://www.ncbi.nlm.nih.gov/pubmed/17354974},\n publisher = {Springer, Berlin, Heidelberg},\n city = {Copenhagen, DK},\n id = {1cd3d2d9-ab0f-3901-a299-2a596ef2cd40},\n created = {2015-02-18T08:30:19.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:43.938Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Micu2006},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {2D-3D slice-to-volume registration for abdominal organs like liver is difficult due to the breathing motion and tissue deformation. The purpose of our approach is to ease CT-fluoroscopy (CT-fluoro) based needle insertion for the Radiofrequency Liver Ablation procedure using high resolution contrasted preoperative data. In this case, low signal-to-noise ratio, absence of contrast and additional presence of needle in CT-fluoro makes it difficult to guarantee the solution of any deformable slice-to-volume registration algorithm. In this paper, we first propose a method for creating a set of ground truth (GT) simulation data based on a non-linear deformation of the CT-fluoro volume obtained from real patients. Second, we split the CT-fluoro image and apply intensity based rigid and affine registration to each section. We then propose a novel solution, which consists of intuitive visualization sequences of optimal sub-volumes of preinterventional data based on the registration results. Experiments on synthetic and real patient data and direct feedback of two interventionalists validate our alternative approach.},\n bibtype = {article},\n author = {Micu, Ruxandra and Jakobs, Tobias F. and Urschler, Martin and Navab, Nassir},\n editor = {Larsen, Rasmus and Nielsen, Mads and Sporring, Jon},\n doi = {10.1007/11866565_108},\n journal = {Medical image computing and computer-assisted intervention : MICCAI ... International Conference on Medical Image Computing and Computer-Assisted Intervention},\n number = {Pt 1}\n}
\n
\n\n\n
\n 2D-3D slice-to-volume registration for abdominal organs like the liver is difficult due to breathing motion and tissue deformation. The purpose of our approach is to ease CT-fluoroscopy (CT-fluoro) based needle insertion for the Radiofrequency Liver Ablation procedure using high-resolution, contrast-enhanced preoperative data. In this case, the low signal-to-noise ratio, the absence of contrast, and the additional presence of the needle in CT-fluoro make it difficult to guarantee the solution of any deformable slice-to-volume registration algorithm. In this paper, we first propose a method for creating a set of ground truth (GT) simulation data based on a non-linear deformation of the CT-fluoro volume obtained from real patients. Second, we split the CT-fluoro image and apply intensity-based rigid and affine registration to each section. We then propose a novel solution, which consists of intuitive visualization sequences of optimal sub-volumes of preinterventional data based on the registration results. Experiments on synthetic and real patient data and direct feedback from two interventionalists validate our alternative approach.\n
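\n The ground-truth idea in the first contribution, deforming a real volume with a known displacement field so that a registration result can later be checked against it, can be sketched as follows. The Gaussian-bump displacement below is purely an illustrative assumption; the paper's deformation model is not reproduced here.\n

import numpy as np
from scipy.ndimage import map_coordinates

def make_ground_truth(volume, amplitude=5.0, sigma=20.0):
    """Warp `volume` with a known synthetic displacement field."""
    coords = np.stack(np.meshgrid(*[np.arange(s) for s in volume.shape],
                                  indexing='ij'), axis=0).astype(float)
    center = np.array(volume.shape, dtype=float)[:, None, None, None] / 2.0
    r2 = ((coords - center) ** 2).sum(axis=0)
    bump = amplitude * np.exp(-r2 / (2.0 * sigma ** 2))   # smooth scalar bump
    displacement = bump * np.ones_like(coords) / np.sqrt(3.0)
    warped = map_coordinates(volume, coords + displacement, order=1)
    return warped, displacement    # deformed volume and the known deformation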
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Partially rigid bone registration in CT Angiography.\n \n \n \n\n\n \n Urschler, M.; Ditt, H.; and Bischof, H.\n\n\n \n\n\n\n In Computer Vision Winter Workshop, pages 34-39, 2006. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Partially rigid bone registration in CT Angiography},\n type = {inproceedings},\n year = {2006},\n pages = {34-39},\n city = {Telc, CZ},\n id = {e6fe4f29-edb4-3568-b288-b9448ef51aa7},\n created = {2015-03-13T11:40:28.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:43.316Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2006CVWW},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Urschler, Martin and Ditt, Hendrik and Bischof, Horst},\n booktitle = {Computer Vision Winter Workshop}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SIFT and Shape Context for Feature-Based Nonlinear Registration of Thoracic CT Images.\n \n \n \n \n\n\n \n Urschler, M.; Bauer, J.; Ditt, H.; and Bischof, H.\n\n\n \n\n\n\n Volume 4241 LNCS. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 73-84. Beichel, R.; and Sonka, M., editor(s). Springer, Berlin, Heidelberg, 2006.\n \n\n\n\n
\n\n\n\n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2006},\n pages = {73-84},\n volume = {4241 LNCS},\n websites = {http://link.springer.com/10.1007/11889762_7},\n publisher = {Springer, Berlin, Heidelberg},\n city = {Graz, AT},\n id = {1a027814-1298-39f2-af2f-64db21b5fa93},\n created = {2018-02-18T20:51:33.180Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:44.609Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2006_CVAMIA},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Nonlinear image registration is a prerequisite for various medical image analysis applications. Many data acquisition protocols suffer from problems due to breathing motion which has to be taken into account for further analysis. Intensity based nonlinear registration is often used to align differing images, however this requires a large computational effort, is sensitive to intensity variations and has problems with matching small structures. In this work a feature-based image registration method is proposed that combines runtime efficiency with good registration accuracy by making use of a fully automatic feature matching and registration approach. The algorithm stages are 3D corner detection, calculation of local (SIFT) and global (Shape Context) 3D descriptors, robust feature matching and calculation of a dense displacement field. An evaluation of the algorithm on seven synthetic and four clinical data sets is presented. The quantitative and qualitative evaluations show lower runtime and superior results when compared to the Demons algorithm. © Springer-Verlag Berlin Heidelberg 2006.},\n bibtype = {inbook},\n author = {Urschler, Martin and Bauer, Joachim and Ditt, Hendrik and Bischof, Horst},\n editor = {Beichel, Reinhard and Sonka, Milan},\n doi = {10.1007/11889762_7},\n chapter = {SIFT and Shape Context for Feature-Based Nonlinear Registration of Thoracic CT Images},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n
\n Nonlinear image registration is a prerequisite for various medical image analysis applications. Many data acquisition protocols suffer from problems due to breathing motion, which has to be taken into account for further analysis. Intensity-based nonlinear registration is often used to align differing images; however, this requires a large computational effort, is sensitive to intensity variations, and has problems with matching small structures. In this work a feature-based image registration method is proposed that combines runtime efficiency with good registration accuracy by making use of a fully automatic feature matching and registration approach. The algorithm stages are 3D corner detection, calculation of local (SIFT) and global (Shape Context) 3D descriptors, robust feature matching, and calculation of a dense displacement field. An evaluation of the algorithm on seven synthetic and four clinical data sets is presented. The quantitative and qualitative evaluations show lower runtime and superior results when compared to the Demons algorithm. © Springer-Verlag Berlin Heidelberg 2006.\n
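\n For the robust feature matching stage, a standard filter is Lowe's ratio test on nearest-neighbour descriptor distances; the sketch below uses it as a stand-in for the paper's matching step, with plain numpy descriptor arrays and illustrative names throughout.\n

import numpy as np
from scipy.spatial import cKDTree

def ratio_test_matches(desc_a, desc_b, ratio=0.8):
    """Keep matches whose best neighbour clearly beats the second best."""
    tree = cKDTree(np.asarray(desc_b, dtype=float))
    dists, idx = tree.query(np.asarray(desc_a, dtype=float), k=2)
    keep = dists[:, 0] < ratio * dists[:, 1]   # Lowe's ratio criterion
    return [(int(i), int(idx[i, 0])) for i in np.flatnonzero(keep)]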
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Assessing breathing motion by shape matching of lung and diaphragm surfaces.\n \n \n \n \n\n\n \n Urschler, M.; and Bischof, H.\n\n\n \n\n\n\n In Amini, A., A.; and Manduca, A., editor(s), Medical Imaging 2005: Physiology, Function, and Structure from Medical Images, volume 5746, pages 440, 4 2005. SPIE\n \n\n\n\n
\n\n\n\n \n \n \"AssessingWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Assessing breathing motion by shape matching of lung and diaphragm surfaces},\n type = {inproceedings},\n year = {2005},\n pages = {440},\n volume = {5746},\n websites = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.595687},\n month = {4},\n publisher = {SPIE},\n day = {14},\n city = {San Diego},\n id = {462760e8-d3b7-33ab-a367-5857c466c1e8},\n created = {2015-03-13T11:40:28.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:45.248Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2005_SPIE},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n abstract = {Studying complex thorax breating motion is an important research topic for accurate fusion of functional and anatomical data, radiotherapy planning or reduction of breathing motion artifacts. We investigate segmented CT lung, airway and diaphragm surfaces at several different breathing states between Functional Residual and Total Lung Capacity. In general, it is hard to robustly derive corresponding shape features like curvature maxima from lung and diaphragm surfaces since diaphragm and rib cage muscles tend to deform the elastic lung tissue such that e.g. ridges might disappear. A novel registration method based on the shape context approach for shape matching is presented where we extend shape context to 3D surfaces. The shape context approach was reported as a promising method for matching 2D shapes without relying on extracted shape features. We use the point correspondences for a non-rigid thin-plate-spline registration to get deformation fields that describe the movement of lung and diaphragm. Our validation consists of experiments on phantom and real sheep thorax data sets. Phantom experiments make use of shapes that are manipulated with known transformations that simulate breathing behaviour. Real thorax data experiments use a data set showing lungs and diaphragm at 5 distinct breathing states, where we compare subsets of the data sets and qualitatively and quantitatively asses the registration performance by using manually identified corresponding landmarks.},\n bibtype = {inproceedings},\n author = {Urschler, Martin and Bischof, Horst},\n editor = {Amini, Amir A. and Manduca, Armando},\n doi = {10.1117/12.595687},\n booktitle = {Medical Imaging 2005: Physiology, Function, and Structure from Medical Images}\n}
\n
\n\n\n
\n Studying complex thorax breathing motion is an important research topic for accurate fusion of functional and anatomical data, radiotherapy planning or reduction of breathing motion artifacts. We investigate segmented CT lung, airway and diaphragm surfaces at several different breathing states between Functional Residual Capacity and Total Lung Capacity. In general, it is hard to robustly derive corresponding shape features like curvature maxima from lung and diaphragm surfaces, since diaphragm and rib cage muscles tend to deform the elastic lung tissue such that e.g. ridges might disappear. A novel registration method based on the shape context approach for shape matching is presented, where we extend shape context to 3D surfaces. The shape context approach was reported as a promising method for matching 2D shapes without relying on extracted shape features. We use the point correspondences for a non-rigid thin-plate-spline registration to get deformation fields that describe the movement of lung and diaphragm. Our validation consists of experiments on phantom and real sheep thorax data sets. Phantom experiments make use of shapes that are manipulated with known transformations that simulate breathing behaviour. Real thorax data experiments use a data set showing lungs and diaphragm at 5 distinct breathing states, where we compare subsets of the data sets and qualitatively and quantitatively assess the registration performance using manually identified corresponding landmarks.\n
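\n The extension of shape context from 2D contours to 3D surfaces can be sketched as a log-radius/azimuth/elevation histogram of the relative positions of all other surface points; bin counts, ranges and the normalization below are illustrative choices, not the parameters used in the paper.\n

import numpy as np

def shape_context_3d(points, n_r=5, n_az=6, n_el=3):
    """One (n_r * n_az * n_el,) shape-context descriptor per 3D point."""
    pts = np.asarray(points, dtype=float)
    descriptors = []
    for i, p in enumerate(pts):
        v = np.delete(pts, i, axis=0) - p              # vectors to all others
        r = np.linalg.norm(v, axis=1)
        az = np.arctan2(v[:, 1], v[:, 0])              # azimuth in [-pi, pi]
        el = np.arcsin(np.clip(v[:, 2] / np.maximum(r, 1e-12), -1.0, 1.0))
        r_edges = np.logspace(np.log10(r.min() + 1e-6),
                              np.log10(r.max()), n_r + 1)
        hist, _ = np.histogramdd(
            np.column_stack([r, az, el]),
            bins=(r_edges,
                  np.linspace(-np.pi, np.pi, n_az + 1),
                  np.linspace(-np.pi / 2, np.pi / 2, n_el + 1)))
        descriptors.append(hist.ravel() / len(v))      # relative frequencies
    return np.array(descriptors)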
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2004\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Matching 3D lung surfaces with the shape context approach.\n \n \n \n\n\n \n Urschler, M.; and Bischof, H.\n\n\n \n\n\n\n In 28th Workshop of the Austrian Association for Pattern Recognition: Digital Imaging in Media and Education, volume 179, pages 133-140, 2004. OCG\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Matching 3D lung surfaces with the shape context approach},\n type = {inproceedings},\n year = {2004},\n pages = {133-140},\n volume = {179},\n publisher = {OCG},\n city = {Hagenberg, AT},\n id = {5e04876c-9a7c-3356-8ad0-e9e0a1606454},\n created = {2015-03-13T11:40:28.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:46.502Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2004_OAGM},\n notes = {Oral},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Urschler, Martin and Bischof, Horst},\n booktitle = {28th Workshop of the Austrian Association for Pattern Recognition: Digital Imaging in Media and Education}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Registering 3D lung surfaces using the shape context approach.\n \n \n \n\n\n \n Urschler, M.; and Bischof, H.\n\n\n \n\n\n\n In 8th Annual Conference on Medical Image Understanding and Analysis, pages 212-215, 2004. BMVA\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Registering 3D lung surfaces using the shape context approach},\n type = {inproceedings},\n year = {2004},\n pages = {212-215},\n publisher = {BMVA},\n city = {London, UK},\n id = {1c46be81-30f5-33c5-894b-6d7b8b76e41c},\n created = {2015-03-13T11:40:28.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:47.141Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2004_MIUA},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Urschler, Martin and Bischof, Horst},\n booktitle = {8th Annual Conference on Medical Image Understanding and Analysis}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2002\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n The LiveWire Approach for the Segmentation of Left Ventricle Electron-Beam CT Images.\n \n \n \n\n\n \n Urschler, M.; Mayer, H.; Bolter, R.; and Leberl, F.\n\n\n \n\n\n\n In 26th Workshop of the Austrian Association for Pattern Recognition: Vision with Non-Traditional Sensors, volume 160, pages 319-326, 2002. OCG\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {The LiveWire Approach for the Segmentation of Left Ventricle Electron-Beam CT Images},\n type = {inproceedings},\n year = {2002},\n pages = {319-326},\n volume = {160},\n publisher = {OCG},\n city = {Graz, AT},\n id = {13a2cd2b-96b1-35a0-904b-2b22fd85a92a},\n created = {2015-03-13T11:40:28.000Z},\n file_attached = {false},\n profile_id = {53d1e3c7-2f16-3c81-9a84-dccd45be4841},\n last_modified = {2019-11-08T01:40:47.777Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Urschler2002},\n notes = {Poster},\n folder_uuids = {0ec41d70-75f1-4a99-820b-0a83ccc37f54},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Urschler, Martin and Mayer, Heinz and Bolter, Regine and Leberl, Franz},\n booktitle = {26th Workshop of the Austrian Association for Pattern Recognition: Vision with Non-Traditional Sensors}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);