<!-- JavaScript embed (loads the publication list directly in the browser): -->
<script src="https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d/group/27e0553c-8ec0-31bd-b42c-825b8a5a9ae8?jsonp=1"></script>
<?php
// PHP embed: fetch the publication list rendered by BibBase server-side
// and output it directly into this page.
$contents = file_get_contents("https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d/group/27e0553c-8ec0-31bd-b42c-825b8a5a9ae8");
echo $contents;
?>
<!-- iframe embed: -->
<iframe src="https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d/group/27e0553c-8ec0-31bd-b42c-825b8a5a9ae8"></iframe>
For more details, see the documentation.
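If neither the script tag nor a server-side include fits your setup, the same feed can also be fetched client-side. The snippet below is an illustrative sketch, not part of the BibBase documentation: it assumes the endpoint permits cross-origin requests (if it does not, fetch server-side as in the PHP example above), and the placeholder element id bibbase-publications is hypothetical.

<div id="bibbase-publications"></div>
<script>
// Client-side sketch: fetch the rendered publication list and inject it
// into the placeholder element above. Subject to the endpoint's CORS
// policy; fall back to the server-side PHP approach if blocked.
fetch("https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d/group/27e0553c-8ec0-31bd-b42c-825b8a5a9ae8")
  .then((response) => response.text())
  .then((html) => {
    document.getElementById("bibbase-publications").innerHTML = html;
  });
</script>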
@inproceedings{Gopu2020SQAN,
  title     = {Scalable Quality Assurance for Neuroimaging (SQAN): automated quality control for medical imaging},
  author    = {Gopu, Arvind and Young, Michael D. and Avena-Koenigsberger, Andrea and Perigo, Raymond W. and West, John D. and Paramasivam, Meenakshisundaram and Hayashi, Soichi and Henschel, Robert},
  editor    = {Deserno, Thomas M. and Chen, Po-Hao},
  booktitle = {Medical Imaging 2020: Imaging Informatics for Healthcare, Research, and Applications},
  year      = {2020},
  month     = {3},
  volume    = {11318},
  pages     = {6},
  publisher = {SPIE},
  doi       = {10.1117/12.2549722},
  url       = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11318/2549722/Scalable-Quality-Assurance-for-Neuroimaging-SQAN--automated-quality-control/10.1117/12.2549722.full},
  keywords  = {angularjs, automated quality control, javascript portal, medical research imaging, mongodb, node.js, protocol compliance, vue.js},
  abstract  = {Medical imaging, a key component in clinical diagnosis of and research on numerous medical conditions, is very costly and can generate massive datasets. For instance, a single scanned subject produces hundreds of thousands of images and millions of key-value metadata pairs that must be verified to ensure instrument and research protocol compliance. Many projects lack funds to reacquire images if data quality issues are detected later. Data quality assurance (QA) requires continuous involvement by all stakeholders and use of specific quality control (QC) methods to identify data issues likely to require post-processing correction or real-time re-acquisition. While many useful QC methods exist, they are often designed for specific use-cases with limited scope and documentation, making integration with other setups difficult. We present the Scalable Quality Assurance for Neuroimaging (SQAN), an open-source software suite developed by Indiana University for protocol quality control and instrumental validation on medical imaging data. SQAN includes a comprehensive QC Engine that ensures adherence to a research study's protocol. A modern, intuitive web portal serves a wide range of users including researchers, scanner technologists and data scientists, each of whom approach QC with unique priorities, expertise, insights and expectations. Since Fall 2017, a fully operational SQAN instance has supported 50+ research projects, and has QC'd ∼3.5 million images and over 700 million metadata tags. SQAN is designed to scale to any imaging center's QC needs, and to extend beyond protocol QC toward image-level QC and integration with pipeline and non-imaging database systems.}
}
@techreport{Henschel2020Benchmark,
  title       = {Explore True Performance Using Application Benchmark for the Next Generation HPC Systems},
  author      = {Henschel, Robert and Li, Junjie and Eigenmann, Rudolf and Chandrasekaran, Sunita},
  institution = {Indiana University},
  year        = {2020},
  pages       = {8},
  url         = {http://hdl.handle.net/2022/25344}
}
@article{Young2020ImageX,
  title    = {Image Archives Made Easy with Docker and ImageX},
  author   = {Young, Michael D. and Gopu, Arvind and Perigo, Raymond and Hayashi, Soichi},
  journal  = {ASPC},
  year     = {2020},
  volume   = {522},
  pages    = {267},
  abstract = {We have developed a suite of integrated services called ImageX, which streamlines and automates the process of identifying and cataloguing large image data-sets (e.g. astronomical FITS images), harvesting metadata, converting the images to tile pyramids, and serving the images to clients. We have also developed a front-end server with an integrated open-source JavaScript-based viewer to display the images. This viewer, built on top of the widely used OpenSeaDragon framework, allows us to display even the largest of astronomical images to clients on standard internet connections. The viewer also supports displaying and interacting with whole sets of data simultaneously, allowing observers to visually inspect an entire night's worth of observing, for example. In addition we can present multiple images with spatial positioning, to visualize target coverage. By pulling and running our publicly available Docker containers, we will show how any observatory, data archive, or astronomer can start sharing their images by following a few simple steps. We describe here many of the features of ImageX, including intelligent image scaling with adaptive histograms, user-level authorization, access authentication via JSON web tokens, load-balanced nginx image servers, and support for mobile clients. ImageX is fully open source, built upon widely supported industry-standard frameworks, and currently in use on the ODI-PPA portal serving astronomical images taken at the WIYN Observatory in near real-time. Finally, we show how our implementation for astronomy can be seamlessly adapted to serve any scientific imaging dataset that can be converted into a standard format like JPG or PNG.}
}
@techreport{Stewart2020Review,
  title       = {2020 External Review of the Pervasive Technology Institute},
  author      = {Stewart, C A and Slavin, S and Ping, R},
  institution = {PTI},
  year        = {2020},
  month       = {5},
  keywords    = {External Review, Technical Report},
  abstract    = {Review process description, findings, comments, and supporting documents.}
}
@techreport{Stewart2020Templates,
  title    = {Indiana University Pervasive Technology Institute preproposal and proposal management, documentation, and templates},
  author   = {Stewart, Craig A. and Miller, Therese and Snapp-Childs, Winona and Jankowski, Harmony and Husk, Malinda},
  year     = {2020},
  keywords = {Technical Report},
  url      = {https://scholarworks.iu.edu/dspace/handle/2022/25580},
  abstract = {Technical report detailing IUPTI processes, with templates and styles.}
}
@article{Avesani2019O3D,
  title     = {The open diffusion data derivatives, brain data upcycling via integrated publishing of derivatives and reproducible open cloud services},
  author    = {Avesani, Paolo and McPherson, Brent and Hayashi, Soichi and Caiafa, Cesar F. and Henschel, Robert and Garyfallidis, Eleftherios and Kitchell, Lindsey and Bullock, Daniel and Patterson, Andrew and Olivetti, Emanuele and Sporns, Olaf and Saykin, Andrew J. and Wang, Lei and Dinov, Ivo and Hancock, David and Caron, Bradley and Qian, Yiming and Pestilli, Franco},
  journal   = {Scientific Data},
  year      = {2019},
  month     = {12},
  volume    = {6},
  number    = {1},
  pages     = {69},
  publisher = {Nature Publishing Group},
  doi       = {10.1038/s41597-019-0073-y},
  url       = {http://www.nature.com/articles/s41597-019-0073-y},
  keywords  = {Brain imaging, Cognitive neuroscience, Computational science, Magnetic resonance imaging, Network models},
  abstract  = {We describe the Open Diffusion Data Derivatives (O3D) repository: an integrated collection of preserved brain data derivatives and processing pipelines, published together using a single digital-object-identifier. The data derivatives were generated using modern diffusion-weighted magnetic resonance imaging data (dMRI) with diverse properties of resolution and signal-to-noise ratio. In addition to the data, we publish all processing pipelines (also referred to as open cloud services). The pipelines utilize modern methods for neuroimaging data processing (diffusion-signal modelling, fiber tracking, tractography evaluation, white matter segmentation, and structural connectome construction). The O3D open services can allow cognitive and clinical neuroscientists to run the connectome mapping algorithms on new, user-uploaded, data. Open source code implementing all O3D services is also provided to allow computational and computer scientists to reuse and extend the processing methods. Publishing both data-derivatives and integrated processing pipelines promotes practices for scientific reproducibility and data upcycling by providing open access to the research assets for utilization by multiple scientific communities.}
}
@inproceedings{Stewart2019FinancialROI,
  title     = {Assessment of financial returns on investments in cyberinfrastructure facilities},
  author    = {Stewart, Craig A. and Apon, Amy and Payne, Ron and Slavin, Shawn D. and Hancock, David Y. and Wernert, Julie and Furlani, Thomas and Lifka, David and Sill, Alan and Berente, Nicholas and McMullen, Donald F. and Cheatham, Thomas},
  booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing on Rise of the Machines (learning) (PEARC '19)},
  year      = {2019},
  pages     = {1-8},
  publisher = {ACM Press},
  address   = {New York, New York, USA},
  doi       = {10.1145/3332186.3332228},
  url       = {http://dl.acm.org/citation.cfm?doid=3332186.3332228},
  abstract  = {In recent years, considerable attention has been given to assessing the value of investments in cyberinfrastructure (CI). This paper includes a survey of current methods for the assessment of financial returns on investment (ROI) in CI. Applying the financial concept of ROI proves challenging with regard to a service that, in most academic environments, does not generate a "sold amount" such as one would find in the buying and selling of stocks. The paper concludes with a discussion of future research directions and challenges in the assessment of financial ROI in CI. This work is intended less as a definitive guide than as a starting point for further exploration in the assessment of CI's value for scientific research.}
}
@article{Bingham2019Training,
  title     = {Training children aged 5–10 years in manual compliance control to improve drawing and handwriting},
  author    = {Bingham, Geoffrey P. and Snapp-Childs, Winona},
  journal   = {Human Movement Science},
  year      = {2019},
  month     = {6},
  volume    = {65},
  pages     = {42-50},
  publisher = {North-Holland},
  doi       = {10.1016/j.humov.2018.04.002},
  url       = {https://www.sciencedirect.com/science/article/pii/S0167945717308552},
  abstract  = {A large proportion of school-aged children exhibit poor drawing and handwriting. This prevalence limits the availability of therapy. We developed an automated method for training improved manual compliance control and relatedly, prospective control of a stylus. The approach included a difficult training task, while providing parametrically modifiable support that enables the children to perform successfully while developing good compliance control. The task was to use a stylus to push a bead along a 3D wire path. Support was provided by making the wire magnetically attractive to the stylus. Support was progressively reduced as 3D tracing performance improved. We report studies that (1) compared performance of Typically Developing (TD) children and children with Developmental Coordination Disorder (DCD), (2) tested training with active versus passive movement, (3) tested progressively reduced versus constant or no support during training, (4) tested children of different ages, (5) tested the transfer of training to a drawing task, (6) tested the specificity of training in respect to the size, shape and dimensionality of figures, and (7) investigated the relevance of the training task to the Beery VMI, an inventory used to diagnose DCD. The findings were as follows. (1) Pre-training performance of TD and DCD children was the same and good with high support but distinct and poor with low support. Support yielded good self-efficacy that motivated training. Post training performance with no support was improved and the same for TD and DCD children. (2) Actively controlled movements were required for improved performance. (3) Progressively reduced support was required for good performance during and after training. (4) Age differences in performance during pre-training were eliminated post-training. (5) Improvements transferred to drawing. (6) There was no evidence of specificity of training in transfer. (7) Disparate Beery scores were reflected in pre-training but not post-training performance. We conclude that the method improves manual compliance control, and more generally, prospective control of movements used in drawing performance.}
}
@inbook{Stewart2019JetstreamChapter,
  title     = {Contemporary High Performance Computing},
  chapter   = {Jetstream: A Novel Cloud System for Science},
  author    = {Stewart, Craig A. and Hancock, David Y. and Miller, Therese and Fischer, Jeremy and Liming, R. Lee and Turner, George and Lowe, John Michael and Gregory, Steven and Skidmore, Edwin and Vaughn, Matthew and Stanzione, Dan and Merchant, Nirav and Foster, Ian and Taylor, James and Rad, Paul and Brendel, Volker and Afgan, Enis and Packard, Michael and Snapp-Childs, Winona},
  year      = {2019},
  month     = {5},
  pages     = {189-222},
  publisher = {CRC Press},
  doi       = {10.1201/9781351036863-8},
  url       = {https://www.taylorfrancis.com/books/9781351036856/chapters/10.1201/9781351036863-8}
}
@inbook{Stewart2019EaseOfUse,
  title     = {Advanced Methodologies and Technologies in Network Architecture, Mobile Computing, and Data Analytics},
  chapter   = {Cyberinfrastructure, Cloud Computing, Science Gateways, Visualization, and Cyberinfrastructure Ease of Use},
  author    = {Stewart, Craig A and Knepper, Richard and Link, Matthew R and Pierce, Marlon and Wernert, Eric and Wilkins-Diehr, Nancy},
  editor    = {Khosrow-Pour, Mehdi},
  year      = {2019},
  pages     = {157-170},
  publisher = {IGI Global},
  address   = {Hershey, PA, USA},
  doi       = {10.4018/978-1-5225-7598-6.ch012},
  url       = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-7598-6.ch012},
  abstract  = {Computers accelerate our ability to achieve scientific breakthroughs. As technology evolves and new research needs come to light, the role for cyberinfrastructure as "knowledge" infrastructure continues to expand. In essence, cyberinfrastructure can be thought of as the integration of supercomputers, data resources, visualization, and people that extends the impact and utility of information technology. This chapter discusses cyberinfrastructure, the related topics of science gateways and campus bridging, and identifies future challenges and opportunities in cyberinfrastructure.}
}
@inproceedings{Thota2019Desktops,
  title     = {Research Computing Desktops: Demystifying research computing for non-Linux users},
  author    = {Thota, Abhinav and Dietz, Daniel T. and Phillips, Christopher and Zhu, Xiao and Weakley, Le Mai and Fulton, Ben and Dennis, H. E. Cicada Brokaw and Huber, Laura and Michael, Scott and Snapp-Childs, Winona and Harrell, Stephen Lien and Younts, Alexander},
  booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing on Rise of the Machines (learning) (PEARC '19)},
  year      = {2019},
  pages     = {1-8},
  publisher = {Association for Computing Machinery (ACM)},
  doi       = {10.1145/3332186.3332206},
  abstract  = {Many members of the current generation of students and researchers are accustomed to intuitive computing devices and never had to learn how to use command-line based systems, which comprise the majority of high-performance computing environments in use. In the 2013-14 time frame, both Indiana University and Purdue University separately launched virtual desktop front-ends for their high performance computing clusters with the aim of offering an easier on-ramp to new users. In the last five years we iterated on and refined these approaches, and we now have over two thousand annual active users combined. Over 75% of those users say that the desktop services are either moderately or extremely important for their ability to use HPC resources. In this paper, we share our experience bootstrapping this new service framework, bringing in the end-users, dealing with runaway success, and making this service a sustainable offering. This paper offers a comprehensive picture of the driving motivations for desktops at each institution, reasons users like desktops, and ways of getting started.}
}
@techreport{Stewart2019PTI20,
  title  = {The Pervasive Technology Institute at 20: Two decades of success and counting},
  author = {Stewart, Craig A. and Welch, Von and Doak, Thomas G. and Miller, Therese and Plale, Beth and Walsh, John A. and Link, Matthew R. and Snapp-Childs, Winona},
  year   = {2019},
  doi    = {10.5967/QDF0-S837},
  url    = {http://hdl.handle.net/2022/22607}
}
@inproceedings{Coulter2019Beyond,
  title     = {Beyond Campus Bridging},
  author    = {Coulter, J. Eric and Knepper, Rich and Reynolds, Resa and Sprouse, Jodie and Bird, Stephen},
  booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing on Rise of the Machines (learning) (PEARC '19)},
  year      = {2019},
  month     = {7},
  pages     = {1-4},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3332186.3333151},
  url       = {https://dl.acm.org/doi/10.1145/3332186.3333151}
}
@inproceedings{Nikolov2019Lightweight,
  title     = {A Lightweight Framework for Research Data Management},
  author    = {Nikolov, Dimitar and Tuna, Esen},
  booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing on Rise of the Machines (learning) (PEARC '19)},
  year      = {2019},
  pages     = {1-4},
  publisher = {Association for Computing Machinery (ACM)},
  doi       = {10.1145/3332186.3333157},
  abstract  = {We describe a framework for managing live research data involving two major components. First, a system for the scalable scheduling and execution of automated policies for moving, organizing, and archiving data. Second, a system for managing metadata to facilitate curation and discovery with minimal change to existing workflows. Our approach is guided by four main principles: 1) to be non-invasive and to allow for easy integration into existing workflows and computing environments; 2) to be built on established, cloud-aware, open-source tools; 3) to be easily extensible and configurable, and thus, adaptable to different academic disciplines; and 4) to integrate with and take advantage of infrastructure and services available on academic campuses and research computing environments. These principles give our solution a well-defined place along the spectrum of research data management software such as sophisticated electronic lab notebooks and science gateways. Our lightweight and flexible data management framework provides for curation and preservation of research data within a lab, department or university cyberinfrastructure.}
}
@inproceedings{Quick2019ERPID,
  title     = {The Digital Object Architecture and Enhanced Robust Persistent Identification of Data},
  author    = {Quick, Rob and Lannom, Larry and Krenz, Marina and Luo, Yu},
  booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing on Rise of the Machines (learning) (PEARC '19)},
  year      = {2019},
  pages     = {1-4},
  publisher = {ACM Press},
  address   = {New York, New York, USA},
  doi       = {10.1145/3332186.3333255},
  url       = {http://dl.acm.org/citation.cfm?doid=3332186.3333255},
  abstract  = {The expansion of the research community's ability to collect and store data has grown much more rapidly than its ability to catalog, make accessible, and make use of data. Recent initiatives in Open Science and Open Data have attempted to address the problems of making data discoverable, accessible and reusable at internet scales. The Enhanced Robust Persistent Identification of Data (E-RPID) project's goal is to address these deficiencies and enable options for data interoperability and reusability in the current research data landscape by utilizing Persistent Identifiers (PIDs) and a kernel of state information available with PID resolution. To do this requires integrating a set of preexisting software systems along with a small set of newly developed software solutions. The combination of these software components and the core principles of making data FAIR (findable, accessible, interoperable and reusable) will allow us to use Persistent Identifiers to create an end-to-end fabric capable of realizing the Digital Object Architecture for researchers. This poster will acquaint the audience with the concepts of the Digital Object Architecture, describe the software service architecture necessary to enable this architecture, outline the existing E-RPID testbed that is available for experimental usage from the Jetstream cloud environment, and describe the diverse set of use cases already using E-RPID to enhance their data accessibility, interoperability and reusability. It will focus on how the Digital Object Architecture and E-RPID testbed would interact with XSEDE resources and how E-RPID could assist with interoperability, reusability and reproducibility of HPC workflows.}
}
@article{Coulter2019Programmable,
  title     = {Programmable Education Infrastructure: Cloud resources as HPC Education Environments},
  author    = {Coulter, Eric and Knepper, Richard and Fischer, Jeremy},
  journal   = {The Journal of Computational Science Education},
  year      = {2019},
  month     = {1},
  volume    = {10},
  number    = {1},
  pages     = {107},
  publisher = {The Shodor Education Foundation, Inc.},
  doi       = {10.22369/issn.2153-4136/10/1/18},
  url       = {http://www.jocse.org/articles/10/1/18/},
  abstract  = {Cloud computing is a growing area for educating students and performing meaningful scientific research. The challenge for many educators and researchers is knowing how to use some of the unique aspects of computing in the cloud. One key feature is true elastic computing - resources on demand. The elasticity and programmability of cloud resources make them an excellent tool for educators who require access to a wide range of computing environments. In the field of HPC education, such environments are an absolute necessity, and getting access to them can create a large burden on the educators above and beyond designing content. While cloud resources won't replace traditional HPC environments for large research projects, they are an excellent option for providing both user and administrator education on HPC environments. The highly configurable nature of cloud environments allows educators to tailor the educational resource to the needs of their attendees, and provide a wide range of hands-on experiences. In this demo, we'll show how the Jetstream cloud environment can be used to provide training for both new HPC administrators and users, by showing a ground-up build of a simple HPC system. While this approach uses the Jetstream cloud, it is generalizable across any cloud provider. We will show how this allows an educator to tackle everything from basic command-line concepts and scheduler use to advanced cluster-management concepts such as elasticity and management of scientific software.}
}
@inproceedings{Stewart2019NonFinancial,
  title     = {Assessment of non-financial returns on cyberinfrastructure},
  author    = {Stewart, Craig A. and Slavin, Shawn D. and Apon, Amy and Hancock, David Y. and Furlani, Thomas and Sill, Alan and Wernert, Julie and Lifka, David and Berente, Nicholas and Cheatham, Thomas},
  booktitle = {Proceedings of the Humans in the Loop: Enabling and Facilitating Research on Cloud Computing (HARC '19)},
  year      = {2019},
  pages     = {1-10},
  publisher = {ACM Press},
  address   = {New York, New York, USA},
  doi       = {10.1145/3355738.3355749},
  url       = {http://dl.acm.org/citation.cfm?doid=3355738.3355749},
  abstract  = {In recent years, considerable attention has been given to assessing the value of investments in cyberinfrastructure (CI). This paper focuses on assessment of value measured in ways other than financial benefits - what might well be termed impact or outcomes. This paper is a companion to a paper presented at the PEARC'19 conference, which focused on methods for assessing financial returns on investment. In this paper we focus on methods for assessing impacts such as effect on publication production, importance of publications, and assistance with major scientific accomplishments as signified by major awards. We in particular focus on the role of humans in the loop - humanware. This includes a brief description of the roles humans play in facilitating use of research cyberinfrastructure - including clouds - and then a discussion of how those impacts have been assessed. Our conclusion overall is that there has been more progress in the past very few years in developing methods for the quantitative assessment of financial returns on investment than there has been in assessing non-quantitative impacts. There are a few clear actions that many research institutions could take to start better assessing the non-financial impacts of investment in cyberinfrastructure. However, there is a great need for assessment efforts to turn more attention to the assessment of non-financial benefits of investment in cyberinfrastructure, particularly the benefits of investing in humans and the benefits to humans who are involved in supporting and using cyberinfrastructure, including clouds.}
}
@article{Coulter2019XNIT,
  title    = {Extending XSEDE Innovations to Campus Cyberinfrastructure: The XSEDE National Integration Toolkit},
  author   = {Coulter, Eric and Sprouse, Jodie and Reynolds, Resa and Knepper, Richard},
  journal  = {Journal of Computational Science Education},
  year     = {2019},
  volume   = {10},
  number   = {1},
  pages    = {16-20},
  doi      = {10.22369/issn.2153-4136/10/1/3},
  url      = {https://doi.org/10.22369/issn.2153-4136/10/1/3},
  keywords = {Clusters, OpenHPC, Scientific Computing, System Administration, XCRI, XSEDE},
  abstract = {XSEDE Service Providers (SPs) and resources have the benefit of years of testing and implementation, tuning and configuration, and the development of specific tools to help users and systems make the best use of these resources. Cyberinfrastructure professionals at the campus level are often charged with building computer resources which are compared to these national-level resources. While organizations and companies exist that guide cyberinfrastructure configuration choices down certain paths, there is no easy way to distribute the long-term knowledge of the XSEDE project to campus CI professionals. The XSEDE Cyberinfrastructure Resource Integration team has created a variety of toolkits to enable easy knowledge and best-practice transfer from XSEDE SPs to campus CI professionals. The XSEDE National Integration Toolkit (XNIT) provides the software used on most XSEDE systems in an effort to propagate the best practices and knowledge of XSEDE resources. XNIT includes basic tools and configuration that make it simpler for a campus cluster to have the same software set and many of the advantages an XSEDE SP resource affords. In this paper, we will detail the steps taken to build such a library of software and discuss the challenges involved in disseminating awareness of toolkits among cyberinfrastructure professionals. We will also describe our experiences in updating the XNIT to be compatible with the OpenHPC project, which forms the basis of many new HPC systems, and appears situated to become the de facto choice of management software provider for many HPC centers.}
}
@techreport{Doak2019NCGAS,
  title  = {ABI Sustaining: The National Center for Genome Analysis Support 2019 Annual Report},
  author = {Doak, T G and Stewart, C A and Hahn, R and Yuzhen, M},
  year   = {2019}
}
@techreport{Hancock2019UserAssessment,
  title  = {Annual User Assessment - 2019 Summary Report},
  author = {Hancock, David Y and Snapp-Childs, Winona and Wernert, Julie and Jankowski, Harmony E},
  year   = {2019},
  month  = {11},
  url    = {http://hdl.handle.net/2022/24614}
}
@unpublished{Michael2019-tv,
  title       = {Performance Characteristics of Virtualized GPUs for Deep Learning},
  author      = {Michael, Scott and Teige, Scott and Li, Junjie and Lowe, John Michael and Turner, George and Henschel, Robert},
  institution = {Indiana University},
  year        = {2019},
  url         = {http://hdl.handle.net/2022/24567}
}
@article{Desai2019Circumbinary,
  title     = {A 3D hydrodynamics study of gravitational instabilities in a young circumbinary disc},
  author    = {Desai, Karna M. and Steiman-Cameron, Thomas Y. and Michael, Scott and Cai, Kai and Durisen, Richard H.},
  journal   = {Monthly Notices of the Royal Astronomical Society},
  year      = {2019},
  volume    = {483},
  number    = {2},
  pages     = {2347-2361},
  publisher = {Oxford University Press},
  doi       = {10.1093/mnras/sty3240},
  keywords  = {accretion, accretion discs, binaries: general, hydrodynamics, protoplanetary discs},
  abstract  = {We present a 3D hydrodynamics study of gravitational instabilities (GIs) in a 0.14 M⊙ circumbinary protoplanetary disc orbiting a 1 M⊙ star and a 0.02 M⊙ brown dwarf companion. We examine the thermodynamical state of the disc and determine the strengths of GI-induced density waves, non-axisymmetric density structures, mass inflow and outflow, and gravitational torques. Results are compared with a parallel simulation of a protoplanetary disc without the brown dwarf binary companion. Simulations are performed using CHYMERA, a radiative 3D hydrodynamics code. The onset of GIs in the circumbinary disc is much more violent due to the stimulation of a strong one-armed density wave by the brown dwarf. Despite this early difference, detailed analyses show that both discs relax to a very similar quasi-steady phase by 2500 years after the beginning of the simulations. Similarities include the thermodynamics of the quasi-steady phase, the final surface density distribution, radial mass influx, and non-axisymmetric power and torques for spiral arm multiplicities of two or more. Effects of binarity in the disc are evident in gravitational torque profiles, temperature profiles in the inner discs, and radial mass transport. After 3800 years, the semimajor axis of the binary decreases by about one per cent and the eccentricity roughly doubles. The mass transport in the outer circumbinary disc associated with the one-armed wave may influence planet formation.}
}
@techreport{Hancock2019JetstreamAnnual,
  title  = {Jetstream (NSF Award 1445604) Annual Report: December 1, 2018 – November 28, 2019},
  author = {Hancock, David Y and Merchant, Nirav and Lowe, John Michael and Fischer, Jeremy and Liming, Lee and Taylor, James and Afgan, Enis and Turner, George and Skidmore, Edwin and Beck, Brian W. and Snapp-Childs, Winona and Foster, Ian and Vaughn, Matthew},
  year   = {2019},
  url    = {https://scholarworks.iu.edu/dspace/handle/2022/24806}
}
@inproceedings{Fischer:2018:MPA:3219104.3219115,
  title     = {Methodologies and Practices for Adoption of a Novel National Research Environment},
  author    = {Fischer, Jeremy and Beck, Brian W and Sudarshan, Sanjana and Turner, George and Snapp-Childs, Winona and Stewart, Craig A and Hancock, David Y},
  booktitle = {Proceedings of the Practice and Experience on Advanced Research Computing (PEARC '18)},
  series    = {PEARC '18},
  year      = {2018},
  pages     = {21:1--21:7},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3219104.3219115},
  url       = {http://doi.acm.org/10.1145/3219104.3219115},
  keywords  = {XSEDE, cloud, hpc, research},
  abstract  = {There are numerous domains of science that have been using high performance computing (HPC) systems for decades. Historically, when new HPC resources are introduced, specific variations may require researchers to make minor adjustments to their workflows but the general usage and expectations remain much the same. This consistency means that domain scientists can generally move from system to system as necessary and as new resources come online, they can be fairly easily adopted by these researchers. However, as novel resources, such as cloud computing systems, become available, additional work may be required in order to help researchers find and use the resource. When the goal of a system's funding and deployment is to find non-traditional research groups that have been under-served by the national cyberinfrastructure, a different approach to system adoption and training is required. When Jetstream was funded by the NSF as the first production research cloud, it became clear that to attract non-traditional or under-served researchers, a very proactive approach would be required. Here we show how the Jetstream team 1) developed methods and practices for increasing awareness of the system to both traditional HPC users as well as under-served and non-traditional users of HPC systems, 2) developed training approaches which highlight the capabilities that a cloud system may offer that are different from traditional HPC systems. We also discuss areas of success and failure, and plans for future efforts.}
}
@inproceedings{Lowe:2018:HAJ:3217880.3217884,
  title     = {High Availability on Jetstream: Practices and Lessons Learned},
  author    = {Lowe, John Michael and Fischer, Jeremy and Sudarshan, Sanjana and Turner, George and Stewart, Craig A and Hancock, David Y},
  booktitle = {Proceedings of the 9th Workshop on Scientific Cloud Computing (ScienceCloud'18)},
  series    = {ScienceCloud'18},
  year      = {2018},
  pages     = {4:1--4:7},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3217880.3217884},
  url       = {http://doi.acm.org/10.1145/3217880.3217884},
  keywords  = {Atmosphere, XSEDE, availability, cloud, HPC, research},
  abstract  = {Research computing has traditionally used high performance computing (HPC) clusters and has been a service not given to high availability without a doubling of computational and storage capacity. System maintenance such as security patching, firmware updates, and other system upgrades generally meant that the system would be unavailable for the duration of the work unless one has redundant HPC systems and storage. While efforts were often made to limit downtimes, when it became necessary, maintenance windows might be one to two hours or as much as an entire day. As the National Science Foundation (NSF) began funding non-traditional research systems, looking at ways to provide higher availability for researchers became one focus for service providers. One of the design elements of Jetstream was to have geographic dispersion to maximize availability. This was the first step in a number of design elements intended to make Jetstream exceed the NSF's availability requirements. We will examine the design steps employed, the components of the system and how the availability for each was considered in deployment, how maintenance is handled, and the lessons learned from the design and implementation of the Jetstream cloud.}
}
@inproceedings{Ruan:2018:CNA:3219104.3219153,
  title     = {A Computational Notebook Approach to Large-scale Text Analysis},
  author    = {Ruan, Guangchen and Gniady, Tassie and Kloster, David and Wernert, Eric and Tuna, Esen},
  booktitle = {Proceedings of the Practice and Experience on Advanced Research Computing (PEARC '18)},
  series    = {PEARC '18},
  year      = {2018},
  pages     = {1-8},
  publisher = {ACM Press},
  address   = {New York, New York, USA},
  doi       = {10.1145/3219104.3219153},
  url       = {http://doi.acm.org/10.1145/3219104.3219153},
  keywords  = {HPC, Spark, computational notebook, interactive analysis, scalability, text analysis},
  abstract  = {Large-scale text analysis algorithms are important to many fields as they interrogate reams of textual data to extract evidence, correlations, and trends not readily discoverable by a human reader. Unfortunately, there is often an expertise mismatch between computational researchers who have the technical and programming skills necessary to develop workflows at scale and domain scholars who have knowledge of the literary, historical, scientific, or social factors that can affect data as it is manipulated. Our work focuses on the use of scalable computational notebooks as a model to bridge the accessibility gap for domain scholars, putting the power of HPC resources directly in the hands of the researchers who have scholarly questions. The computational notebook approach offers many benefits, including: fine-grained control through modularized functions, interactive analysis that puts the "human in the loop", scalable analysis that leverages Spark-as-a-Service, and complexity hiding interfaces that minimize the need for HPC expertise. In addition, the notebook approach makes it easy to share, reproduce, and sustain research workflows. We illustrate the applicability of our approach with usage scenarios on HPC systems as well as within a restricted computing environment to access sensitive, in-copyright data, and demonstrate the usefulness of the notebook approach with three examples from three different domains and data sources. These sources include historical topic trends in ten thousand scientific articles, sentiment analysis of tweets, and literary analysis of the copyrighted works of Kurt Vonnegut using non-consumptive techniques.}
}
@inproceedings{Ruan:2018:HPP:3219104.3219148,
  title     = {High Performance Photogrammetry for Academic Research},
  author    = {Ruan, Guangchen and Wernert, Eric and Sherman, William and Gniady, Tassie and Tuna, Esen},
  booktitle = {Proceedings of the Practice and Experience on Advanced Research Computing (PEARC '18)},
  series    = {PEARC '18},
  year      = {2018},
  pages     = {45:1--45:8},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3219104.3219148},
  url       = {http://doi.acm.org/10.1145/3219104.3219148},
  keywords  = {HPC, benchmarking, distributed processing, performance evaluation, photogrammetry, scalability},
  abstract  = {Photogrammetry is the process of computationally extracting a three-dimensional surface model from a set of two-dimensional photographs of an object or environment. It is used to build models of everything from terrains to statues to ancient artifacts. In the past, the computational process was done on powerful PCs and could take weeks for large datasets. Even relatively small objects often required many hours of compute time to stitch together. With the availability of parallel processing options in the latest release of state-of-the-art photogrammetry software, it is possible to leverage the power of high performance computing systems on large datasets. In this paper we present a particular implementation of a high performance photogrammetry service. Though the service is currently based on a specific software package (Agisoft's PhotoScan), our system architecture is designed around a general photogrammetry process that can be easily adapted to leverage other photogrammetry tools. In addition, we report on an extensive performance study that measured the relative impacts of dataset size, software quality settings, and processing cluster size. Furthermore, we share lessons learned that are useful to system administrators looking to establish a similar service, and we describe the user-facing support components that are crucial for the success of the service.}
}
@inproceedings{Sukhija:2018:GTS:3219104.3229247,
  title     = {Grid Technology for Supporting Health Education and Measuring the Health Outcome},
  author    = {Sukhija, Nitin and Datta, Arun K and Sevin, Sonny and Coulter, Eric},
  booktitle = {Proceedings of the Practice and Experience on Advanced Research Computing (PEARC '18)},
  series    = {PEARC '18},
  year      = {2018},
  pages     = {89:1--89:4},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3219104.3229247},
  url       = {http://doi.acm.org/10.1145/3219104.3229247},
  keywords  = {XSEDE, community health, cyberinfrastructure, data grid, data integration, grid computing, health education, iRODS, mobile technology, portal, virtual}
}
@inproceedings{Stewart:2018:YGH:3219104.3219107,
  title     = {Your Good Health is a Workforce Issue},
  author    = {Stewart, Craig A and Krefeldt, Marion},
  booktitle = {Proceedings of the Practice and Experience on Advanced Research Computing (PEARC '18)},
  series    = {PEARC '18},
  year      = {2018},
  pages     = {75:1--75:8},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3219104.3219107},
  url       = {http://doi.acm.org/10.1145/3219104.3219107},
  keywords  = {Standard American Diet (SAD), cancer, health, preventive testing, stress, work/life balance, workforce development},
  abstract  = {The high performance computing (HPC), cyberinfrastructure, and research and academic information technology communities are small - too small to fulfill current needs for such professionals in the US. Members of this community are also often under a lot of stress, and with that can come health problems. The senior author was diagnosed with Stage IV cancer in early 2017. In this paper, we share what we have learned about health management in general and dealing with cancer in particular, focusing on lessons that are portable to other members of the HPC, cyberinfrastructure, and research and academic information technology communities. We also make recommendations to the National Science Foundation regarding changes the NSF could make to reduce some of the stress this community feels on a day-in, day-out basis. The key point of this report is to provide information to members of the cyberinfrastructure community that they might not already have - and might not receive from their primary care physicians - that will help them live longer and healthier lives. While our own experiences are based on one of the author's diagnosis of cancer, the information presented here should be of general value to all in terms of strategies for reducing and detecting long-term health risks. Our hope is that this information will help you be as healthy as possible until you reach retirement age and then healthy during a well-deserved and long period of retirement!}
}
@article{Hancock2018Jetstream,
  title     = {Jetstream—Early operations performance, adoption, and impacts},
  author    = {Hancock, David Y. and Stewart, Craig A. and Vaughn, Matthew and Fischer, Jeremy and Lowe, John Michael and Turner, George and Swetnam, Tyson L. and Chafin, Tyler K. and Afgan, Enis and Pierce, Marlon E. and Snapp-Childs, Winona},
  journal   = {Concurrency and Computation: Practice and Experience},
  year      = {2018},
  month     = {9},
  publisher = {John Wiley and Sons Ltd},
  doi       = {10.1002/cpe.4683},
  url       = {https://onlinelibrary.wiley.com/doi/abs/10.1002/cpe.4683},
  keywords  = {OpenStack, cloud computing, long tail of science, science impacts, user adoption, virtual machines}
}
@inproceedings{ title = {Return on investment for three cyberinfrastructure facilities: A local campus supercomputer; the NSF-funded Jetstream cloud system; and XSEDE (the eXtreme Science and Engineering Discovery Environment)}, type = {inproceedings}, year = {2018}, keywords = {Cost benefit analysis,High performance computing,Scientific computing,Supercomputing}, pages = {223-236}, month = {1}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, day = {4}, id = {27a7768a-9431-38f9-8943-b0e91eda32b2}, created = {2019-08-14T20:11:09.672Z}, accessed = {2019-08-14}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-15T18:17:38.177Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The economics of high performance computing are rapidly changing. Commercial cloud offerings, private research clouds, and pressure on the budgets of institutions of higher education and federally-funded research organizations are all contributing factors. As such, it has become a necessity that all expenses and investments be analyzed and considered carefully. In this paper we will analyze the return on investment (ROI) for three different kinds of cyberinfrastructure resources: the eXtreme Science and Engineering Discovery Environment (XSEDE); the NSF-funded Jetstream cloud system; and the Indiana University (IU) Big Red II supercomputer, funded exclusively by IU for use by the IU community and collaborators. We determined the ROI for these three resources by assigning financial values to services by either comparison with commercially available services, or by surveys of value of these resources to their users. In all three cases, the ROI for these very different types of cyberinfrastructure resources was well greater than 1, meaning that investors are getting more than $1 in returned value for every $1 invested. While there are many ways to measure the value and impact of investment in cyberinfrastructure resources, we are able to quantify the short-term ROI and show that it is a net positive for campuses and the federal government alike.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Hancock, David Y. and Wernert, Julie and Link, Matthew R. and Wilkins-Diehr, Nancy and Miller, Therese and Gaither, Kelly and Snapp-Childs, Winona}, doi = {10.1109/UCC.2018.00031}, booktitle = {Proceedings - 11th IEEE/ACM International Conference on Utility and Cloud Computing, UCC 2018} }
@inproceedings{ title = {Scaling JupyterHub using Kubernetes on Jetstream cloud: Platform as a service for research and educational initiatives in the atmospheric sciences}, type = {inproceedings}, year = {2018}, keywords = {Cloud Computing,JupyterHub,Kubernetes,Magnum,OpenStack,Unidata,Workforce Development}, month = {7}, publisher = {Association for Computing Machinery}, day = {22}, id = {e379f9c5-50e2-3bdc-849f-f7033fc9017d}, created = {2019-08-19T19:17:50.898Z}, accessed = {2019-08-19}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-19T19:17:51.011Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Unidata, an NSF funded project that started in 1983, is a diverse community of education and research institutions with the common goal of sharing geoscience data and the tools to access and visualize that data. Unidata provides weather observations and other data, software tools, and support to enhance Earth-system education and research, and continuously examines ways of adapting their workflows for new technologies to maximize the reach of their education and research efforts. In support of Unidata objectives to host workshops for atmospheric data analysis using JupyterHub, we explore a cloud computing approach leveraging Kubernetes coupled with JupyterHub that when combined will provide a solution for researchers and students to pull data from Unidata and burst onto Jetstream cloud by requesting resources dynamically via easy to use JupyterHub. More specifically, on Jetstream, Kubernetes is used for automating deployment and scaling of domain specific containerized applications, and JupyterHub is used for spawning multiple hubs within the same Kubernetes cluster instance that will be used for supporting classroom settings. JupyterHub's modular kernel feature will support dynamic needs of classroom application requirements. The proposed approach will serve as an end-to-end solution for researchers to execute their workflows, with JupyterHub serving as a powerful tool for user training and next-generation workforce development in atmospheric sciences.}, bibtype = {inproceedings}, author = {Sarajlic, Semir and Chastang, Julien and Marru, Suresh and Fischer, Jeremy and Lowe, Mike}, doi = {10.1145/3219104.3229249}, booktitle = {Proceedings of the Practice and Experience on Advanced Research Computing (PEARC '18)} }
@article{ title = {Training children aged 5–10 years in compliance control: tracing smaller figures yields better learning not specific to the scale of drawn figures}, type = {article}, year = {2018}, keywords = {Compliance control,Manual control,Motor development,Prospective control,Specificity}, pages = {2589-2601}, volume = {236}, month = {10}, publisher = {Springer Verlag}, day = {1}, id = {d6b7767c-93e8-313c-a12c-4af514d38e59}, created = {2019-08-19T19:31:53.154Z}, accessed = {2019-08-19}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-19T19:31:53.269Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Previously we developed a method that supports active movement generation to allow practice with improvement of good compliance control in tracing and drawing. We showed that the method allowed children with motor impairments to improve at a 3D tracing task to become as proficient as typically developing children and that the training improved 2D figure copying. In this study, we expanded the training protocol to include a wider variety of ages (5–10-year-olds) and we made the figures traced in training the same as in figure copying, but varied the scale of training and copying figures to assess the generality of learning. Forty-eight children were assigned to groups trained using large or small figures. All were tested before training with a tracing task and a copying task. Then, the children trained over five sessions in the tracing task with either small or large figures. Finally, the tracing and copying tasks were tested again following training. A mean speed measure was used to control for path length variations in the timed task. Performance on both tasks at both baseline and posttest varied as a function of the size of the figure and age. In addition, tracing performance also varied with the level of support. In particular, speeds were higher with more support, larger figures and older children. After training, performance improved. Speeds increased. In tracing, performance improved more for large figures traced by children who trained on large figures. In copying, however, performance only improved significantly for children who had trained on small figures and it improved equally for large and small figures. In conclusion, training by tracing smaller figures yielded better learning that was not, however, specific to the scale of drawn figures. Small figures exhibit greater mean curvature. We infer that it yielded better general improvement.}, bibtype = {article}, author = {Snapp-Childs, Winona and Fath, Aaron J. and Bingham, Geoffrey P.}, doi = {10.1007/s00221-018-5319-y}, journal = {Experimental Brain Research}, number = {10} }
@inproceedings{ title = {ImageX 3.0: a full stack imaging archive solution}, type = {inproceedings}, year = {2018}, pages = {46}, month = {7}, publisher = {SPIE-Intl Soc Optical Eng}, day = {6}, id = {810519b2-ec3c-3d93-ab9a-0bddedf9d5e7}, created = {2019-08-19T19:42:22.455Z}, accessed = {2019-08-19}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-19T19:42:22.515Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Over the past several years we have faced the need to develop a number of solutions to address the challenge of archiving large-format scientific imaging data and seamlessly visualizing that data, irrespective of the image format, in a web browser. ImageX is a ground-up rewrite and synthesis of our solutions to this issue, with a goal of reducing the workload required to transition from simply storing vast amounts of scientific imaging data on disk to securely archiving and sharing that data with the world. The components that make up the ImageX service stack include a secure and scalable back-end data service optimized for providing imaging data, a pre-processor to harvest metadata and intelligently scale and store the imaging data, and a flexible and embeddable front-end visualization web application. Our latest version of the software suite, ImageX 3.0, has been designed to meet the needs of a single user running locally on their own personal computer or scaled up to support the image storage and visualization needs of a modern observatory, with the intention of providing a 'push button' path to a fully deployed solution. Each ImageX 3.0 component is provided as a Docker container, and can be rapidly and seamlessly deployed to meet demand. In this paper, we describe the ImageX architecture while demonstrating many of its features, including intelligent image scaling with adaptive histograms, load-balancing, and administrative tools. On the user-facing side we demonstrate how the ImageX 3.0 viewer can be embedded into the content of any web application, and explore the astronomy-specific features and plugins we've written into it. The ImageX service stack is fully open-sourced, and is built upon widely-supported industry standards (Node.js, Angular, etc.). Apart from being deployed as a standalone service stack, ImageX components are currently in use or expected to be deployed on: (1) the ODI-PPA portal serving astronomical images taken at the WIYN Observatory in near real-time; (2) the web portal serving microscopy images taken at the IU Electron Microscopy Center; (3) the RADY-SCA portal supporting radiology and medical imaging as well as neuroscience researchers at IU.}, bibtype = {inproceedings}, author = {Young, Michael D. and Gopu, Arvind and Perigo, Raymond}, doi = {10.1117/12.2313684}, booktitle = {SPIE Astronomical Telescopes + Instrumentation, 10-15 June 2018, Austin, Texas, United States} }
@inproceedings{ title = {Toward sustainable deployment of distributed services on the cloud: dockerized ODI-PPA on Jetstream}, type = {inproceedings}, year = {2018}, pages = {108}, month = {7}, publisher = {SPIE-Intl Soc Optical Eng}, day = {6}, id = {7c008811-624f-3899-b058-604ef044ba86}, created = {2019-08-19T19:46:13.379Z}, accessed = {2019-08-19}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-19T19:46:13.466Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {The One Degree Imager - Portal, Pipeline and Archive (ODI-PPA) - a mature and fully developed product - has been a workhorse for astronomers observing on the WIYN ODI. It not only provides access to data stored in a secure archive, it also has a rich search and visualization interface, as well as integrated pipeline capabilities connected with supercomputers at Indiana University in a manner transparent to the user. As part of our ongoing sustainability review process, and given the increasing age of the ODI-PPA codebase, we have considered various approaches to modernization. While industry currently trends toward Node.js-based architectures, we concluded that porting an entire legacy PHP and Python-based system like ODI-PPA with its complex and distributed service stack would require too great an investment of human development/testing/deployment hours. Aging deployment hardware with tight budgets is another issue we identified, a common one especially when deploying complex distributed service stacks. In this paper, we present DockStream (https://jsportal.odi.iu.edu), an elegant solution that addresses both of the aforementioned issues. Using ODI-PPA as a case study, we present a proof of concept solution combining a suite of Docker containers built for each PPA service and a mechanism to acquire cost-free computational and storage resources. The dockerized ODI-PPA services can be deployed on one Docker-enabled host or several depending on the availability of hardware resources and the expected levels of use. In this paper, we describe the process of designing, creating, and deploying such custom containers. The NSF-funded Jetstream cloud, led by the Indiana University Pervasive Technology Institute (PTI), provides cloud-based, on-demand computing and data analysis resources, and a pathway to tackle the issue of insufficient hardware refreshment funds. We briefly describe the process of acquiring computational and storage resources on Jetstream, and the use of the Atmosphere web interface to create and maintain virtual machines on Jetstream. Finally, we present a summary of security refinements to a dockerized service stack on the cloud using nginx, custom docker networks, and Linux firewalls that significantly decrease the risk of security vulnerabilities and incidents while improving scalability.}, bibtype = {inproceedings}, author = {Bao, Yuanzhi and Gopu, Arvind and Perigo, Raymond and Young, Michael D.}, doi = {10.1117/12.2313647}, booktitle = {SPIE Astronomical Telescopes + Instrumentation, 10-15 June 2018, Austin, Texas, United States} }
@article{ title = {XD Metrics on Demand Value Analytics: Visualizing the Impact of Internal Information Technology Investments on External Funding, Publications, and Collaboration Networks}, type = {article}, year = {2018}, volume = {2}, websites = {http://journal.frontiersin.org/article/10.3389/frma.2017.00010/full}, month = {1}, publisher = {Frontiers Media SA}, day = {29}, id = {03ddb375-0e5d-36f2-977a-9ac3df7ceb49}, created = {2019-08-26T15:49:59.572Z}, accessed = {2019-08-26}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-11T16:36:20.629Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Many universities invest substantial resources in the design, deployment, and maintenance of campus-based cyberinfrastructure. To justify the expense, it is important that university administrators and others understand and communicate the value of these internal investments in terms of scholarly impact as measured by external funding, publications, and research collaborations. This paper introduces two visualizations and their usage in the Value Analytics (VA) module for Open XD Metrics on Demand (XDMoD). The VA module was developed by Indiana University’s (IU) Research Technologies division in conjunction with IU’s Cyberinfrastructure for Network Science Center (CNS) and the University at Buffalo’s Center for Computational Research (CCR). It interrelates quantitative measures of information technology (IT) usage, external funding, and publications in support of IT strategic decision making. This paper details the data, analysis workflows, and visual mappings used in the two VA visualizations that aim to communicate the value of different IT usage in terms of NSF and NIH funding, resulting publications, and associated research collaborations. To illustrate the feasibility of measuring IT values on research, we measured its financial and academic impact from the period between 2012 and 2017. The financial return on investment (ROI) is measured in terms of the funding, totaling $ 21,016,055 for NIH and NSF projects, and the academic ROI constitutes 1,531 NIH and NSF awards and 968 publications associated with 83 NSF and NIH awards. In addition, the results show that Medical Specialties, Brain Research, and Infectious Diseases are the top three scientific disciplines ranked by their publication records during the given time period.}, bibtype = {article}, author = {Scrivner, Olga and Singh, Gagandeep and Bouchard, Sara E. and Hutcheson, Scott C. and Fulton, Ben and Link, Matthew R. and Börner, Katy}, doi = {10.3389/frma.2017.00010}, journal = {Frontiers in Research Metrics and Analytics} }
@inproceedings{ title = {IQ-stations: Advances in state-of-the-art low cost immersive displays for research and development}, type = {inproceedings}, year = {2018}, keywords = {Consumer hardware,IQ-Station,Tracking systems,Virtual reality}, pages = {5}, month = {7}, publisher = {Association for Computing Machinery}, day = {22}, id = {e7a4766b-f39b-3351-9341-2e1c1125b194}, created = {2019-08-27T16:57:13.089Z}, accessed = {2019-08-27}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-27T16:57:13.209Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, bibtype = {inproceedings}, author = {Sherman, William R. and Whiting, Eric and Money, James H. and Grover, Shane}, doi = {10.1145/3219104.3219106}, booktitle = {Proceedings of the Practice and Experience on Advanced Research Computing (PEARC '18)} }
@techreport{ title = {ABI Sustaining: The National Center for Genome Analysis Support 2018 Annual Report}, type = {techreport}, year = {2018}, keywords = {NCGAS,National Science Foundation}, websites = {http://creativecommons.org/licenses/by/4.0/}, month = {9}, day = {10}, id = {a26d7e79-f0a8-330b-a835-bcdd15700570}, created = {2020-09-09T20:51:15.087Z}, accessed = {2020-09-09}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T20:51:15.173Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {National Science Foundation ABI-1458641}, bibtype = {techreport}, author = {Michaels, Scott D and Doak, Thomas G and Stewart, Craig A} }
@article{ title = {An archive of spectra from the Mayall Fourier transform spectrometer at Kitt Peak}, type = {article}, year = {2017}, volume = {129}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85011965299&doi=10.1088%2F1538-3873%2F129%2F972%2F024006&partnerID=40&md5=ae36d0681e87052b496e96baed19c7e6}, publisher = {Institute of Physics Publishing}, id = {8e8dd5de-954a-3333-ae73-98a1d76e4d03}, created = {2018-02-27T18:07:25.360Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:25.360Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Pilachowski2017}, source_type = {article}, notes = {cited By 0}, private_publication = {false}, abstract = {We describe the SpArc science gateway for spectral data obtained using the Fourier Transform Spectrometer (FTS) in operation at the Mayall 4-m telescope at the Kitt Peak National Observatory during the period from 1975 through 1995. SpArc is hosted by Indiana University Bloomington and is available for public access. The archive includes nearly 10,000 individual spectra of more than 800 different astronomical sources including stars, nebulae, galaxies, and solar system objects. We briefly describe the FTS instrument itself and summarize the conversion of the original interferograms into spectral data and the process for recovering the data into FITS files. The architecture of the archive is discussed and the process for retrieving data from the archive is introduced. Sample use cases showing typical FTS spectra are presented.}, bibtype = {article}, author = {Pilachowski, C A and Hinkle, K H and Young, M D and Dennis, H B and Gopu, A and Henschel, R and Hayashi, S}, doi = {10.1088/1538-3873/129/972/024006}, journal = {Publications of the Astronomical Society of the Pacific}, number = {972} }
@inproceedings{ title = {Performance Benchmarking of the R Programming Environment on the Stampede 1.5 Supercomputer}, type = {inproceedings}, year = {2017}, keywords = {benchmarking,many-core,r,scalability,xeon phi,xsede}, pages = {8}, volume = {Part F1287}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85025810544&doi=10.1145%2F3093338.3093346&partnerID=40&md5=38a5e9f4f39737f05a2dff38119eff0b}, publisher = {Association for Computing Machinery}, id = {5c5cf6ce-c8dc-3e44-b573-f072e2a1e9cc}, created = {2018-02-27T18:07:25.754Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T18:21:19.397Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {McCombs2017}, source_type = {conference}, notes = {cited By 0; Conference of 2017 Practice and Experience in Advanced Research Computing, PEARC 2017; Conference Date: 9 July 2017 Through 13 July 2017; Conference Code:128771}, private_publication = {false}, abstract = {We present performance results obtained with a new single-node performance benchmark of the R programming environment on the many-core Xeon Phi Knights Landing and standard Xeon-based compute nodes of the Stampede supercomputer cluster at the Texas Advanced Computing Center. The benchmark consists of microbenchmarks of linear algebra kernels and machine learning functionality that includes clustering and neural network training from the R distribution. The standard Xeon-based nodes outperformed their Xeon Phi counterparts for matrices of small to medium dimensions, performing approximately twice as fast for most of the linear algebra microbenchmarks. For matrices of medium to large dimensions, the Knights Landing nodes were competitive with or outperformed the standard Xeon-based nodes with most of the linear algebra microbenchmarks, executing as much as five times faster than the standard Xeon-based nodes. For the clustering and neural network training microbenchmarks, the standard Xeon-based nodes performed up to four times faster than their Xeon Phi counterparts for many large data sets, indicating that commonly used R packages may need to be reengineered to take advantage of existing optimized, scalable kernels.}, bibtype = {inproceedings}, author = {McCombs, James R and Michael, Scott}, doi = {10.1145/3093338.3093346}, booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing 2017 on Sustainability, Success and Impact (PEARC17)} }
@article{ title = {STAR-Fusion: Fast and Accurate Fusion Transcript Detection from RNA-Seq}, type = {article}, year = {2017}, pages = {120295}, publisher = {Cold Spring Harbor Labs Journals}, id = {864690a5-7ed7-384a-be6c-4cbb346f3250}, created = {2018-02-27T18:07:26.203Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:16.111Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Haas2017d}, source_type = {JOUR}, private_publication = {false}, bibtype = {article}, author = {Haas, Brian and Dobin, Alexander and Stransky, Nicolas and Li, Bo and Yang, Xiao and Tickle, Timothy and Bankapur, Asma and Ganote, Carrie and Doak, Thomas and Pochet, Natalie}, journal = {bioRxiv} }
@techreport{ title = {ABI Sustaining: The National Center for Genome Analysis Support 2017 Annual Report}, type = {techreport}, year = {2017}, id = {8e39179e-5bfc-3f9f-971e-3d76fa022996}, created = {2018-02-27T18:07:26.615Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:16.078Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Doak2017a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Doak, Thomas G and Stewart, Craig A and Michaels, Scott D} }
@inproceedings{ title = {The Community Software Repository from XSEDE: A resource for the national research community}, type = {inproceedings}, year = {2017}, pages = {8}, volume = {Part F1287}, id = {bf8e244c-26dd-3f16-9439-7d5e6d887149}, created = {2018-02-27T18:07:27.361Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T18:55:43.754Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The Extreme Science and Engineering Discovery Environment (XSEDE) connects cyberinfrastructure (CI) resources, software, and services. One of XSEDE's primary goals in supporting US research generally is to "advance the ecosystem" - making use of XSEDE's leadership position to create software, tools, and services that lead to an effective and efficient national cyberinfrastructure. Software enables this endeavor in two very distinct ways: enabling the operation of XSEDE as a distributed yet integrated cyberinfrastructure resource; and by providing access to a wide variety of software of value to end user researchers and students, operators of campus cyberinfrastructure resources, and those planning to propose new cyberinfrastructure resources to the National Science Foundation (NSF). The Community Software Repository (CSR) provides transparency about how XSEDE operates and provides access to software of use and value to the US research community generally. The CSR provides access to use cases that describe needs expressed by the research community, capability delivery plans that describe how XSEDE meets those needs, and the actual software that meets those needs. Software is delivered in a variety of forms and formats. The CSR also includes mechanisms for interaction between XSEDE staff, software developers, and the end user community to accelerate meeting of community needs and aid software developers in finding audiences for their software. XCI's long term goal is that the XSEDE Community Software Repository will be widely used and valuable to the national research community.}, bibtype = {inproceedings}, author = {Navarro, J.P. and Stewart, C.A. and Knepper, R. and Liming, L. and Lifka, D. and Dahan, M.}, doi = {10.1145/3093338.3093373}, booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing 2017 on Sustainability, Success and Impact (PEARC17)} }
@techreport{ title = {Summary of the 2017 NCGAS User Survey}, type = {techreport}, year = {2017}, id = {76193244-4087-32f2-98f2-53efd338adcc}, created = {2018-02-27T18:07:28.202Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.668Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Doak2017}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Doak, Thomas G and Stewart, Craig A and Michaels, Scott D} }
@article{ title = {The Open Science Cyber Risk Profile: The Rosetta Stone for Open Science and Cybersecurity}, type = {article}, year = {2017}, keywords = {Cyber security,Cyber-attacks,OSCRP,Risk profile,Natural sciences computing,Security of data,Security systems}, pages = {94-95}, volume = {15}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85031689244&doi=10.1109%2FMSP.2017.3681058&partnerID=40&md5=c8539146c103a7d978d47ba1947849a6}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, id = {2ec555b0-7d13-33c4-bde5-c873b59bced4}, created = {2018-02-27T18:07:29.346Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.346Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Peisert201794}, source_type = {article}, notes = {cited By 0}, private_publication = {false}, abstract = {The Open Science Cyber Risk Profile (OSCRP) working group has created a document that motivates scientists by demonstrating how improving their security posture reduces the risks to their science. This effort aims to bridge the communication gap between scientists and IT security professionals and allows for the effective management of risks to open science caused by IT security threats.}, bibtype = {article}, author = {Peisert, S and Welch, V}, doi = {10.1109/MSP.2017.3681058}, journal = {IEEE Security and Privacy}, number = {5} }
@techreport{ title = {Indiana University Pervasive Technology Institute}, type = {techreport}, year = {2017}, city = {Bloomington, IN}, id = {74d4621c-d873-3a41-a40c-5a5632bab05b}, created = {2018-02-27T18:07:29.907Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.907Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {misc}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, C A and Welch, V and Plale, B and Fox, G and Pierce, M and Sterling, T}, doi = {10.5072/FK2154N14D} }
@inproceedings{ title = {High performance computing enabled simulation of the food-water-energy system}, type = {inproceedings}, year = {2017}, keywords = {Agricultural machinery,Agriculture,Agro ecosystems,Agro-IBIS,Benchmarking,Climate model,Computer clusters}, pages = {10}, volume = {Part F1287}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85025829960&doi=10.1145%2F3093338.3093381&partnerID=40&md5=69d8a119998a6e0aac5a0a4ac12415ad}, publisher = {Association for Computing Machinery}, id = {58331b28-ad60-3770-a4db-30b5f6fe84e8}, created = {2018-02-27T18:07:30.754Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-26T19:07:38.370Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Dennis2017}, source_type = {conference}, notes = {cited By 0; Conference of 2017 Practice and Experience in Advanced Research Computing, PEARC 2017; Conference Date: 9 July 2017 Through 13 July 2017; Conference Code:128771}, private_publication = {false}, abstract = {Domain science experts are commonly limited by computational efficiency of their code and hardware resources available for execution of desired simulations. Here, we detail a collaboration between domain scientists focused on simulating an ensemble of climate and human management decisions to drive environmental (e.g., water quality) and economic (e.g., crop yield) outcomes. Briefly, the domain scientists developed a message passing interface to execute the formerly serial code across a number of processors, anticipating significant performance improvement by moving to a cluster computing environment from their desktop machines. The code is both too complex to efficiently re-code from scratch and has a shared codebase that must continue to function on desktop machines as well as the parallel implementation. However, inefficiencies in the code caused the Lustre filesystem to bottleneck performance for all users. The domain scientists collaborated with Indiana University's Science Applications and Performance Tuning and High Performance File System teams to address the unforeseen performance limitations. The non-linear process of testing software advances and hardware performance is a model of the failures and successes that can be anticipated in similar applications. Ultimately, through a series of iterative software and hardware advances the team worked collaboratively to increase performance of the code, cluster, and file system to enable more than 100-fold increases in performance. As a result, the domain science is able to assess ensembles of climate and human forcing on the model, and sensitivities of ecologically and economically important outcomes of intensively managed agricultural landscapes.}, bibtype = {inproceedings}, author = {Dennis, H E B and Ward, A S and Balson, T and Li, Y and Henschel, R and Slavin, S and Simms, S and Brunst, H}, doi = {10.1145/3093338.3093381}, booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing 2017 on Sustainability, Success and Impact (PEARC17)} }
@inproceedings{ title = {Stampede 2: The Evolution of an XSEDE supercomputer}, type = {inproceedings}, year = {2017}, keywords = {Computational simulation,Computer applications,Computer programming,Exascale computing,High performance computing,Supercomputers}, pages = {8}, volume = {Part F1287}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85025827654&doi=10.1145%2F3093338.3093385&partnerID=40&md5=4ed856ab16c3decf2b1f95272d3fe109}, publisher = {Association for Computing Machinery}, id = {d5b2ed0f-f847-34af-af33-2d11c0218eee}, created = {2018-02-27T18:07:31.900Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T18:21:19.409Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stanzione2017}, source_type = {conference}, notes = {cited By 0; Conference of 2017 Practice and Experience in Advanced Research Computing, PEARC 2017; Conference Date: 9 July 2017 Through 13 July 2017; Conference Code:128771}, private_publication = {false}, abstract = {The Stampede 1 supercomputer was a tremendous success as an XSEDE resource, providing more than eight million successful computational simulations and data analysis jobs to more than ten thousand users. In addition, Stampede 1 introduced new technology that began to move users towards many core processors. As Stampede 1 reaches the end of its production life, it is being replaced in phases by a new supercomputer, Stampede 2, that will not only take up much of the original system's workload, but continue the bridge to technologies on the path to exascale computing. This paper provides a brief summary of the experiences of Stampede 1, and details the design and architecture of Stampede 2. Early results are presented from a subset of Intel Knights Landing nodes that are bridging between the two systems.}, bibtype = {inproceedings}, author = {Stanzione, D and Barth, B and Gaffney, N and Gaither, K and Mehringer, S and Hempel, C and Wernert, E and Minyard, T and Tufo, H and Panda, D and Teller, P}, doi = {10.1145/3093338.3093385}, booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing 2017 on Sustainability, Success and Impact (PEARC17)} }
@article{ title = {Forward Observer system for radar data workflows: Big data management in the field}, type = {article}, year = {2017}, keywords = {Big data,Data handling,Data storage equipment,Computer clusters,Data analysis system,Data storage,Information management}, pages = {92-97}, volume = {76}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020697663&doi=10.1016%2Fj.future.2017.05.031&partnerID=40&md5=a5874af9af295c2eb064bc71abf890b2}, publisher = {Elsevier B.V.}, id = {d0c755b3-e411-3efe-b1f1-0dc01e876b88}, created = {2018-02-27T18:07:32.001Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.001Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper201792}, source_type = {article}, notes = {cited By 0}, private_publication = {false}, abstract = {There are unique challenges in managing data collection from instruments in the field in general. These issues become extreme when “in the field” means “in a plane over the Antarctic”. In this paper we present the design and function of the Forward Observer, a computer cluster and data analysis system that flies in a plane in the Arctic and Antarctic to collect, analyze in real time, and store Synthetic Aperture Radar (SAR) data. SAR is used to analyze the thickness and structure of polar ice sheets. We also discuss the processing of data once it is returned to the continental US and made available via data grids. The needs for in-flight data analysis and storage in the Antarctic and Arctic are highly unusual, and we have developed a novel system to meet those needs. We describe the constraints and requirements that led to the creation of this system and its general functionality, which applies to any instrument. We discuss the main means for handling replication and creating checksum information to ensure that data collected in polar regions are returned safely to the mainland US for analysis. So far, not a single byte of data collected in the field has failed to make it home to the US for analysis (although many individual data storage devices have failed or been damaged due to the challenges of the extreme environments in which this system is used). While the Forward Observer system was developed for the extreme situation of data management in the field in the Antarctic, the technology and solutions we have developed are applicable and potentially usable in many situations where researchers wish to do real-time data management in the field in areas that are constrained in terms of electrical supply.}, bibtype = {article}, author = {Knepper, R and Standish, M}, doi = {10.1016/j.future.2017.05.031}, journal = {Future Generation Computer Systems} }
@inproceedings{ title = {Campus Compute Co-operative (CCC): A service oriented cloud federation}, type = {inproceedings}, year = {2017}, id = {8b1453cb-e434-3213-8139-205f93644f30}, created = {2018-02-27T18:07:32.581Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.581Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Universities struggle to provide both the quantity and diversity of compute resources that their researchers need when their researchers need them. Purchasing resources to meet peak demand for all resource types is cost prohibitive for all but a few institutions. Renting capacity on commercial clouds is seen as an alternative to owning. Commercial clouds though expect to be paid. The Campus Compute Cooperative (CCC) provides an alternative to purchasing capacity from commercial providers that provides increased value to member institutions at reduced cost. Member institutions trade their resources with one another to meet both local peak demand as well as provide access to resource types not available on the local campus that are available elsewhere. Participating institutions have dual roles. First as consumers of resources when their researchers use CCC machines, and second as producers of resources when CCC users from other institutions use their resources. In order to avoid the tragedy of the commons in which everyone only wants to use resources, the resource providers will receive credit when their resources are used by others. The consumer is charged based on the quality of service (high, medium, low) and the particulars of the resource provided (speed, interconnection network, memory, etc.). Account balances are cleared monthly. This paper describes solutions to both the technical and sociopolitical challenges of federating university resources and early results with the CCC. Technical issues include the security model, accounting, job specification/management and user interfaces. Socio-political issues include institutional risk management, how to manage market forces and incentives to avoid sub-optimal outcomes, and budget predictability.}, bibtype = {inproceedings}, author = {Grimshaw, A. and Prodhan, M.A. and Thomas, A. and Stewart, C. and Knepper, R.}, doi = {10.1109/eScience.2016.7870880}, booktitle = {Proceedings of the 2016 IEEE 12th International Conference on e-Science, e-Science 2016} }
@techreport{ title = {Pervasive Technology Institute Annual Report: Research Innovations and Advanced Cyberinfrastructure Services in Support of IU Strategic Goals During FY 2017}, type = {techreport}, year = {2017}, websites = {http://hdl.handle.net/2022/21809}, id = {d4d67bc9-bbbe-364b-896c-b2816e08960f}, created = {2018-02-27T18:07:33.554Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T00:01:48.051Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2017b}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig and Plale, Beth and Welch, Von and Pierce, Marlon and Fox, Geoffrey C. and Doak, Thomas G. and Hancock, David Y. and Henschel, Robert and Link, Matthew R. and Miller, Therese and Wernert, Eric and Boyles, Michael J. and Fulton, Ben and Weakley, Le Mai and Ping, Robert and Gniady, Tassie and Snapp-Childs, Winona} }
@techreport{ title = {Addressing the national need for more diversity and aggregate investment in cyberinfrastructure supporting open science and engineering research}, type = {techreport}, year = {2017}, id = {eef6f263-1762-31d0-bd6b-fd1f1daa220a}, created = {2018-02-27T18:07:33.693Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.151Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2017a}, source_type = {JOUR}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A} }
@article{ title = {Using the Jetstream Research Cloud to provide Science Gateway resources}, type = {article}, year = {2017}, pages = {753-757}, websites = {http://ieeexplore.ieee.org/document/7973774/}, month = {5}, publisher = {IEEE}, id = {d8853fbf-b222-3b0b-9cde-ad261535c459}, created = {2018-02-27T18:07:34.147Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:34.147Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {article}, private_publication = {false}, bibtype = {article}, author = {Knepper, Richard and Coulter, Eric and Pierce, Marlon and Marru, Suresh and Pamidighantam, Sudhakar}, doi = {10.1109/CCGRID.2017.121}, journal = {2017 17th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGRID)} }
@inproceedings{ title = {Jetstream: Early Operations Performance, Adoption, and Impacts}, type = {inproceedings}, year = {2017}, keywords = {architecture,atmosphere,cloud,cyberinfrastructure,digital,education,globus,identity,jetstream,openstack,outreach,research,storage,training}, pages = {7}, websites = {http://doi.acm.org/10.1145/3147213.3155104}, publisher = {ACM}, city = {New York, NY, USA}, series = {UCC '17}, id = {af3859f6-e3c0-32cf-89fb-75fcccf85f73}, created = {2018-08-09T16:38:15.492Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-08-09T16:38:15.492Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hancock:2017:JEO:3147213.3155104}, source_type = {inproceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {Hancock, David}, doi = {10.1145/3147213.3155104}, booktitle = {Proceedings of the 10th International Conference on Utility and Cloud Computing} }
@inproceedings{ title = {Scalable photogrammetry with high performance computing}, type = {inproceedings}, year = {2017}, keywords = {Benchmarking,Compute resources,Cultural heritages,Graduate students,Photogrammetry,Students}, pages = {3}, volume = {Part F1287}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85025826260&doi=10.1145%2F3093338.3104174&partnerID=40&md5=3d7c365c1289d519e724ab1c35dfad07}, publisher = {Association for Computing Machinery}, id = {1b613bc0-0610-3573-badb-6c05df967054}, created = {2018-08-09T16:38:15.973Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-27T16:51:40.266Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Gniady2017}, source_type = {conference}, notes = {cited By 0; Conference of 2017 Practice and Experience in Advanced Research Computing, PEARC 2017; Conference Date: 9 July 2017 Through 13 July 2017; Conference Code:128771}, private_publication = {false}, abstract = {Photogrammetry is used to build 3-dimensional models of everything from terrains to ancient statues. In the past, the stitching process was done on powerful PCs and could take weeks for large datasets. Even relatively small objects often required several hours to stitch together. With the availability of parallel processing options in the latest release of Agisoft PhotoScan, it is possible to leverage the power of high performance computing on large datasets. This poster presents the results of benchmarking tests for three datasets processed at two different model quality levels, medium and high (there are four times more points in the dense point cloud at the high setting) using 2, 4, 8, and 16 nodes. The purpose of the benchmarking is to determine how to optimize software license usage and compute resources against time and output quality. The poster also details the matrix of user-specified parameters that have been built into the python script that submits the parallel jobs. These parameters have evolved through the assessment of needs of users who are using the HPC deployment of photogrammetry as a service. We are excited by the uptake of this new service around campus in different fields across multiple disciplines. A group of cultural heritage experts will be using the service from Italy this summer, and a graduate student in anthropology will be stitching aerial data sets from Mexico.}, bibtype = {inproceedings}, author = {Gniady, T and Ruan, G and Sherman, W and Tuna, E and Wernert, E}, doi = {10.1145/3093338.3104174}, booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing 2017 on Sustainability, Success and Impact (PEARC17)} }
@inbook{ type = {inbook}, year = {2017}, pages = {1063-1074}, websites = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-2255-3.ch092}, publisher = {IGI Global}, city = {Hershey, PA, USA}, id = {dd3c140e-75d2-3a40-92a9-d4718bb97350}, created = {2019-08-15T19:12:18.993Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-15T19:45:52.432Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, source_type = {CHAP}, private_publication = {false}, abstract = {Computers accelerate our ability to achieve scientific breakthroughs. As technology evolves and new research needs come to light, the role for cyberinfrastructure as “knowledge” infrastructure continues to expand. In essence, cyberinfrastructure can be thought of as the integration of supercomputers, data resources, visualization, and people that extends the impact and utility of information technology. This article discusses cyberinfrastructure, the related topics of science gateways and campus bridging, and identifies future challenges and opportunities in cyberinfrastructure.}, bibtype = {inbook}, author = {Stewart, Craig A and Knepper, Richard and Link, Matthew R and Pierce, Marlon and Wernert, Eric and Wilkins-Diehr, Nancy}, editor = {Mehdi Khosrow-Pour, D B A}, doi = {10.4018/978-1-5225-2255-3.ch092}, chapter = {Cyberinfrastructure, Cloud Computing, Science Gateways, Visualization, and Cyberinfrastructure Ease of Use}, title = {Encyclopedia of Information Science and Technology, Fourth Edition} }
@inproceedings{ title = {XDMoD value analytics: A tool for measuring the financial and intellectual ROI of your campus cyberinfrastructure facilities}, type = {inproceedings}, year = {2017}, keywords = {Funding,HPC,HPC monitoring,ROI,XDMoD}, pages = {7}, volume = {Part F1287}, websites = {http://doi.acm.org/10.1145/3093338.3093358}, month = {7}, publisher = {Association for Computing Machinery}, day = {9}, id = {3de5a84c-8c4b-3fb2-bc1a-5959548f8431}, created = {2019-08-26T16:31:44.324Z}, accessed = {2019-08-26}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T18:55:43.683Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Understanding the financial and intellectual value of campus-based cyberinfrastructure (CI) to the institutions that invest in such CI is intrinsically difficult. Given today's financial pressures, there is often administrative pressure questioning the value of campus-based and campus-funded CI resources. In this paper we describe new financial analytics capabilities being added to the widely used system analysis tool Open XDMoD (XSEDE Metrics on Demand) to create a new realm of metrics that will allow us to correlate usage of high performance computing with funding and publications. The capabilities to be added will eventually allow CI centers to view metrics relevant to both scientific output in terms of publications, and financial data in terms of awarded grants. The creation of Open XDMoD Value Analytics was funded by the National Science Foundation as a two year project. We are now nearing the end of the first year of this award, during which we focused on financial analytics. During the second year of this project we will focus on analytics of intellectual output. This module will allow the same sorts of analyses about systems and users as the financial analytics module, but in terms of intellectual outputs such as number of publications, citations to publications, and H indices. This module will also have capabilities to visualize such data, integrated with financial data. We plan to present these tools at PEARC '18.}, bibtype = {inproceedings}, author = {Fulton, Ben and Gallo, Steven and Henschel, Robert and Yearke, Tom and Börner, Katy and DeLeon, Robert L. and Furlani, Thomas and Stewart, Craig A. and Link, Matt}, doi = {10.1145/3093338.3093358}, booktitle = {Proceedings of the Practice and Experience in Advanced Research Computing 2017 on Sustainability, Success and Impact (PEARC17)} }
@inproceedings{ title = {Jetstream}, type = {inproceedings}, year = {2017}, keywords = {Atmosphere,Cloud,Cyberinfrastructure,Digital,EOT,Education,Globus,Jetstream,Openstack,Outreach,Research,Training,XD,XSEDE}, pages = {67-72}, volume = {Part F1317}, websites = {http://dl.acm.org/citation.cfm?doid=3123458.3123466}, month = {10}, publisher = {ACM Press}, day = {1}, city = {New York, New York, USA}, id = {390b752a-0b0f-301e-9e17-f175f54db7d2}, created = {2019-09-12T18:51:36.028Z}, accessed = {2019-09-12}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:27:36.154Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Jetstream is the first production cloud funded by the NSF for conducting general-purpose science and engineering research as well as an easy-to-use platform for education activities. Unlike many high-performance computing systems, Jetstream uses the interactive Atmosphere graphical user interface developed as part of the iPlant (now CyVerse) project and focuses on interactive use on uniprocessor or multiprocessor. This interface provides for a lower barrier of entry for use by educators, students, practicing scientists, and engineers. A key part of Jetstream's mission is to extend the reach of the NSF's eXtreme Digital (XD) program to a community of users who have not previously utilized NSF XD program resources, including those communities and institutions that traditionally lack significant cyberinfrastructure resources. One manner in which Jetstream eases this access is via virtual desktops facilitating use in education and research at small colleges and universities, including Historically Black Colleges and Universities (HBCUs), Minority Serving Institutions (MSIs), Tribal colleges, and higher education institutions in states designated by the NSF as eligible for funding via the Experimental Program to Stimulate Competitive Research (EPSCoR). Jetstream entered into full production in September 2016 and during the first six months it has supported more than a dozen educational efforts across the United States. Here, we discuss how educators at institutions of higher education have been using Jetstream in the classroom and at student-focused workshops. Specifically, we explore success stories, difficulties encountered, and everything in between. We also discuss plans for increasing the use of cloud-based systems in higher education. A primary goal in this paper is to spark discussions between educators and information technologists on how to improve using cloud resources in education.}, bibtype = {inproceedings}, author = {Fischer, Jeremy and Hancock, David Y. and Lowe, John Michael and Turner, George and Snapp-Childs, Winona and Stewart, Craig A.}, doi = {10.1145/3123458.3123466}, booktitle = {Proceedings of the 2017 ACM Annual Conference on SIGUCCS - SIGUCCS '17} }
@inproceedings{ title = {Benchmarking Harp-DAAL: High Performance Hadoop on KNL Clusters}, type = {inproceedings}, year = {2017}, keywords = {BigData,HPC,Xeon Phi}, pages = {82-89}, volume = {2017-June}, month = {9}, publisher = {IEEE Computer Society}, day = {8}, id = {c90953af-1a47-300f-b801-4eabebbf2c89}, created = {2020-09-09T19:45:44.340Z}, accessed = {2020-09-09}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T19:45:44.468Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Data analytics is undergoing a revolution in many scientific domains, and demands cost-effective parallel data analysis techniques. Traditional Java-based Big Data processing tools like Hadoop MapReduce are designed for commodity CPUs. In contrast, emerging manycore processors like the Xeon Phi have an order of magnitude greater computation power and memory bandwidth. To harness their computing capabilities, we propose the Harp-DAAL framework. We show that enhanced versions of MapReduce can be replaced by Harp, a Hadoop plug-in, that offers useful data abstractions for both high-performance iterative computation and MPI-quality communication, as well as the ability to drive Intel's native DAAL library. We select a subset of three machine learning algorithms and implement them within Harp-DAAL. Our scalability benchmarks ran on Knights Landing (KNL) clusters and achieved up to 2.5 times speedup of performance over the HPC solution in NOMAD and 15 to 40 times speedup over Java-based solutions in Spark. We further quantify the workloads on single node KNL with a performance breakdown at the micro-architecture level.}, bibtype = {inproceedings}, author = {Chen, Langshi and Peng, Bo and Zhang, Bingjing and Liu, Tony and Zou, Yiming and Jiang, Lei and Henschel, Robert and Stewart, Craig and Zhang, Zhang and McCallum, Emily and Zahniser, Tom and Omer, Jon and Qiu, Judy}, doi = {10.1109/CLOUD.2017.19}, booktitle = {IEEE International Conference on Cloud Computing, CLOUD} }
@techreport{ title = {Jetstream Stakeholder Advisory Board Meeting February 2017: Presenters' Report}, type = {techreport}, year = {2017}, keywords = {Jetstream,Technical Report,cloud computing}, websites = {http://hdl.handle.net/2022/21247}, month = {2}, day = {28}, id = {738a349a-4298-345a-912d-aa563d4346c3}, created = {2020-09-09T23:50:56.732Z}, accessed = {2020-09-09}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T23:50:56.732Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A. and Hancock, David Y. and Vaughn, Matthew and Merchant, Nirav and Lowe, John Michael and Fischer, Jeremy and Liming, Lee and Taylor, James and Turner, George and Hammond, C. Bret and Skidmore, Edwin and Packard, Michael and Miller, Therese and Foster, Ian and Rad, Paul and Mehringer, Susan} }
@techreport{ title = {Jetstream (NSF Award 1445604) Program Year 3 Annual Report (December 1, 2016 – November 28, 2017)}, type = {techreport}, year = {2017}, keywords = {Technical Report}, websites = {http://hdl.handle.net/2022/21854}, month = {12}, day = {15}, id = {4c3c1695-5078-37ac-a9cf-2f44f174d6f5}, created = {2020-09-11T15:29:56.836Z}, accessed = {2020-09-11}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-11T15:29:56.836Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Hancock, David Y and Vaughn, Matthew and Merchant, Nirav and Fischer, Jeremy and Lowe, John Michael and Liming, Lee and Taylor, James and Afgan, Enis and Turner, George and Skidmore, Edwin and Packard, Michael and Beck, Brian W and Foster, Ian} }
@techreport{ title = {Summary of the Survey of Field/Marine Station Directors and Managers}, type = {techreport}, year = {2017}, keywords = {Technical Report}, websites = {http://hdl.handle.net/2022/21835}, month = {7}, day = {31}, id = {b5c97b4c-0e89-393e-b935-5669f73996ac}, created = {2020-09-11T16:55:38.870Z}, accessed = {2020-09-11}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-11T16:55:38.870Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Doak, Thomas G. and Stewart, Craig A. and Wernert, Julie and Hancock, David Y. and Miller, Therese} }
@techreport{ title = {High Performance Computing System Acquisition: Jetstream–A Self-Provisioned, Scalable Science and Engineering Cloud Environment (Year 1 Annual Report)}, type = {techreport}, year = {2016}, id = {b444add9-6920-31bd-b34d-6d07afe8f871}, created = {2018-02-27T18:07:25.397Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.058Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2016f}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Foster, Ian and Merchant, Nirav C and Taylor, James and Vaughn, Matthew W} }
@techreport{ title = {System Acceptance Report for NSF award 1445604 "High Performance Computing System Acquisition: Jetstream-A Self-Provisioned, Scalable Science and Engineering Cloud Environment"}, type = {techreport}, year = {2016}, id = {1f7da13b-a6af-3a14-a7e9-c05f8503f7f2}, created = {2018-02-27T18:07:25.843Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.954Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2016e}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Hancock, David Y and Vaughn, Matthew and Merchant, Nirav C and Lowe, John Michael and Fischer, Jeremy and Liming, Lee and Taylor, James and Afgan, Enis and Turner, George} }
@techreport{ title = {NSF High Performance Computing System Acquisition system description: Jetstream-a self-provisioned, scalable science and engineering cloud environment}, type = {techreport}, year = {2016}, id = {b4e9ce36-f545-3e52-aeea-4ba4c6e5a027}, created = {2018-02-27T18:07:26.497Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.966Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2016g}, source_type = {JOUR}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Stanzione, Daniel and Cockerill, Timothy and Skidmore, Edwin and Fischer, Jeremy and Lowe, John Michael and Hammond, Bret and Turner, George and Hancock, David Y and Miller, Therese} }
@inproceedings{ title = {RabbitQR: Fast and flexible Big Data Processing at LSST data rates using existing, shared-use hardware}, type = {inproceedings}, year = {2016}, id = {f0c859bb-fbd5-3653-8561-4922b7ee1d84}, created = {2018-02-27T18:07:26.758Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.365Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Kotulla2016}, source_type = {JOUR}, private_publication = {false}, bibtype = {inproceedings}, author = {Kotulla, Ralf and Gopu, Arvind and Hayashi, Soichi}, doi = {10.1117/12.2233527}, booktitle = {SPIE 9913, Software and Cyberinfrastructure for Astronomy} }
@techreport{ title = {Trident: Scalable compute archives: Workflows, visualization, and analysis}, type = {techreport}, year = {2016}, source = {Proceedings of SPIE - The International Society for Optical Engineering}, keywords = {AngularJS,Applicat,Application programming interfaces (API),Big data,Docker,IU trident,Javascript,Micros}, volume = {9913}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85006489750&doi=10.1117%2F12.2233111&partnerID=40&md5=b9ffe3c3a1a9583114539c136797249f}, publisher = {SPIE}, id = {9280896f-51a2-3b95-a440-50a35a7167e3}, created = {2018-02-27T18:07:26.816Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.816Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Gopu2016}, source_type = {conference}, notes = {cited By 0; Conference of Software and Cyberinfrastructure for Astronomy IV; Conference Date: 26 June 2016 Through 30 June 2016; Conference Code:125147}, private_publication = {false}, abstract = {The Astronomy scientific community has embraced Big Data processing challenges, e.g., associated with time-domain astronomy, and come up with a variety of novel and efficient data processing solutions. However, data processing is only a small part of the Big Data challenge. Efficient knowledge discovery and scientific advancement in the Big Data era require new and equally efficient tools: modern user interfaces for searching, identifying and viewing data online without direct access to the data; tracking of data provenance; searching, plotting and analyzing metadata; interactive visual analysis, especially of (time-dependent) image data; and the ability to execute pipelines on supercomputing and cloud resources with minimal user overhead or expertise even to novice computing users. The Trident project at Indiana University offers a comprehensive web and cloud-based microservice software suite that enables the straightforward deployment of highly customized Scalable Compute Archive (SCA) systems, including extensive visualization and analysis capabilities, with a minimal amount of additional coding. Trident seamlessly scales up or down in terms of data volumes and computational needs, and allows feature sets within a web user interface to be quickly adapted to meet individual project requirements. Domain experts only have to provide code or business logic about handling/visualizing their domain's data products and about executing their pipelines and application workflows. Trident's microservices architecture is made up of lightweight services connected by a REST API and/or a message bus; web interface elements are built using NodeJS, AngularJS, and HighCharts JavaScript libraries, among others, while backend services are written in NodeJS, PHP/Zend, and Python. The software suite currently consists of (1) a simple workflow execution framework to integrate, deploy, and execute pipelines and applications, (2) a progress service to monitor workflows and sub-workflows, (3) ImageX, an interactive image visualization service, (4) an authentication and authorization service, (5) a data service that handles archival, staging and serving of data products, and (6) a notification service that serves statistical collation and reporting needs of various projects. Several additional components are under development. 
Trident is an umbrella project that evolved from the One Degree Imager, Portal, Pipeline, and Archive (ODI-PPA) project, which we had initially refactored toward (1) a powerful analysis/visualization portal for Globular Cluster System (GCS) survey data collected by IU researchers, (2) a data search and download portal for the IU Electron Microscopy Center's data (EMC-SCA), and (3) a prototype archive for the Ludwig Maximilian University's Wide Field Imager. The new Trident software has been used to deploy (1) a metadata quality control and analytics portal (RADY-SCA) for DICOM formatted medical imaging data produced by the IU Radiology Center, (2) several prototype workflows for different domains, (3) a snapshot tool within IU's Karst Desktop environment, and (4) a limited component-set to serve GIS data within the IU GIS web portal. Trident SCA systems leverage supercomputing and storage resources at Indiana University but can be configured to make use of any cloud/grid resource, from local workstations/servers to (inter)national supercomputing facilities such as XSEDE. © COPYRIGHT SPIE.}, bibtype = {techreport}, author = {Gopu, A and Hayashi, S and Young, M D and Kotulla, R and Henschel, R and Harbeck, D}, editor = {Chiozzi, G. and Guzman, J. C.}, doi = {10.1117/12.2233111} }
@techreport{ title = {Jetstream–A Self-Provisioned, Scalable Science and Engineering Cloud Environment-NSF Acceptance Report}, type = {techreport}, year = {2016}, id = {930a60f3-714e-355f-b6cb-6c55b4b5692f}, created = {2018-02-27T18:07:26.891Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.910Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2016h}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Foster, Ian and Merchant, Nirav C and Taylor, James and Vaughn, Matthew W} }
@article{ title = {Innovations from the early user phase on the Jetstream Research Cloud}, type = {article}, year = {2016}, id = {6a285097-6c20-33a4-84fe-5ba95c19bad3}, created = {2018-02-27T18:07:27.893Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.383Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper2016a}, source_type = {JOUR}, private_publication = {false}, bibtype = {article}, author = {Knepper, Richard and Fischer, Jeremy and Stewart, Craig A and Hancock, David Y and Link, Matthew R}, journal = {2016 Federated Conference on Computer Science and Information Systems} }
@inproceedings{ title = {Cyberinfrastructure as a platform to facilitate effective collaboration between institutions and support collaboratories}, type = {inproceedings}, year = {2016}, volume = {06-09-Nove}, id = {fc389ea3-2c92-321a-99cb-979c8c99dd87}, created = {2018-02-27T18:07:28.079Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.079Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Researchers, scientists, engineers, granting agencies, and increasingly complex research problems have given rise to the scientific "collaboratory"-large organizations that span many institutions, with individual members working together to explore a particular phenomenon. These organizations require computational resources in order to support analyses and to provide platforms where the collaborators can interact. The XSEDE Community Infrastructure (XCI) group assists campuses in using their own resources and promotes the sharing of those resources in order to create collaboratories improving use of the nation's collective cyberinfrastructure. Currently XCI provides toolkits and training, and collaborates with organizations such as ACI-REF, XSEDE Campus Champions, and the Open Science Grid to identify tools and best practices that support the community. This paper discusses the progress in and barriers to developing a robust collaborative environment where computational resources can be shared.}, bibtype = {inproceedings}, author = {Coulter, E. and Knepper, R. and Pierce, M. and Fischer, J. and Lifka, D. and Stewart, C. and Hallock, B. and Navarro, J.P.}, doi = {10.1145/2974927.2974962}, booktitle = {Proceedings ACM SIGUCCS User Services Conference} }
@inproceedings{ title = {Improving the scalability of a charge detection mass spectrometry workflow}, type = {inproceedings}, year = {2016}, keywords = {Application performance,Big data,Charge detection,Chemic,Chemical compounds,Chemical detection,Mass spect}, pages = {8}, volume = {17-21-July}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84989172207&doi=10.1145%2F2949550.2949563&partnerID=40&md5=72e130c503d903ca3cc63cfe00eb39e1}, publisher = {Association for Computing Machinery}, id = {2215d0c0-f371-3aa5-9c9d-0d5361361e8f}, created = {2018-02-27T18:07:28.189Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-26T19:07:38.369Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {McClary2016}, source_type = {conference}, notes = {cited By 0; Conference of Conference on Diversity, Big Data, and Science at Scale, XSEDE 2016 ; Conference Date: 17 July 2016 Through 21 July 2016; Conference Code:123713}, private_publication = {false}, abstract = {The Indiana University (IU) Department of Chemistry's Martin F. Jarrold (MFJ) Research Group studies a specialized technique of mass spectrometry called Charge Detection Mass Spectrometry (CDMS). The goal of mass spectrometry is to determine the mass of chemical and biological compounds, and with CDMS, the MFJ Research Group is extending the upper limit of mass detection. These researchers have developed a scientific application, which accurately analyzes raw CDMS data generated from their mass spectrometer. This paper explains the comprehensive process of optimizing the group's workflow by improving both the latency and throughput of their CDMS application. These significant performance improvements enabled high efficiency and scalability across IU's Advanced Cyberinfrastructure; overall, this analysis and development resulted in a 25x speedup of the application. © 2016 ACM.}, bibtype = {inproceedings}, author = {McClary, S and Henschel, R and Thota, A and Brunst, H and Draper, B}, doi = {10.1145/2949550.2949563}, booktitle = {Proceedings of the XSEDE16 Conference on Diversity, Big Data, and Science at Scale (XSEDE16)} }
@inproceedings{ title = {Implementation of simple XSEDE-like clusters: Science enabled and lessons learned}, type = {inproceedings}, year = {2016}, volume = {17-21-July}, publisher = {ACM Press, New York}, id = {b52a854b-1219-389a-b25b-cf8a5c822c3b}, created = {2018-02-27T18:07:29.099Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-26T16:31:57.443Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The Extreme Science and Engineering Discovery Environment (XSEDE) has created a suite of software that is collectively known as the XSEDE-Compatible Basic Cluster (XCBC). It is designed to enable smaller, resource-constrained research groups or universities to quickly and easily implement a computing environment similar to XSEDE computing resources. The XCBC system consists of the Rocks Cluster Manager, developed at the San Diego Supercomputer Center for use on Gordon and Comet, and an XSEDE-specific "Rocks Roll", containing a selection of libraries, compilers, and scientific software curated by the Campus Bridging (CB) group in the XSEDE project, kept current with those implemented on XSEDE resources. The Campus Bridging team has helped several universities implement the XCBC, and finds the design to be extremely useful for resource-limited (in time, administrator knowledge, or funding) research groups or institutions. Here, we detail our recent experiences in implementing the XCBC design at university campuses across the country. These XCBC implementations were carried out with Campus Bridging staff traveling on-site to the partner institutions to directly assist with the cluster build. In implementing XCBC on campuses, we found that a number of the needs described by campus communities as well as the broader cyberinfrastructure community are solved by technical means, although financial issues remain. The remaining issue to be addressed is technical interoperation between systems, and we describe efforts to improve it here.}, bibtype = {inproceedings}, author = {Coulter, E. and Fischer, J. and Hallock, B. and Knepper, R. and Stewart, C.}, doi = {10.1145/2949550.2949570}, booktitle = {Proceedings of the XSEDE16 Conference on Diversity, Big Data, and Science at Scale (XSEDE16)} }
@inproceedings{ title = {Evaluation of SMP shared memory machines for use with in-memory and OpenMP big data applications}, type = {inproceedings}, year = {2016}, keywords = {Application programming interfaces (API); Astrophy,Distributed computer systems,ScaleMP; SGI UV; Symmetric multi processing; Virt}, pages = {1597-1606}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991677893&doi=10.1109%2FIPDPSW.2016.133&partnerID=40&md5=f5680d15f3df0b6086275edcd9a8457e}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, id = {2ace4030-3e18-38a6-b238-79b6e774b66d}, created = {2018-02-27T18:07:29.162Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.162Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Younge20161597}, source_type = {conference}, notes = {cited By 0; Conference of 30th IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2016 ; Conference Date: 23 May 2016 Through 27 May 2016; Conference Code:122812}, private_publication = {false}, abstract = {While distributed memory systems have shaped the field of distributed systems for decades, the demand for many-core shared memory resources is increasing. Symmetric Multiprocessor Systems (SMPs) have become increasingly important recently among a wide array of disciplines, ranging from Bioinformatics to astrophysics, and beyond. With the increase in big data computing, the size and scope of traditional commodity server systems is often outpaced. While some big data applications can be mapped to distributed memory systems found through many cluster and cloud technologies today, this effort represents a large barrier of entry that some projects cannot cross. Shared memory SMP systems look to effectively and efficiently fill this niche within distributed systems by providing high throughput and performance with minimized development effort, as the computing environment often represents what many researchers are already familiar with. In this paper, we look at the use of two common shared memory systems, the ScaleMP vSMP virtualized SMP deployment at Indiana University, and the SGI UV architecture deployed at University of Arizona. While both systems are notably different in their design, their potential impact on computing is remarkably similar. As such, we look to compare each system first under a set of OpenMP threaded benchmarks via the SPEC group, and to follow up with our experience using each machine for Trinity de-novo assembly. We find both SMP systems are well suited to support various big data applications, with the newer vSMP deployment often slightly faster; however, certain caveats and performance considerations are necessary when considering such SMP systems. © 2016 IEEE.}, bibtype = {inproceedings}, author = {Younge, A J and Reidy, C and Henschel, R and Fox, G C}, doi = {10.1109/IPDPSW.2016.133}, booktitle = {Proceedings - 2016 IEEE 30th International Parallel and Distributed Processing Symposium, IPDPS 2016} }
@article{ title = {From describing to prescribing parallelism: Translating the SPEC ACCEL OpenACC suite to OpenMP target directives}, type = {article}, year = {2016}, keywords = {Application programming interfaces (API); Codes (s,Benchmarking,Offloading; Openacc; OpenMP; SPEC; SPEC ACCEL}, pages = {470-488}, volume = {9945 LNCS}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992549244&doi=10.1007%2F978-3-319-46079-6_33&partnerID=40&md5=9655cf681ac699c57e5a2bdef2a48e0c}, publisher = {Springer Verlag}, id = {f059e596-2315-362f-b090-9d4dc678fd81}, created = {2018-02-27T18:07:30.078Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.078Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Juckeland2016470}, source_type = {article}, notes = {cited By 0; Conference of International Workshops on High Performance Computing, ISC High Performance 2016 and Workshop on 2nd International Workshop on Communication Architectures at Extreme Scale, ExaComm 2016, Workshop on Exascale Multi/Many Core Computing Systems, E-MuCoCoS 2016, HPC I/O in the Data Center, HPC-IODC 2016, Application Performance on Intel Xeon Phi – Being Prepared for KNL and Beyond, IXPUG 2016, International Workshop on OpenPOWER for HPC, IWOPH 2016, International Workshop on Performance Portable Programming Models for Accelerators, P^3MA 2016, Workshop on Virtualization in High-Performance Cloud Computing, VHPC 2016, Workshop on Performance and Scalability of Storage Systems, WOPSSS 2016 ; Conference Date: 19 June 2016 Through 23 June 2016; Conference Code:185039}, private_publication = {false}, abstract = {Current and next generation HPC systems will exploit accelerators and self-hosting devices within their compute nodes to accelerate applications. This comes at a time when programmer productivity and the ability to produce portable code has been recognized as a major concern. One of the goals of OpenMP and OpenACC is to allow the user to specify parallelism via directives so that compilers can generate device specific code and optimizations. However, the challenge of porting codes becomes more complex because of the different types of parallelism and memory hierarchies available on different architectures. In this paper we discuss our experience with porting the SPEC ACCEL benchmarks from OpenACC to OpenMP 4.5 using a performance portable style that lets the compiler make platform-specific optimizations to achieve good performance on a variety of systems. The ported SPEC ACCEL OpenMP benchmarks were validated on different platforms including Xeon Phi, GPUs and CPUs. We believe that this experience can help the community and compiler vendors understand how users plan to write OpenMP 4.5 applications in a performance portable style. © Springer International Publishing AG 2016.}, bibtype = {article}, author = {Juckeland, G and Hernandez, O and Jacob, A C and Neilson, D and Larrea, V G V and Wienke, S and Bobyr, A and Brantley, W C and Chandrasekaran, S and Colgrove, M and Grund, A and Henschel, R and Joubert, W and Müller, M S and Raddatz, D and Shelepugin, P and Whitney, B and Wang, B and Kumaran, K}, editor = {Mohr B. Kunkel J.M., Taufer M}, doi = {10.1007/978-3-319-46079-6_33}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@techreport{ title = {Pervasive Technology Institute Annual Report: Research Innovations and Advanced Cyberinfrastructure Services in Support of IU Strategic Goals During FY 2016}, type = {techreport}, year = {2016}, id = {54b3f0f7-33e6-3121-99bc-558201680cb5}, created = {2018-02-27T18:07:30.803Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.594Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2016j}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Plale, Beth and Welch, Von and Fox, Geoffrey C and Link, Matthew R and Miller, Therese and Wernert, Eric A and Boyles, Michael J and Fulton, Ben and Hancock, David Y} }
@article{ title = {Comparing the consumption of CPU hours with scientific output for the extreme science and engineering discovery environment (XSEDE)}, type = {article}, year = {2016}, keywords = {Authorship; Bibliometrics; Computer Terminals; Co,astronomy; atmosphere; biology; chemistry; consume}, volume = {11}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84976348924&doi=10.1371%2Fjournal.pone.0157628&partnerID=40&md5=f0a4ef8d94ed9e9c752ef2a2a46801ca}, publisher = {Public Library of Science}, id = {09c7daca-e110-374a-ab0c-5cc1fcc5ad72}, created = {2018-02-27T18:07:31.690Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.690Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper2016}, source_type = {article}, notes = {cited By 0}, private_publication = {false}, abstract = {This paper presents the results of a study that compares resource usage with publication output using data about the consumption of CPU cycles from the Extreme Science and Engineering Discovery Environment (XSEDE) and resulting scientific publications for 2,691 institutions/teams. Specifically, the datasets comprise a total of 5,374,032,696 central processing unit (CPU) hours run in XSEDE during July 1, 2011 to August 18, 2015 and 2,882 publications that cite the XSEDE resource. Three types of studies were conducted: a geospatial analysis of XSEDE providers and consumers, co-authorship network analysis of XSEDE publications, and bi-modal network analysis of how XSEDE resources are used by different research fields. Resulting visualizations show that a diverse set of consumers make use of XSEDE resources, that users of XSEDE publish together frequently, and that the users of XSEDE with the highest resource usage tend to be "traditional" high-performance computing (HPC) community members from astronomy, atmospheric science, physics, chemistry, and biology. © 2016 Knepper, Börner.}, bibtype = {article}, author = {Knepper, R and Börner, K}, doi = {10.1371/journal.pone.0157628}, journal = {PLoS ONE}, number = {6} }
@techreport{ title = {Updated Acceptance Test Results for the Jetstream Production Environment}, type = {techreport}, year = {2016}, id = {a6b686fa-90ea-3c42-aded-14c02f922bf5}, created = {2018-02-27T18:07:32.682Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.668Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hancock2016b}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Hancock, David Y and Packard, Michael and Turner, George and Stewart, Craig A} }
@techreport{ title = {Jetstream (NSF Award 1445604) Program Year 2 Annual Report (Dec 1, 2015–Nov 30, 2016)}, type = {techreport}, year = {2016}, id = {f5409edc-0f92-3e51-bc8a-15f13a24cc20}, created = {2018-02-27T18:07:33.179Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.383Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2016i}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Hancock, David Y and Vaughn, Matthew and Merchant, Nirav and Lowe, John Michael and Fischer, Jeremy and Liming, Lee and Taylor, James and Afgan, Enis and Turner, George} }
@inproceedings{ title = {Situating cyberinfrastructure in the public realm: The teragrid and XSEDE projects}, type = {inproceedings}, year = {2016}, keywords = {Case study methodologies; Cyberinfrastructure; C,Complex networks,Sustainable development}, pages = {129-135}, volume = {08-10-June}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978763642&doi=10.1145%2F2912160.2912209&partnerID=40&md5=1b96431d39ddd31f8ff8beb57840aa2f}, publisher = {Association for Computing Machinery}, id = {cbd69dca-2029-3283-bbd2-d941dfec9b2f}, created = {2018-02-27T18:07:33.221Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.221Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper2016129}, source_type = {conference}, notes = {cited By 0; Conference of 17th Annual International Conference on Digital Government Research, dg.o 2016 ; Conference Date: 8 June 2016 Through 10 June 2016; Conference Code:122243}, private_publication = {false}, abstract = {We examine the evolution of one NSF-funded cyberinfrastructure organization, the TeraGrid, into its successor organization, the Extreme Science and Engineering Discovery Environment (XSEDE), via a case study methodology with two different frames of reference: virtual organization literature and public management networks theory. Our aim in examining these two projects is to situate the study of cyberinfrastructure in support of basic science in both the virtual organization and public management networks literature. We find that our cases confirm the utility of hierarchy when dealing with complexity and that expert use of ICT for governance tasks improves network capacity, especially when time-tested technologies are available in the environment. These ICT resources can be used for monitoring of both service delivery functions as well as network health. We also note that the establishment of common inter-organizational standards facilitates interoperability between organizations, allowing sustainability and modularity of network members. © 2016 ACM.}, bibtype = {inproceedings}, author = {Knepper, R and Chen, Y.-C.}, editor = {Kim Y., Liu S M}, doi = {10.1145/2912160.2912209}, booktitle = {ACM International Conference Proceeding Series} }
@inproceedings{ title = {Jetstream}, type = {inproceedings}, year = {2016}, keywords = {Cloud computing,Long tail of science,National Science Foundation,OpenStack,Virtual machines}, pages = {1-8}, volume = {17-21-July}, websites = {http://dl.acm.org/citation.cfm?doid=2949550.2949639}, month = {7}, publisher = {ACM Press}, day = {17}, city = {New York, New York, USA}, id = {53fc2c14-94ee-344f-8423-f227c2a39e97}, created = {2019-09-12T19:01:23.592Z}, accessed = {2019-09-12}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:27:36.309Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Jetstream is a first-of-a-kind system for the NSF - a distributed production cloud resource. The NSF awarded funds to create Jetstream in November 2014. Here we review the purpose for creating Jetstream, present the acceptance test results that define Jetstream's key characteristics, describe our experiences in standing up an OpenStack-based cloud environment, and share some of the early scientific results that have been obtained by researchers and students using this system. Jetstream offers unique capability within the XSEDE-supported US national cyberinfrastructure, delivering interactive virtual machines (VMs) via the Atmosphere interface developed by the University of Arizona. As a multi-region deployment that operates as a single integrated system, Jetstream is proving effective in supporting modes and disciplines of research traditionally underrepresented on larger XSEDE-supported clusters and supercomputers. Already, researchers in biology, network science, economics, earth science, and computer science have used Jetstream to perform research -much of it research in the "long tail of science.".}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Stanzione, Daniel C. and Taylor, James and Skidmore, Edwin and Hancock, David Y. and Vaughn, Matthew and Fischer, Jeremy and Cockerill, Tim and Liming, Lee and Merchant, Nirav and Miller, Therese and Lowe, John Michael}, doi = {10.1145/2949550.2949639}, booktitle = {Proceedings of the XSEDE16 on Diversity, Big Data, and Science at Scale - XSEDE16} }
@inproceedings{ title = {A PetaFLOPS supercomputer as a campus resource: Innovation, impact, and models for locally-owned high performance computing at research colleges and universities}, type = {inproceedings}, year = {2016}, keywords = {HPC,Impact,Innovation,Petaops,Supercomputing,University,Value,analytics}, pages = {61-68}, volume = {06-09-Nove}, month = {11}, publisher = {Association for Computing Machinery}, day = {1}, id = {eb36b9e6-237e-3659-82ff-2f35fa2d4bc9}, created = {2019-12-02T19:29:58.700Z}, accessed = {2019-12-02}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-12-02T19:29:58.801Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {In 1997, Indiana University (IU) began a purposeful and steady drive to expand the use of supercomputers and what we now call cyberinfrastructure. In 2001, IU implemented the first 1 TFLOPS supercomputer owned by and operated for a single US University. In 2013, IU made an analogous investment and achievement at the 1 PFLOPS level: Big Red II, a Cray XE6/XK7, was the first supercomputer capable of 1 PFLOPS (theoretical) performance that was a dedicated university resource [2]. IU's high performance computing (HPC) resources have fostered innovation in disciplines from biology to chemistry to medicine. Currently, 185 disciplines and sub-disciplines are represented on Big Red II with a wide variety of usage needs. Quantitative data suggest that investment in this supercomputer has been a good value to IU in terms of academic achievement and federal grant income. Here we will discuss how investment in Big Red II has benefited IU, and argue that locally-owned computational resources (scaled appropriately to needs and budgets) may be of benefit to many colleges and universities. We will also discuss software tools under development that will aid others in quantifying the benefit of investment in high performance computing to their campuses.}, bibtype = {inproceedings}, author = {Thota, Abhinav and Fulton, Ben and Weakley, Le Mai and Henschel, Robert and Hancock, David and Allen, Matt and Tillotson, Jenett and Link, Matt and Stewart, Craig A.}, doi = {10.1145/2974927.2974956}, booktitle = {Proceedings ACM SIGUCCS User Services Conference} }
@inproceedings{ title = {Authentication and Authorization Considerations for a Multi-tenant Service}, type = {inproceedings}, year = {2015}, pages = {29-35}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962377238&doi=10.1145%2F2753524.2753534&partnerID=40&md5=06c617703f402a57e3922f8a290ed55d,http://dl.acm.org/citation.cfm?doid=2753524.2753534}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {e0b8e1d9-4237-384d-b101-249173d6d02f}, created = {2018-02-27T18:07:25.725Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T14:53:04.278Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Heiland201529}, source_type = {conference}, notes = {cited By 3; Conference of 1st Workshop on the Science of Cyberinfrastructure: Research, Experience, Applications and Models, SCREAM 2015 ; Conference Date: 16 June 2015; Conference Code:116136}, private_publication = {false}, abstract = {Distributed cyberinfrastructure requires users (and machines) to perform some sort of authentication and authorization (together simply known as auth). In the early days of computing, authentication was performed with just a username and password combination, and this is still prevalent today. But during the past several years, we have seen an evolution of approaches and protocols for auth: Kerberos, SSH keys, X.509, OpenID, API keys, OAuth, and more. Not surprisingly, there are trade-offs, both technical and social, for each approach. The NSF Science Gateway communities have had to deal with a variety of auth issues. However, most of the early gateways were rather restrictive in their model of access and development. The practice of using community credentials (certificates), a well-intentioned idea to alleviate restrictive access, still posed a barrier to researchers and challenges for security and auditing. And while the web portal-based gateway clients offered users easy access from a browser, both the interface and the back-end functionality were constrained in the flexibility and extensibility they could provide. Designing a well-defined application programming interface (API) to fine-grained, generic gateway services (on secure, hosted cyberinfrastructure), together with an auth approach that has a lower barrier to entry, will hopefully present a more welcoming environment for both users and developers. This paper provides a review and some thoughts on these topics, with a focus on the role of auth between a Science Gateway and a service provider.}, bibtype = {inproceedings}, author = {Heiland, Randy and Koranda, Scott and Marru, Suresh and Pierce, Marlon and Welch, Von}, doi = {10.1145/2753524.2753534}, booktitle = {Proceedings of the 1st Workshop on The Science of Cyberinfrastructure Research, Experience, Applications and Models - SCREAM '15} }
@techreport{ title = {UITS Research Technologies update}, type = {techreport}, year = {2015}, id = {413b81ef-2ea6-3c30-a9b8-fe1aea89bb32}, created = {2018-02-27T18:07:26.064Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.272Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2015c}, source_type = {JOUR}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A} }
@techreport{ title = {Use of IU parallel computing resources and high performance file systems-July 2013 to Dec 2014}, type = {techreport}, year = {2015}, id = {2b95fa1d-8bb2-3310-9512-9d7aab250567}, created = {2018-02-27T18:07:26.328Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.796Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Link2015}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Link, Matthew R and Simms, Stephen C and Stewart, Craig A and Henschel, Robert and Fulton, Benjamin} }
@article{ title = {Science gateways today and tomorrow: Positive perspectives of nearly 5000 members of the research community}, type = {article}, year = {2015}, keywords = {Web interfaces,cyberinfrastructure,portals,science/engineering gateways,software development,survey}, pages = {4252-4268}, volume = {27}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944911705&doi=10.1002%2Fcpe.3526&partnerID=40&md5=4dc5238c285d05e8713efa05ff42d452}, publisher = {John Wiley and Sons Ltd}, id = {a6df8df4-e5c1-39e9-8659-d2f01919c145}, created = {2018-02-27T18:07:26.403Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.403Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Lawrence20154252}, source_type = {article}, notes = {cited By 11}, private_publication = {false}, abstract = {Science gateways are digital interfaces to advanced technologies that support science/engineering research/education. Frequently implemented as Web and mobile applications, they provide access to community resources such as software, data, collaboration tools, instrumentation, and high-performance computing. We anticipate opportunities for growth within a fragmented community. Through a large-scale survey, we measured the extent and characteristics of the gateway community (reliance on gateways and nature of existing resources) to understand useful services and support for builders and users. We administered an online survey to nearly 29,000 principal investigators, senior administrators, and people with gateway affiliations. Nearly 5000 respondents represented diverse expertise and geography. The majority of researchers/educators indicated that specialized online resources were important to their work. They choose technologies by asking colleagues and looking for documentation, demonstrated reliability, and technical support; adaptability via customizing or open-source standards was another priority. Research groups commonly provide their own resources, but public/academic institutions and commercial services also provide substantial offerings. Application creators and administrators welcome external services providing guidance such as technology selection, sustainability planning, evaluation, and specialized expertise (e.g., quality assurance and design). Technologies are diverse, so flexibility and ongoing community input are essential, as is offering specific, easy-to-access training, community support, and professional development. Copyright (c) 2015 John Wiley & Sons, Ltd.}, bibtype = {article}, author = {Lawrence, Katherine A. and Zentner, Michael and Wilkins-Diehr, Nancy and Wernert, Julie A. and Pierce, Marlon and Marru, Suresh and Michael, Scott}, doi = {10.1002/cpe.3526}, journal = {Concurrency and Computation: Practice and Experience}, number = {16} }
@article{ title = {Big Data on Ice: The Forward Observer System for In-flight Synthetic Aperture Radar Processing}, type = {article}, year = {2015}, pages = {1504-1513}, volume = {51}, websites = {http://hdl.handle.net/2022/20471,http://www.sciencedirect.com/science/article/pii/S1877050915011485}, month = {11}, publisher = {Elsevier}, id = {21449d3b-613e-3f0d-9517-64b2e912855b}, created = {2018-02-27T18:07:26.484Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:11.924Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper2015a}, source_type = {article}, private_publication = {false}, abstract = {We introduce the Forward Observer system, which is designed to provide data assurance in field data acquisition while receiving significant amounts (several terabytes per flight) of Synthetic Aperture Radar data during flights over the polar regions, which provide unique requirements for developing data collection and processing systems. Under polar conditions in the field and given the difficulty and expense of collecting data, data retention is absolutely critical. Our system provides a storage and analysis cluster with software that connects to field instruments via standard protocols, replicates data to multiple stores automatically as soon as it is written, and provides pre-processing of data so that initial visualizations are available immediately after collection, where they can provide feedback to researchers in the aircraft during the flight.}, bibtype = {article}, author = {Knepper, Richard and Link, Matthew R and Standish, Matthew}, doi = {10.1016/J.PROCS.2015.05.340}, journal = {Procedia Computer Science} }
@techreport{ title = {Acceptance Test for Jetstream Test Cluster — Jetstream-Arizona (JA) Dell PowerEdge Test and Development Cluster}, type = {techreport}, year = {2015}, websites = {http://hdl.handle.net/2022/20355}, month = {8}, institution = {Indiana University}, id = {8d87fa3a-717d-3711-8633-cb12e11af168}, created = {2018-02-27T18:07:26.687Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.687Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {techreport}, notes = {PTI Technical Report - PTI-TR15-007}, private_publication = {false}, abstract = {This paper details the system description and the performance targets, methods used to perform the acceptance tests, and the achieved performance of the Jetstream test cluster.}, bibtype = {techreport}, author = {Hancock, D Y and Link, M R and Stewart, C A and Turner, G W} }
@inproceedings{ title = {Research proposal: Barriers to new user and new domain adoption of the XSEDE cyberinfrastructure}, type = {inproceedings}, year = {2015}, keywords = {Barriers to adoption; Computational resources; Cy,Engineering research,Information systems}, pages = {1023-1028}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84958093783&doi=10.15439%2F2015F244&partnerID=40&md5=4978f6decd6b66dfc9e05cb107ba922a}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, id = {83124f02-702a-3bdb-8632-e04a3a454501}, created = {2018-02-27T18:07:26.873Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.873Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper20151023}, source_type = {conference}, notes = {cited By 0; Conference of Federated Conference on Computer Science and Information Systems, FedCSIS 2015 ; Conference Date: 13 September 2015 Through 16 September 2015; Conference Code:117625}, private_publication = {false}, abstract = {This research proposal proposes the examination of user attitudes about the Extreme Science and Engineering Discovery Environment (XSEDE). The XSEDE project supports basic research with a common system for making use of national cyberinfrastructure. The systems and infrastructure that make XSEDE useful for researchers are part of an actor network: these systems are socially constructed and they play their own part in the work of XSEDE, and in turn have an effect on the progress of basic research. I have completed previous work on the user relationships in the predecessor to XSEDE, the TeraGrid, and currently carry out participant observation with the management groups of the XSEDE project. By understanding the barriers to adoption of XSEDE by new researchers and new scientific domains, I hope to explore the linkage between resources (in this case computational resources) and scientific outputs. © 2015, IEEE.}, bibtype = {inproceedings}, author = {Knepper, R}, editor = {Paprzycki M. Maciaszek L., Ganzha M Maciaszek L}, doi = {10.15439/2015F244}, booktitle = {Proceedings of the 2015 Federated Conference on Computer Science and Information Systems, FedCSIS 2015} }
@article{ title = {Performance Optimization for the Trinity RNA-Seq Assembler}, type = {article}, year = {2015}, websites = {http://hdl.handle.net/2022/20490}, publisher = {Springer}, id = {5c9e7b4f-d9a2-3b72-b25a-2cdab705f453}, created = {2018-02-27T18:07:27.239Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:27.239Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {article}, medium = {(to appear)}, notes = {Article from presentation at 9th Parallel Tools Workshop, September 2-3, 2015 in Dresden, Germany}, private_publication = {false}, abstract = {Utilizing the enormous computing resources of high performance computing systems is anything but a trivial task. Performance analysis tools are designed to assist developers in this challenging task by helping to understand the application behavior and identify critical performance issues. In this paper we share our efforts and experiences in analyzing and optimizing Trinity, a well-established framework for the de novo reconstruction of transcriptomes from RNA-seq reads. Thereby, we try to reflect all aspects of the ongoing performance engineering: the identification of optimization targets, the code improvements resulting in 20% overall runtime reduction, as well as the challenges we encountered getting there.}, bibtype = {article}, author = {Wagner, M and Fulton, B and Henschel, R}, journal = {Tools for High Performance Computing} }
@inproceedings{ title = {Cyberinfrastructure resources enabling creation of the loblolly pine reference transcriptome}, type = {inproceedings}, year = {2015}, pages = {1-6}, volume = {2015-July}, websites = {http://hdl.handle.net/2022/20488,http://dl.acm.org/citation.cfm?doid=2792745.2792748}, month = {7}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {6b4836e2-d4c4-3ea5-9bae-dd3e676b1eb0}, created = {2018-02-27T18:07:28.227Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-11T16:09:52.924Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wu2015a}, source_type = {inproceedings}, private_publication = {false}, abstract = {Today's genomics technologies generate more sequence data than ever before possible, and at substantially lower costs, serving researchers across biological disciplines in transformative ways. Building transcriptome assemblies from RNA sequencing reads is one application of next-generation sequencing (NGS) that has held a central role in biological discovery in both model and non-model organisms, with and without whole genome sequence references. A major limitation in effective building of transcriptome references is no longer the sequencing data generation itself, but the computing infrastructure and expertise needed to assemble, analyze and manage the data. Here we describe a currently available resource dedicated to achieving such goals, and its use for extensive RNA assembly of up to 1.3 billion reads representing the massive transcriptome of loblolly pine, using four major assembly software installations. The Mason cluster, an XSEDE second tier resource at Indiana University, provides the necessary fast CPU cycles, large memory, and high I/O throughput for conducting large-scale genomics research. The National Center for Genome Analysis Support, or NCGAS, provides technical support in using HPC systems, bioinformatic support for determining the appropriate method to analyze a given dataset, and practical assistance in running computations. We demonstrate that a sufficient supercomputing resource and good workflow design are elements that are essential to large eukaryotic genomics and transcriptomics projects such as the complex transcriptome of loblolly pine, gene expression data that inform annotation and functional interpretation of the largest genome sequence reference to date.}, bibtype = {inproceedings}, author = {Wu, Le-Shin and Ganote, Carrie L. and Doak, Thomas G and Barnett, William and Mockaitis, Keithanne and Stewart, Craig A}, doi = {10.1145/2792745.2792748}, booktitle = {Proceedings of the 2015 XSEDE Conference on Scientific Advancements Enabled by Enhanced Cyberinfrastructure - XSEDE '15} }
@article{ title = {SPEC ACCEL: A standard application suite for measuring hardware accelerator performance}, type = {article}, year = {2015}, keywords = {Acceleration; Electric power measurement; Hardware,Benchmarking,Hardware accelerators; Openacc; OpenCL; Performan}, pages = {46-67}, volume = {8966}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84942519551&doi=10.1007%2F978-3-319-17248-4_3&partnerID=40&md5=45e24ea1a1e933c5be9ffb4b9c7a137b}, publisher = {Springer Verlag}, id = {7e6b8911-6d96-308b-a357-32552ee78667}, created = {2018-02-27T18:07:28.696Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.696Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Juckeland201546}, source_type = {article}, notes = {cited By 8; Conference of 5th International Workshop on Performance Modeling, Benchmarking, and Simulation of High Performance Computing Systems, PMBS 2014 ; Conference Date: 16 November 2014 Through 16 November 2014; Conference Code:142329}, private_publication = {false}, abstract = {Hybrid nodes with hardware accelerators are becoming very common in systems today. Users often find it difficult to characterize and understand the performance advantage of such accelerators for their applications. The SPEC High Performance Group (HPG) has developed a set of performance metrics to evaluate the performance and power consumption of accelerators for various science applications. The new benchmark comprises two suites of applications written in OpenCL and OpenACC and measures the performance of accelerators with respect to a reference platform. The first set of published results demonstrate the viability and relevance of the new metrics in comparing accelerator performance. This paper discusses the benchmark suites and selected published results in great detail. © Springer International Publishing Switzerland 2015.}, bibtype = {article}, author = {Juckeland, G and Brantley, W and Chandrasekaran, S and Chapman, B and Che, S and Colgrove, M and Feng, H and Grund, A and Henschel, R and Hwu, W.-M.W. and Li, H and Müller, M S and Nagel, W E and Perminov, M and Shelepugin, P and Skadron, K and Stratton, J and Titov, A and Wang, K and Van Waveren, M and Whitney, B and Wienke, S and Xu, R and Kumaran, K}, editor = {Hammond S.D. Jarvis S.A., Wright S A}, doi = {10.1007/978-3-319-17248-4_3}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Cyberinfrastructure Begins at Home}, type = {inproceedings}, year = {2015}, websites = {http://hdl.handle.net/2022/20139}, id = {68c62c35-3b05-39b7-80dc-d9147af7a0db}, created = {2018-02-27T18:07:28.993Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.993Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, abstract = {This presentation at the SPXXL winter workshop looks at cyberinfrastructure at Indiana University. It begins with an extensive look at IU's background and discusses where we are now, how we got here, and where we think we are going. It also covers some generalizations as well as looking at specific services and projects at IU including participation in XSEDE and the Jetstream system.}, bibtype = {inproceedings}, author = {Hancock, D Y and Henschel, R and Kallback-Rose, K and Stewart, C A}, booktitle = {SPXXL Winter Workshop} }
@inproceedings{ title = {Sustained software for cyberinfrastructure: Analyses of successful efforts with a focus on NSF-funded software}, type = {inproceedings}, year = {2015}, pages = {63-72}, websites = {http://dl.acm.org/citation.cfm?doid=2753524.2753533}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {1545514c-e872-3487-86c7-56fa4db90846}, created = {2018-02-27T18:07:29.628Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:58:49.848Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2015g}, private_publication = {false}, abstract = {Reliable software that provides needed functionality is clearly essential for a comprehensive, balanced, and flexible distributed cyberinfrastructure (CI). Effective distributed cyberinfrastructure, in turn, supports science and engineering applications. The purpose of this study was to understand what factors lead to software projects being well sustained over the long run, focusing on software created with funding from the US National Science Foundation (NSF) and/or used by researchers funded by the NSF. We surveyed NSF-funded researchers and performed in-depth studies of software projects that have been sustained over many years. Successful projects generally used open-source software licenses and employed good software engineering practices and test practices. However, many projects that have not been well sustained over time also met these criteria. The features that stood out about successful projects included deeply committed leadership and some sort of user forum or conference at least annually. In some cases, software project leaders have employed multiple financial strategies over the course of a decades-old software project. Such well-sustained software is used in major distributed CI projects that support thousands of users, and this software is critical to the operation of major distributed CI facilities in the US. The findings of our study identify some characteristics of software that is relevant to the NSF-supported research community, and that has been sustained over many years.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Barnett, William K. and Wernert, Eric A. and Wernert, Julie A. and Welch, Von and Knepper, Richard}, doi = {10.1145/2753524.2753533}, booktitle = {Proceedings of the 1st Workshop on The Science of Cyberinfrastructure Research, Experience, Applications and Models - SCREAM '15} }
@inproceedings{ title = {Facilitating scientific collaborations by delegating identity management: Reducing barriers & roadmap for incremental implementation}, type = {inproceedings}, year = {2015}, keywords = {Access control,Cyber security; Delegation; Identity; Identity ma,Risk management}, pages = {15-19}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84979747691&doi=10.1145%2F2752499.2752501&partnerID=40&md5=83a416d313602ca3c75ba6528f549833}, publisher = {Association for Computing Machinery, Inc}, id = {d04e8c7a-eab1-3847-8d16-8aa50be523a3}, created = {2018-02-27T18:07:29.817Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.817Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Cowles201515}, source_type = {conference}, notes = {cited By 0; Conference of 2nd Workshop on Changing Landscapes in HPC Security, CLHS 2015 ; Conference Date: 16 June 2015; Conference Code:122511}, private_publication = {false}, abstract = {DOE Labs are often presented with conflicting requirements for providing services to scientific collaboratories. An identity management model involving transitive trust is increasingly common. We show how existing policies allow for increased delegation of identity management within an acceptable risk management framework. Specific topics addressed include deemed exports, DOE orders, Inertia and Risk, Traceability, and Technology Limitations. Real life examples of an incremental approach to implementing transitive trust are presented.}, bibtype = {inproceedings}, author = {Cowles, R and Jackson, C and Welch, V}, doi = {10.1145/2752499.2752501}, booktitle = {CLHS 2015 - Proceedings of the 2015 Workshop on Changing Landscapes in HPC Security, Part of HPDC 2015} }
@techreport{ title = {Use of IU advanced computational systems–parallelism, job mixes, and queue wait times}, type = {techreport}, year = {2015}, id = {e6730e94-0af7-399a-9126-c142b9fbd4bb}, created = {2018-02-27T18:07:30.535Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.443Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Link2015a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Link, Matthew R and Henschel, Robert and Hancock, David Y and Stewart, Craig A} }
@techreport{ title = {IU PTI/UITS Research Technologies Annual Report: FY 2014}, type = {techreport}, year = {2015}, id = {8971e85d-5b7c-3647-afec-b23d436dd5f8}, created = {2018-02-27T18:07:31.743Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.937Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2015e}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Miller, Therese} }
@inproceedings{ title = {Programmable Immersive Peripheral Environmental System (PIPES): A prototype control system for environmental feedback devices}, type = {inproceedings}, year = {2015}, keywords = {Computer programming; Control systems; Sensory fee,Electronic systems; End to end latencies; Environ,Virtual reality}, volume = {9392}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928473397&doi=10.1117%2F12.2083410&partnerID=40&md5=2ae847efbe40d8d9e0a6ac4d2ed3f04e}, publisher = {SPIE}, id = {26e816c0-b9fa-3e4e-87e8-ab84f338ae7b}, created = {2018-02-27T18:07:32.133Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.133Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Frend2015}, source_type = {conference}, notes = {cited By 0; Conference of Engineering Reality of Virtual Reality 2015 ; Conference Date: 9 February 2015 Through 10 February 2015; Conference Code:111961}, private_publication = {false}, abstract = {This paper describes an environmental feedback device (EFD) control system aimed at simplifying the VR development cycle. Programmable Immersive Peripheral Environmental System (PIPES) affords VR developers a custom approach to programming and controlling EFD behaviors while relaxing the required knowledge and expertise of electronic systems. PIPES has been implemented for the Unity engine and features EFD control using the Arduino integrated development environment. PIPES was installed and tested on two VR systems, a large format CAVE system and an Oculus Rift HMD system. A photocell based end-to-end latency experiment was conducted to measure latency within the system. This work extends previously unpublished prototypes of a similar design. Development and experiments described in this paper are part of the VR community goal to understand and apply environment effects to VEs that ultimately add to users' perceived presence. © 2015 SPIE-IS&T.}, bibtype = {inproceedings}, author = {Frend, C and Boyles, M}, editor = {Dolinsky, M. and McDowall, I. E.}, doi = {10.1117/12.2083410}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering} }
@article{ title = {Scalability testing of DNE2 in Lustre 2.7}, type = {article}, year = {2015}, id = {4e062fe8-744c-35b9-a3ba-86dfacc0d016}, created = {2018-02-27T18:07:32.275Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.771Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Crowe2015}, source_type = {JOUR}, private_publication = {false}, bibtype = {article}, author = {Crowe, Tom and Lavender, Nathan and Simms, Stephen}, journal = {Lustre Users Group} }
@techreport{ title = {Results of 2013 Survey of Parallel Computing Needs Focusing on NSF-funded Researchers}, type = {techreport}, year = {2015}, id = {d91512ba-e29c-3fd2-b637-01c842436828}, created = {2018-02-27T18:07:32.630Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.216Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2015f}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Arenson, Andrew and Fischer, Jeremy and Link, Matthew R and Michael, Scott A and Wernert, Julie A} }
@techreport{ title = {Workshop Report: Campus Bridging: Reducing Obstacles on the Path to Big Answers 2015}, type = {techreport}, year = {2015}, websites = {http://hdl.handle.net/2022/20538}, month = {9}, city = {Bloomington, IN}, institution = {Indiana University}, id = {1ad03aae-8aca-34fa-8902-b74d4ae1cf12}, created = {2018-02-27T18:07:33.442Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.442Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {techreport}, private_publication = {false}, abstract = {For the researcher whose experiments require large-scale cyberinfrastructure, there exist significant challenges to successful completion. These challenges are broad and go far beyond the simple issue that there are not enough large-scale resources available; these solvable issues range from a lack of documentation written for a non-technical audience to a need for greater consistency with regard to system configuration and consistent software configuration and availability on the large-scale resources at national tier supercomputing centers, with a number of other challenges existing alongside the ones mentioned here. Campus Bridging is a relatively young discipline that aims to mitigate these issues for the academic end-user, for whom the entire process can feel like a path comprised entirely of obstacles. The solutions to these problems must by necessity include multiple approaches, with focus not only on the end user but on the system administrators responsible for supporting these resources as well as the systems themselves. These system resources include not only those at the supercomputing centers but also those that exist at the campus or departmental level and even on the personal computing devices the researcher uses to complete his or her work. This workshop report compiles the results of a half-day workshop, held in conjunction with IEEE Cluster 2015 in Chicago, IL.}, bibtype = {techreport}, author = {Hallock, Barbara and Knepper, Richard and Stewart, Craig A} }
@inproceedings{ title = {XCBC and XNIT - Tools for Cluster Implementation and Management in Research and Training}, type = {inproceedings}, year = {2015}, pages = {857-864}, volume = {2015-Octob}, websites = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7307692}, month = {9}, publisher = {IEEE}, id = {a173ed22-fb2b-301f-a55c-4b791e80a65f}, created = {2018-02-27T18:07:33.740Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:07:21.882Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {© 2015 IEEE. The Extreme Science and Engineering Discovery Environment has created a suite of software collectively known as the XSEDE-compatible basic cluster (XCBC). It has been distributed as a Rocks Roll for some time. The same scientific and supporting packages are available as individual RPM packages as the XSEDE National Integration Toolkit (XNIT), so they can be downloaded and installed in portions as appropriate on existing clusters. In this paper, we examine using the LittleFe design created by the Earlham College Cluster Computing Group as a teaching tool to show the deployment of XCBC from Rocks. In addition, the demonstration of the commercial Limulus HPC200 Deskside Cluster solution is shown as a viable, off-the-shelf cluster that can be adapted to become an XSEDE-like cluster through the use of the XNIT repository. The goal is to demonstrate building practical XCBCs while showing that an XCBC need not be an expensive resource to be useful.}, bibtype = {inproceedings}, author = {Fischer, Jeremy and Coulter, Eric and Knepper, Richard and Peck, Charles and Stewart, Craig A.}, doi = {10.1109/CLUSTER.2015.143}, booktitle = {2015 IEEE International Conference on Cluster Computing} }
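Because XNIT ships the XCBC software set as individual RPM packages in a YUM repository, adopting it on an existing cluster amounts to registering the repository and installing the packages a site needs. A hedged sketch of that pattern in Python follows; the repository URL and package name are placeholders, not actual XNIT locations.

import subprocess
from pathlib import Path

# Placeholder repository definition; the real XNIT baseurl differs.
REPO = """[xnit-placeholder]
name=XSEDE National Integration Toolkit (placeholder)
baseurl=https://example.org/xnit/el7/x86_64/
enabled=1
gpgcheck=0
"""

Path("/etc/yum.repos.d/xnit.repo").write_text(REPO)
# Install a hypothetical scientific package from the repository.
subprocess.run(["yum", "-y", "install", "example-xnit-package"], check=True)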
@inproceedings{ title = {XSEDE value added, cost avoidance, and return on investment}, type = {inproceedings}, year = {2015}, keywords = {Advanced cyberinfrastructure,Cost avoidance,ROI,Return on investment,Value added,XSEDE}, pages = {1-8}, volume = {2015-July}, websites = {http://dl.acm.org/citation.cfm?doid=2792745.2792768}, month = {7}, publisher = {ACM Press}, day = {26}, city = {New York, New York, USA}, id = {2cc70d8b-9586-3a51-bdd8-a797e6b8b31a}, created = {2019-08-29T18:20:53.174Z}, accessed = {2019-08-29}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-06-15T23:18:08.220Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {© 2015 Copyright is held by the owner/author(s). It is difficult for large research facilities to quantify a return on the investments that fund their operations. This is because there can be a time lag of years or decades between an innovation or discovery and the realization of its value through practical application. This report presents a three-part methodology that attempts to assess the value of federal investment in XSEDE: 1) a qualitative examination of the areas where XSEDE adds value to the activities of the open research community, 2) a "thought model" examining the cost avoidance realized by the National Science Foundation (NSF) through the centralization and coordination XSEDE provides, and 3) an assessment of the value XSEDE provides to Service Providers in the XD ecosystem. XSEDE adds significantly to the US research community because it functions as a unified interface to the XD ecosystem and because of its scale. A partly quantitative, partly qualitative analysis suggests the Return on Investment of NSF spending on XSEDE is greater than 1.0, indicating that the aggregate value received by the nation from XSEDE is greater than the cost of direct federal investment in XSEDE.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Roskies, Ralph and Knepper, Richard and Moore, Richard L. and Whitt, Justin and Cockerill, Timothy M.}, doi = {10.1145/2792745.2792768}, booktitle = {Proceedings of the 2015 XSEDE Conference on Scientific Advancements Enabled by Enhanced Cyberinfrastructure - XSEDE '15} }
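The quantitative core of the report is a simple ratio: aggregate value received divided by direct federal investment, with values above 1.0 meaning the nation receives more value than it spends. A toy computation with invented figures:

def return_on_investment(aggregate_value, direct_investment):
    # ROI greater than 1.0: aggregate value exceeds the direct investment.
    return aggregate_value / direct_investment

# Hypothetical figures in millions of dollars, not the paper's data:
print(return_on_investment(150.0, 100.0))  # 1.5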
@inproceedings{ title = {Jetstream: A Distributed Cloud Infrastructure for Under Resourced Higher Education Communities}, type = {inproceedings}, year = {2015}, keywords = {Atmosphere,Cloud,Cyberinfrastructure,Digital,EOT,EXtreme,Education,Globus,Jetstream,Outreach,Research,Training,XD,XSEDE}, pages = {53-61}, websites = {http://dl.acm.org/citation.cfm?doid=2753524.2753530}, month = {6}, publisher = {ACM Press}, day = {16}, city = {New York, New York, USA}, id = {6d111c4f-74a0-3717-9a67-e29cdb092a5d}, created = {2019-09-12T19:06:54.199Z}, accessed = {2019-09-12}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T19:33:21.147Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Copyright © 2015 ACM. The US National Science Foundation (NSF) in 2015 awarded funding for a first-of-a-kind distributed cyberinfrastructure (DCI) system called Jetstream. Jetstream will be the NSF's first production cloud for general-purpose science and engineering research and education. Jetstream, scheduled for production in January 2016, will be based on the OpenStack cloud environment software with a menu-driven interface to make it easy for users to select a pre-composed Virtual Machine (VM) to perform a particular discipline-specific analysis. Jetstream will use the Atmosphere user interface developed as part of iPlant, providing a low barrier to use by practicing scientists, engineers, educators, and students, and Globus services from the University of Chicago for seamless integration into the national cyberinfrastructure fabric. The team implementing Jetstream has as their primary mission extending the reach of the NSF's eXtreme Digital (XD) program to researchers, educators, and research students who have not previously used NSF XD program resources, including those in communities and at institutions that traditionally lack significant cyberinfrastructure resources. We will, for example, use virtual Linux Desktops to deliver DCI capabilities supporting research and research education at small colleges and universities, including Historically Black Colleges and Universities (HBCUs), Minority Serving Institutions (MSIs), Tribal colleges, and higher education institutions in states designated by the NSF as eligible for funding via the Experimental Program to Stimulate Competitive Research (EPSCoR). Jetstream will be a novel distributed cyberinfrastructure, with production components in Indiana and Texas. In particular, Jetstream will deliver virtual Linux desktops to tablet devices and PDAs with reasonable responsiveness running over cellular networks. This paper will discuss design and application plans for Jetstream as a novel Distributed CyberInfrastructure system for research education.}, bibtype = {inproceedings}, author = {Fischer, Jeremy and Tuecke, Steven and Foster, Ian and Stewart, Craig A.}, doi = {10.1145/2753524.2753530}, booktitle = {Proceedings of the 1st Workshop on The Science of Cyberinfrastructure Research, Experience, Applications and Models - SCREAM '15} }
@inproceedings{ title = {Building a Chemical-Protein Interactome on the Open Science Grid}, type = {inproceedings}, year = {2015}, pages = {15-20}, websites = {https://scitech.isi.edu/wordpress/wp-content/papercite-data/pdf/osg-splinter-2015.pdf}, id = {b2a49618-5bdb-32a7-8ec8-550443efafba}, created = {2020-04-23T05:38:21.881Z}, accessed = {2020-04-22}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-04-23T05:38:21.881Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {The Structural Protein-Ligand Interactome (SPLINTER) project predicts the interaction of thousands of small molecules with thousands of proteins. These interactions are predicted using the three-dimensional structure of the bound complex between each pair of protein and compound that is predicted by molecular docking. These docking runs consist of millions of individual short jobs each lasting only minutes. However, computing resources to execute these jobs (which cumulatively take tens of millions of CPU hours) are not readily or easily available in a cost effective manner. By looking to National Cyberinfrastructure resources, and specifically the Open Science Grid (OSG), we have been able to harness CPU power for researchers at the Indiana University School of Medicine to provide a quick and efficient solution to their unmet computing needs. Using the job submission infrastructure provided by the OSG, the docking data and simulation executable were sent to more than 100 universities and research centers worldwide. These opportunistic resources provided millions of CPU hours in a matter of days, greatly reducing docking simulation time for the research group. The overall impact of this approach allows researchers to identify small molecule candidates for individual proteins, or new protein targets for existing FDA-approved drugs and biologically active compounds.}, bibtype = {inproceedings}, author = {Quick, Rob and Hayashi, Soichi and Meroueh, Samy and Rynge, Mats and Teige, Scott and Wang, Bo and Xu, David}, doi = {10.22323/1.239.0024}, booktitle = {International Symposium on Grids and Clouds (ISGC)} }
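The workload pattern the abstract describes, millions of short independent docking jobs fanned out across OSG sites, maps naturally onto an HTCondor submit description. The sketch below only illustrates that fan-out; the executable name, file names, and batch layout are hypothetical, not SPLINTER's actual setup.

from pathlib import Path

def write_submit_file(executable, pairs, out="docking.sub"):
    # One short job per (protein, ligand) pair, HTCondor-style.
    lines = [
        f"executable = {executable}",
        "arguments = $(protein) $(ligand)",
        "output = logs/$(Cluster).$(Process).out",
        "error  = logs/$(Cluster).$(Process).err",
        "log    = logs/docking.log",
    ]
    for protein, ligand in pairs:
        lines += [f"protein = {protein}", f"ligand = {ligand}", "queue"]
    Path(out).write_text("\n".join(lines) + "\n")

write_submit_file("run_docking.sh", [("1abc.pdb", "lig001.mol2"),
                                     ("1abc.pdb", "lig002.mol2")])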
@inproceedings{ title = {Jetstream: a self-provisioned, scalable science and engineering cloud environment}, type = {inproceedings}, year = {2015}, pages = {1-8}, volume = {2015-July}, websites = {http://dl.acm.org/citation.cfm?doid=2792745.2792774}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {fc61c569-0c39-3f01-9239-65f8b8930a5a}, created = {2020-09-09T19:33:18.761Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T19:33:18.761Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2015b}, private_publication = {false}, abstract = {Copyright © 2015 ACM. Jetstream will be the first production cloud resource supporting general science and engineering research within the XD ecosystem. In this report we describe the motivation for proposing Jetstream, the configuration of the Jetstream system as funded by the NSF, the team that is implementing Jetstream, and the communities we expect to use this new system. Our hope and plan is that Jetstream, which will become available for production use in 2016, will aid thousands of researchers who need modest amounts of computing power interactively. The implementation of Jetstream should increase the size and disciplinary diversity of the US research community that makes use of the resources of the XD ecosystem.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Cockerill, Timothy M. and Foster, Ian and Hancock, David and Merchant, Nirav and Skidmore, Edwin and Stanzione, Daniel and Taylor, James and Tuecke, Steven and Turner, George and Vaughn, Matthew and Gaffney, Niall I.}, doi = {10.1145/2792745.2792774}, booktitle = {Proceedings of the 2015 XSEDE Conference on Scientific Advancements Enabled by Enhanced Cyberinfrastructure - XSEDE '15} }
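Since Jetstream is built on OpenStack, self-provisioning a VM programmatically looks like any OpenStack cloud interaction. A hedged sketch using the openstacksdk package; the cloud name, image, and flavor are hypothetical, and the production system's menu-driven Atmosphere interface hides these details from most users.

import openstack

# Credentials come from a clouds.yaml entry; the name is hypothetical.
conn = openstack.connect(cloud="my-jetstream-allocation")
server = conn.create_server(
    name="analysis-vm",
    image="ubuntu-lts",   # hypothetical image name
    flavor="m1.small",    # hypothetical flavor name
    wait=True,
)
print(server.status)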
@techreport{ title = {Pervasive Technology Institute annual report: Research innovations and advanced cyberinfrastructure services in support of IU strategic goals during FY 2015}, type = {techreport}, year = {2015}, keywords = {CACR,D2I,DSC,NCGAS,PTI,RT,Technical Report,advanced cyberinfrastructure,engagement,outreach,research,storage,students}, websites = {http://hdl.handle.net/2022/20566}, id = {2aa836d7-f26e-3d12-9aa5-12fcd73a86ca}, created = {2020-09-10T14:25:38.222Z}, accessed = {2020-09-10}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T16:51:14.461Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A. and Plale, Beth and Welch, Von and Link, Matthew R. and Miller, Therese and Wernert, Eric A. and Boyles, Michael J. and Fulton, Ben and Hancock, David Y. and Henschel, Robert and Michael, Scott A. and Pierce, Marlon and Ping, Robert J. and Gniady, Tassie and Fox, Geoffrey C. and Miksik, Gary} }
@techreport{ title = {Indiana University’s advanced cyberinfrastructure in service of IU strategic goals: Activities of the Research Technologies Division of UITS and National Center for Genome Analysis Support – two Pervasive Technology Institute cyberinfrastructure and servi}, type = {techreport}, year = {2015}, keywords = {ABITC,Clinical Affairs Schools,IUSM,NCGAS,PTI,advanced cyberinfrastructure,digital collections,engagement,health sciences,research,storage,students}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/19805}, publisher = {Indiana University}, id = {d5f4f918-b36b-3cc1-b35d-60ef145a17fe}, created = {2020-09-10T17:46:57.668Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T17:46:57.668Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A. and Plale, Beth and Welch, Von and Fox, Geoffrey C. and Link, Matthew R. and Miller, Therese and Wernert, Eric A. and Boyles, Michael J. and Fulton, Ben and Hancock, David Y. and Henschel, Robert and Michael, Scott A. and Pierce, Marlon and Ping, Robert J. and Miksik, Gary and Gniady, Tassie} }
@inproceedings{ title = {Leveraging Your Local Resources and National Cyberinfrastructure Resources without Tears}, type = {inproceedings}, year = {2014}, pages = {107-110}, websites = {http://dl.acm.org/citation.cfm?doid=2661172.2661202}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {925f6800-a2a0-3e6a-a1e0-b12063f26c2f}, created = {2018-02-27T18:07:25.257Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:26:33.925Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Compute resources for conducting research inhabit a wide range, including researchers' personal computers, servers in labs, campus clusters and condos, regional resource-sharing models, and national cyberinfrastructure. Researchers agree that there are not enough resources available on a broad scale, and significant barriers exist for getting analyses moved from smaller- to larger-scale cyberinfrastructure. The XSEDE Campus Bridging program disseminates several tools that assist researchers and campus IT administrators in reducing barriers to the effective use of national cyberinfrastructure for research. Tools for data management, job submission and steering, best practices for building and administering clusters, and common documentation and training activities all support a flexible environment that allows cyberinfrastructure to be as simple to utilize as a plug-and-play peripheral. In this paper and the accompanying poster we provide an overview of campus bridging, including specific challenges and solutions to the problem of making the computerized parts of research easier. We focus particularly on tools that facilitate management of campus computing clusters and integration of such clusters with the national cyberinfrastructure.}, bibtype = {inproceedings}, author = {Hallock, Barbara and Knepper, Richard and Ferguson, James and Stewart, Craig A.}, doi = {10.1145/2661172.2661202}, booktitle = {Proceedings of the 2014 ACM SIGUCCS Annual Conference on User Services Conference - SIGUCCS '14} }
@article{ title = {OASIS: a data and software distribution service for Open Science Grid}, type = {article}, year = {2014}, websites = {http://hdl.handle.net/2022/17649}, id = {ad983035-4142-3bbf-a8e7-a4074095dc58}, created = {2018-02-27T18:07:25.462Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-19T17:41:41.876Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, source_type = {article}, private_publication = {false}, bibtype = {article}, author = {Bockelman, Brian and Caballero, Jose and De Stefano, J and Hover, John and Quick, Robert E and Teige, Scott}, doi = {10.1088/1742-6596/513/3/032013}, journal = {Journal of Physics: Conference Series}, volume = {513} }
@inproceedings{ title = {XSEDE Campus Bridging Pilot Case Study}, type = {inproceedings}, year = {2014}, pages = {1-5}, websites = {http://dl.acm.org/citation.cfm?doid=2616498.2616570}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {f3c6d15f-720b-393a-b91c-95dc76cb4f88}, created = {2018-02-27T18:07:25.764Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:26:33.925Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The major goals of the XSEDE Campus Bridging pilot were to simplify the transition between resources local to the researcher and those at the national scale, as well as those resources intermediary to them; to put in place software and other resources that facilitate diverse researcher workflows; and to begin resolving programming and usability issues with the software selected for these purposes. In this paper, we situate the pilot within the domain of existing research cyberinfrastructure (and in the context of campus bridging) and examine the process by which the pilot program was completed and evaluated. We then present a status update for the selected software packages and explore further advancements to be made in this realm. Copyright 2014 ACM.}, bibtype = {inproceedings}, author = {Hallock, Barbara and Knepper, Richard and Ferguson, James and Stewart, Craig}, doi = {10.1145/2616498.2616570}, booktitle = {Proceedings of the 2014 Annual Conference on Extreme Science and Engineering Discovery Environment - XSEDE '14} }
@article{ title = {Corrigendum: Systems survey of endocytosis by multiparametric image analysis (Nature (2010) 464, (243-249) DOI: 10.1038/nature08779)}, type = {article}, year = {2014}, keywords = {Erratum; error}, pages = {444}, volume = {513}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84926337981&doi=10.1038%2Fnature13676&partnerID=40&md5=0797289f868c57ee6b1bf0c3173abc74}, publisher = {Nature Publishing Group}, id = {89d2c3b4-4eaf-3cc2-a928-27ea60764a2e}, created = {2018-02-27T18:07:25.914Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:25.914Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Collinet2014444}, source_type = {article}, notes = {cited By 0}, private_publication = {false}, bibtype = {article}, author = {Collinet, C and Stöter, M and Bradshaw, C R and Samusik, N and Rink, J C and Kenski, D and Habermann, B and Buchholz, F and Henschel, R and Mueller, M S and Nagel, W E and Fava, E and Kalaidzidis, Y and Zerial, M}, doi = {10.1038/nature13676}, journal = {Nature}, number = {7518} }
@article{ title = {Comparison of multi-sample variant calling methods for whole genome sequencing}, type = {article}, year = {2014}, keywords = {ADNI; GATK; HaplotypeCaller; Multi-samples; Whole,Chromosomes; Neurodegenerative diseases; Neuroimag,Genes}, pages = {59-62}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84920124381&doi=10.1109%2FISB.2014.6990432&partnerID=40&md5=7f2e10dbcf90763fd58afbdecb65be6a}, publisher = {IEEE Computer Society}, id = {256801ef-1a33-39c9-a162-43d043912aa6}, created = {2018-02-27T18:07:26.286Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.286Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Nho201459}, source_type = {article}, notes = {cited By 5; Conference of 8th International Conference on Systems Biology, ISB 2014 ; Conference Date: 24 August 2014 Through 27 August 2014; Conference Code:109794}, private_publication = {false}, abstract = {Rapid advancement of next-generation sequencing (NGS) technologies has facilitated the search for genetic susceptibility factors that influence disease risk in the field of human genetics. In particular, whole genome sequencing (WGS) has been used to obtain the most comprehensive genetic variation of an individual and perform detailed evaluation of all genetic variation. To this end, sophisticated methods to accurately call high-quality variants and genotypes simultaneously on a cohort of individuals from raw sequence data are required. On chromosome 22 of 818 WGS data from the Alzheimer's Disease Neuroimaging Initiative (ADNI), which is the largest WGS related to a single disease, we compared two multi-sample variant calling methods for the detection of single nucleotide variants (SNVs) and short insertions and deletions (indels) in WGS: (1) reduce the analysis-ready reads (BAM) file to a manageable size by keeping only essential information for variant calling ('REDUCE') and (2) call variants individually on each sample and then perform a joint genotyping analysis of the variant files produced for all samples in a cohort ('JOINT'). JOINT identified 515,210 SNVs and 60,042 indels, while REDUCE identified 358,303 SNVs and 52,855 indels. JOINT identified many more SNVs and indels compared to REDUCE. Both methods had a concordance rate of 99.60% for SNVs and 99.06% for indels. For SNVs, evaluation with HumanOmni 2.5M genotyping arrays revealed a concordance rate of 99.68% for JOINT and 99.50% for REDUCE. REDUCE needed more computational time and memory compared to JOINT. Our findings indicate that the multi-sample variant calling method using the JOINT process is a promising strategy for variant detection, which should facilitate our understanding of the underlying pathogenesis of human diseases. © 2014 IEEE.}, bibtype = {article}, author = {Nho, K and West, J D and Li, H and Henschel, R and Tavares, M C and Bharthur, A and Weiner, M W and Green, R C and Toga, A W and Saykin, A J}, editor = {Wu, L.-Y. and Wang, Y. and Chen, L. and Zhang, X.-S.}, doi = {10.1109/ISB.2014.6990432}, journal = {International Conference on Systems Biology, ISB} }
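The concordance rates quoted above compare two variant call sets. The paper's exact formula is not given in this abstract, but one common definition, shared calls over the union of calls, can be sketched as:

def concordance_rate(calls_a, calls_b):
    # Jaccard-style agreement between sets of (chrom, pos, ref, alt) tuples.
    return len(calls_a & calls_b) / len(calls_a | calls_b)

# Toy call sets, not ADNI data:
joint = {("chr22", 16050075, "A", "G"), ("chr22", 16050115, "G", "A")}
reduce_calls = {("chr22", 16050075, "A", "G")}
print(f"{concordance_rate(joint, reduce_calls):.2%}")  # 50.00%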
@techreport{ title = {Models for Sustainability for Robust Cyberinfrastructure Software-Software Sustainability Survey}, type = {techreport}, year = {2014}, id = {c97b8dda-5563-3964-88bb-4c30fd1c1788}, created = {2018-02-27T18:07:26.318Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.904Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert2014a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Wernert, Julie and Wernert, Eric and Stewart, Craig} }
@techreport{ title = {IEEE Cluster 2013 Conference final report-hosted by Indiana University Pervasive Technology Institute}, type = {techreport}, year = {2014}, id = {e7c67b4a-702d-3474-a897-d3cf8d887f27}, created = {2018-02-27T18:07:27.221Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.566Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Ping2014b}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Ping, Robert J and Miller, Therese and Wernert, Eric A and Stewart, Craig A} }
@techreport{ title = {Services and Support for the IU School of Medicine and Other Clinical Affairs Schools Provided by the Research Technologies Division of UITS and the Advanced Biomedical Information Technology Core in FY 2013–Condensed Version}, type = {techreport}, year = {2014}, websites = {http://hdl.handle.net/2022/17429}, id = {2c23efa2-b890-38e2-b515-cce738becc61}, created = {2018-02-27T18:07:29.002Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T22:17:45.359Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2014m}, source_type = {RPRT}, private_publication = {false}, abstract = {This is a condensed version of a longer report at http://hdl.handle.net/2022/17216 and presents information on services delivered in FY 2013 by ABITC and RT to the IU School of Medicine and the other Clinical Affairs schools that include the Schools of Nursing, Dentistry, Health and Rehabilitation Sciences, and Optometry; the Fairbanks School of Public Health at IUPUI; the School of Public Health at IU Bloomington; and the School of Social Work.}, bibtype = {techreport}, author = {Stewart, Craig A and Barnett, W K and Link, Matthew R and Shankar, G and Miller, Therese and Michael, S and Henschel, Robert and Boyles, M J and Wernert, Eric and Quick, Rob} }
@inproceedings{ title = {Galaxy based BLAST submission to distributed national high throughput computing resources}, type = {inproceedings}, year = {2014}, keywords = {Bioinformatics; Cost effectiveness; Throughput,Cost-effective solutions; Cyber infrastructures;,Galaxies}, volume = {23-28-Marc}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84976271088&partnerID=40&md5=ea9c8364e53276d64fa78ae4c984171f}, publisher = {Proceedings of Science (PoS)}, id = {798ec5b2-bd86-3a54-952c-2ee693b1dc70}, created = {2018-02-27T18:07:29.225Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.225Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hayashi2014}, source_type = {conference}, notes = {cited By 1; Conference of International Symposium on Grids and Clouds, ISGC 2014 ; Conference Date: 23 March 2014 Through 28 March 2014; Conference Code:121995}, private_publication = {false}, abstract = {To assist the bioinformatic community in leveraging the national cyberinfrastructure, the National Center for Genome Analysis Support (NCGAS) along with Indiana University's High Throughput Computing (HTC) group have engineered a method to use Galaxy to submit BLAST jobs to the Open Science Grid (OSG). OSG is a collaboration of resource providers that utilize opportunistic cycles at more than 100 universities and research centers in the US. BLAST jobs make up a significant portion of the research conducted on NCGAS resources; moving jobs that are conducive to an HTC environment to the national cyberinfrastructure would alleviate load on resources at NCGAS and provide a cost-effective solution for getting more cycles to reduce the unmet needs of bioinformatic researchers. To this point researchers have tackled this issue by purchasing additional resources or enlisting collaborators doing the same type of research, while HTC experts have focused on expanding the number of resources available to historically HTC friendly science workflows. In this paper, we bring together expertise from both areas to address how a bioinformatics researcher using their normal interface, Galaxy, can seamlessly access the OSG, which routinely supplies researchers with millions of compute hours daily. Efficient use of these results will supply additional compute time to researchers and help meet an as-yet-unmet need for BLAST computing cycles. © Copyright owned by the author(s) under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike Licence.}, bibtype = {inproceedings}, author = {Hayashi, S and Gesing, S and Quick, R and Teige, S and Ganote, C and Wu, L.-S. and Prout, E}, booktitle = {Proceedings of Science} }
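The HTC pattern behind this work is embarrassingly parallel: a large multi-sequence BLAST query can be split into small chunks, each becoming one short, independent grid job. A minimal illustration follows; the chunk size and file names are arbitrary, and real submissions go through Galaxy and the OSG tooling.

def split_fasta(path, seqs_per_chunk=100):
    # Yield FASTA text blocks of at most seqs_per_chunk sequences each.
    records, count = [], 0
    with open(path) as fh:
        for line in fh:
            if line.startswith(">"):
                if count == seqs_per_chunk:
                    yield "".join(records)
                    records, count = [], 0
                count += 1
            records.append(line)
    if records:
        yield "".join(records)

# Each chunk would then serve as the query of one BLAST grid job:
# for i, chunk in enumerate(split_fasta("query.fa")):
#     open(f"chunk_{i}.fa", "w").write(chunk)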
@article{ title = {A case study: Holistic performance analysis on heterogeneous architectures using the vampir toolchain}, type = {article}, year = {2014}, pages = {793-802}, volume = {25}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84902282377&doi=10.3233%2F978-1-61499-381-0-793&partnerID=40&md5=79fc3b36d0e75edf5b7e2975868eecc2}, publisher = {IOS Press BV}, id = {da7d8ee8-b728-3284-83a9-d2aebdcfed71}, created = {2018-02-27T18:07:30.191Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.191Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Dietrich2014793}, source_type = {article}, notes = {cited By 1}, private_publication = {false}, abstract = {State-of-the-art high performance computing (HPC) applications have to scale over an increasing number of processing elements, meanwhile application developers recently have to face the programming of special accelerator hardware. Although computing languages like CUDA and programming standards like OpenACC provide a fairly easy way to exploit the computational power of general purpose graphics processing units (GPGPUs), their programming is still challenging. Performance analysis is a vital procedure to efficiently use the available hardware and programming models. This paper presents the Vampir performance analysis capabilities by taking the example of a molecular dynamics code, which uses message passing (MPI), threading (OpenMP) and offloading to accelerators (OpenACC and CUDA). It is shown that the Vampir tool-set allows a holistic view on the combined usage of all commonly utilized programming paradigms in heterogeneous HPC applications. © 2014 The authors and IOS Press.}, bibtype = {article}, author = {Dietrich, R and Winkler, F and William, T and Stolle, J and Henschel, R and Berry, D K}, doi = {10.3233/978-1-61499-381-0-793}, journal = {Advances in Parallel Computing} }
@inproceedings{ title = {Visualization on Spherical Displays: Challenges and Opportunities}, type = {inproceedings}, year = {2014}, websites = {http://visap.uic.edu/2014/papers/12_Vega_SphericalDisplays_VISAP2014.pdf}, city = {Paris, France}, id = {4f4b20ea-a75f-3da8-b930-b45aecf25c38}, created = {2018-02-27T18:07:30.285Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.285Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {proceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {Vega, Karla and Wernert, Eric and Beard, Patrick and Gniady, Cassandre and Reagan, David and Boyles, Michael and Eller, Chris}, booktitle = {IEEE VIS 2014 Arts Program} }
@inproceedings{ title = {OSG PKI transition: Experiences and lessons learned}, type = {inproceedings}, year = {2014}, keywords = {Authentication; Distributed computer systems; Proj,Certificate Services; Federated identity; Identit,Public key cryptography}, volume = {23-28-Marc}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84976287425&partnerID=40&md5=91fa0ceddc7a53735878e4a650ae9d76}, publisher = {Proceedings of Science (PoS)}, id = {b6e18a43-ad0b-3553-bd36-ffd8f900a898}, created = {2018-02-27T18:07:30.337Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.337Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Welch2014}, source_type = {conference}, notes = {cited By 0; Conference of International Symposium on Grids and Clouds, ISGC 2014 ; Conference Date: 23 March 2014 Through 28 March 2014; Conference Code:121995}, private_publication = {false}, abstract = {Over the course of 2012-13 the Open Science Grid (OSG) transitioned the identity management system for its science user community from the DOE Grids public key infrastructure (PKI) to a new OSG PKI. This transition was significant in its scope, touching on nearly all aspects of the OSG infrastructure and community. The transition also entailed the adoption of a commercial certificate service as a key component of OSG's PKI. This transition offers a rare opportunity to better understand identity management and how to prepare for and implement changes in an identity management system. In this paper, we describe OSG's transition and lessons learned from it. We discuss the overall project management approach, including a division of the project into planning, piloting, design, development, implementation and transition phases. We discuss the considered alternatives, both for implementations of the OSG PKI as well as alternatives to a PKI such as federated identity, as well as the criteria we used to make our decision. We conclude with a set of lessons learned from both implementation and in retrospect, and a set of recommendations for other identity systems. © Copyright owned by the author(s) under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike Licence.}, bibtype = {inproceedings}, author = {Welch, V and Deximo, A and Hayashi, S and Khadke, V D and Mathure, R and Quick, R and Altunay, M and Sehgal, C S and Tiradani, A and Basney, J}, booktitle = {Proceedings of Science} }
@article{ title = {The role of the collaboratory in enabling large-scale identity management for HEP}, type = {article}, year = {2014}, keywords = {Collaboratories; Identity management; Motivation,Computation theory; High energy physics,Nuclear physics}, volume = {513}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84903481864&doi=10.1088%2F1742-6596%2F513%2F3%2F032022&partnerID=40&md5=87f93e27f62020498a4aa38580a3bb50}, publisher = {Institute of Physics Publishing}, city = {Amsterdam}, id = {89132f9d-170b-369d-a649-e81238eed134}, created = {2018-02-27T18:07:30.547Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.547Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Cowles2014}, source_type = {article}, notes = {cited By 0; Conference of 20th International Conference on Computing in High Energy and Nuclear Physics, CHEP 2013 ; Conference Date: 14 October 2013 Through 18 October 2013; Conference Code:108171}, private_publication = {false}, abstract = {The authors are defining a model that describes and guides existing and future scientific collaboratory identity management implementations. Our ultimate goal is to provide guidance to virtual organizations and resource providers in designing an identity management implementation. Our model is captured in previously published work. Here, we substantially extend our analysis in terms of six motivation factors (user isolation, persistence of user data, complexity of virtual organization roles, cultural and historical inertia, scaling, and incentive for collaboration), observed in interviews with community members involved in identity management, that impact implementation decisions. This analysis is a significant step towards our ultimate goal of providing guidance to virtual organizations.}, bibtype = {article}, author = {Cowles, R and Jackson, C and Welch, V}, doi = {10.1088/1742-6596/513/3/032022}, journal = {Journal of Physics: Conference Series}, number = {TRACK 3} }
@article{ title = {Making campus bridging work for researchers: Can campus bridging experts accelerate discovery?}, type = {article}, year = {2014}, keywords = {Computer applications,Computer networks; Concurrency control,campus bridging; Collaborative support; High perf}, pages = {2141-2148}, volume = {26}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84905092643&doi=10.1002%2Fcpe.3266&partnerID=40&md5=3c681a463b096bb5fe36b7cee22b080e}, publisher = {John Wiley and Sons Ltd}, id = {b6b50a3d-d0a1-30d2-ad5e-83c88b7febea}, created = {2018-02-27T18:07:30.728Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.728Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, citation_key = {Michael20142141}, source_type = {article}, notes = {cited By 1}, private_publication = {false}, abstract = {The computational demands of an ever increasing number of scholars at universities and research institutions throughout the country are outgrowing the capacity of desktop workstations. Researchers are turning to high performance computing facilities, both on their campuses and at regional and national centers, to run simulations and analyze data. The Extreme Science and Engineering Discovery Environment (XSEDE) is one of the first places researchers turn to when they outgrow their campus resources. XSEDE machines are far larger (by at least an order of magnitude) than what most universities offer. Transitioning from a campus resource to an XSEDE resource is seldom a trivial task. XSEDE has taken many steps to make this transition easier, including the campus bridging initiative, the Campus Champions program, and the Extended Collaborative Support Service program. In this paper, we present a new facet to the campus bridging initiative in the form of the campus bridging expert, an information technology professional dedicated to aid researchers in transitioning from desktop, to campus, to regional, and to national resources. We outline the current state of affairs and explore how campus bridging experts could provide maximal impact for minimal investment on the part of the organizing body. Copyright © 2014 John Wiley & Sons, Ltd.}, bibtype = {article}, author = {Michael, S and Thota, A and Henschel, R and Knepper, R}, doi = {10.1002/cpe.3266}, journal = {Concurrency Computation Practice and Experience}, number = {13} }
@inproceedings{ title = {ODI-portal, pipeline, and archive (ODI-PPA): A web-based astronomical compute archive, visualization, and analysis service}, type = {inproceedings}, year = {2014}, keywords = {Astronomy; Digital storage; Enterprise resource ma,Compute archive; Distributed systems; HTML5; Java,Data visualization}, volume = {9152}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906920544&doi=10.1117%2F12.2057123&partnerID=40&md5=ef790b848476b3f6a1a194e49cdcf172}, publisher = {SPIE}, city = {Montreal, QC}, id = {20b36b78-bd9c-3a9d-b0d9-5bb6afd27122}, created = {2018-02-27T18:07:31.102Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.102Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Gopu2014}, source_type = {conference}, notes = {cited By 4; Conference of Software and Cyberinfrastructure for Astronomy III ; Conference Date: 22 June 2014 Through 26 June 2014; Conference Code:107330}, private_publication = {false}, abstract = {The One Degree Imager-Portal, Pipeline, and Archive (ODI-PPA) is a web science gateway that provides astronomers a modern web interface that acts as a single point of access to their data, and rich computational and visualization capabilities. Its goal is to support scientists in handling complex data sets, and to enhance WIYN Observatory's scientific productivity beyond data acquisition on its 3.5m telescope. ODI-PPA is designed, with periodic user feedback, to be a compute archive that has built-in frameworks including: (1) Collections that allow an astronomer to create logical collations of data products intended for publication, further research, instructional purposes, or to execute data processing tasks (2) Image Explorer and Source Explorer, which together enable real-time interactive visual analysis of massive astronomical data products within an HTML5 capable web browser, and overlaid standard catalog and Source Extractor-generated source markers (3) Workflow framework which enables rapid integration of data processing pipelines on an associated compute cluster and users to request such pipelines to be executed on their data via custom user interfaces. ODI-PPA is made up of several light-weight services connected by a message bus; the web portal built using Twitter/Bootstrap, AngularJS and jQuery JavaScript libraries, and backend services written in PHP (using the Zend framework) and Python; it leverages supercomputing and storage resources at Indiana University. ODI-PPA is designed to be reconfigurable for use in other science domains with large and complex datasets, including an ongoing offshoot project for electron microscopy data. © 2014 SPIE.}, bibtype = {inproceedings}, author = {Gopu, A and Hayashi, S and Young, M D and Harbeck, D R and Boroson, T and Liu, W and Kotulla, R and Shaw, R and Henschel, R and Rajagopal, J and Stobie, E and Knezek, P and Martin, R P and Archbold, K}, doi = {10.1117/12.2057123}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering} }
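The abstract describes ODI-PPA as light-weight services connected by a message bus. The stdlib-only sketch below illustrates just that decoupling pattern, a portal enqueueing work that a pipeline worker consumes; a real deployment would use an actual broker, and the task names here are invented.

import queue
import threading

bus = queue.Queue()

def pipeline_worker():
    # Consume tasks until the shutdown sentinel (None) arrives.
    while True:
        job = bus.get()
        if job is None:
            break
        print(f"running pipeline task: {job}")

worker = threading.Thread(target=pipeline_worker)
worker.start()
for job in ["calibrate exposure 42", "stack field 7"]:
    bus.put(job)
bus.put(None)
worker.join()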
@inproceedings{ title = {A model for identity management in future scientific collaboratories}, type = {inproceedings}, year = {2014}, keywords = {Collaboratories; e-Science; Extreme scale; Identit}, volume = {23-28-Marc}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84976254441&partnerID=40&md5=6e634aeaa09b2745f880e182e1cf237e}, publisher = {Proceedings of Science (PoS)}, id = {64ecb3da-98be-3185-be8f-73ffdcda7eb6}, created = {2018-02-27T18:07:31.446Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.446Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Cowles2014}, source_type = {conference}, notes = {cited By 0; Conference of International Symposium on Grids and Clouds, ISGC 2014 ; Conference Date: 23 March 2014 Through 28 March 2014; Conference Code:121995}, private_publication = {false}, abstract = {Over the past two decades, the virtual organization (VO) has allowed for increasingly large and complex scientific projects spanning multiple organizations and countries. The eXtreme Scale Identity Management (XSIM) project has surveyed a number of these VOs and the resource providers (RPs) that serve them, and built a model expressing the identity management (IdM) implementations supporting these large scientific VOs. The initial model was presented at eScience 2013. This work refines that initial VO-IdM model with XSIM efforts since the original eScience 2013 paper, capturing results from additional interviews and initial applications of the model, and begins to extend the model to include federated IdM environments, portal-based VOs and cloud and exascale RPs. © Copyright owned by the author(s) under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike Licence.}, bibtype = {inproceedings}, author = {Cowles, R and Jackson, C and Welch, V and Cholia, S}, booktitle = {Proceedings of Science} }
@inproceedings{ title = {Methods For Creating XSEDE Compatible Clusters}, type = {inproceedings}, year = {2014}, pages = {1-5}, websites = {http://dl.acm.org/citation.cfm?doid=2616498.2616578}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {4445155a-cb1e-36c7-9000-24f503178b85}, created = {2018-02-27T18:07:31.490Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:26:33.926Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The Extreme Science and Engineering Discovery Environment has created a suite of software that is collectively known as the basic XSEDE-compatible cluster build. It has been distributed as a Rocks roll for some time. It is now available as individual RPM packages, so that it can be downloaded and installed in portions as appropriate on existing and working clusters. In this paper, we explain the concept of the XSEDE-compatible cluster and describe how to install individual components as RPMs using Puppet and the XSEDE-compatible cluster YUM repository. Copyright 2014 ACM.}, bibtype = {inproceedings}, author = {Fischer, Jeremy and Knepper, Richard and Standish, Matthew and Stewart, Craig A. and Alvord, Resa and Lifka, David and Hallock, Barbara and Hazlewood, Victor}, doi = {10.1145/2616498.2616578}, booktitle = {Proceedings of the 2014 Annual Conference on Extreme Science and Engineering Discovery Environment - XSEDE '14} }
@inproceedings{ title = {Accelerating Sparse Canonical Correlation Analysis for Large Brain Imaging Genetics Data}, type = {inproceedings}, year = {2014}, keywords = {AVL,PTI,RT,RTV}, websites = {http://dl.acm.org/citation.cfm?id=2616515}, publisher = {ACM}, city = {Atlanta, GA USA}, id = {00756580-b50e-3bc4-9d8a-4cddb219a7bf}, created = {2018-02-27T18:07:31.707Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.707Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {Yan, Jingwen and Zhang, Hui and Du, Lei and Wernert, Eric A and Saykin, Andrew J and Shen, Li}, booktitle = {Proceedings of the 2014 Annual Conference on Extreme Science and Engineering Discovery Environment} }
@techreport{ title = {Sustainability of cyberinfrastructure software: Community needs, case studies, and success strategies}, type = {techreport}, year = {2014}, id = {5cbeaa43-3c51-3d48-b4dc-87d8cadc4ce8}, created = {2018-02-27T18:07:33.606Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.223Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2014n}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Barnett, William K and Wernert, Eric A and Wernert, Julie A and Welch, Von and Knepper, Richard} }
@inbook{ type = {inbook}, year = {2014}, pages = {6562-6572}, websites = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-4666-5888-2.ch645,http://hdl.handle.net/2022/18608}, publisher = {IGI Global}, city = {Hershey, PA, USA}, id = {17d2b265-79d6-3be1-9ea4-87a95f844415}, created = {2019-08-15T21:03:24.395Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:58:48.983Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, source_type = {CHAP}, private_publication = {false}, abstract = {Computers accelerate our ability to achieve scientific breakthroughs. As technology evolves and new research needs come to light, the role for cyberinfrastructure as “knowledge” infrastructure continues to expand. This article defines and discusses cyberinfrastructure and the related topics of science gateways and campus bridging; identifies future challenges in cyberinfrastructure; and discusses challenges and opportunities related to the evolution of cyberinfrastructure, “big data” (datacentric, data-enabled, and data-intensive research and data analytics), and cloud computing.}, bibtype = {inbook}, author = {Stewart, Craig A and Knepper, Richard and Link, Matthew R and Pierce, Marlon and Wernert, Eric and Wilkins-Diehr, Nancy}, editor = {Mehdi Khosrow-Pour, D B A}, doi = {10.4018/978-1-4666-5888-2.ch645}, chapter = {Cyberinfrastructure, Science Gateways, Campus Bridging, and Cloud Computing}, title = {Encyclopedia of Information Science and Technology, Third Edition} }
@techreport{ title = {XSEDE 12 Conference Final Report}, type = {techreport}, year = {2014}, keywords = {Technical Report}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/18493}, month = {7}, day = {11}, id = {7a0c071a-bc2a-34d5-aff1-53e0f94789ab}, created = {2020-09-10T20:46:58.546Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T20:46:58.546Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A. and Miller, Therese and Blood, Philip and Tillotson, Jenett and Froelich, Warran and DeStefano, Lizanne and Rivera, Lorna} }
@techreport{ title = {2013 annual report on training, education, and outreach activities of the Indiana University Pervasive Technology Institute and affiliated organizations}, type = {techreport}, year = {2014}, keywords = {Technical Report}, websites = {http://hdl.handle.net/2022/17581}, id = {7a073e4f-3c7d-3dbc-9bf2-32973447a68c}, created = {2020-09-10T21:24:49.471Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T21:24:49.471Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This report summarizes training, education, and outreach activities for calendar year 2013 of PTI and affiliated organizations, including the School of Informatics and Computing, Office of the Vice President for Information Technology, and Maurer School of Law. Reported activities include those led by PTI Research Centers (Center for Applied Cybersecurity Research, Center for Research in Extreme Scale Technologies, Data to Insight Center, Digital Science Center) and Service and Cyberinfrastructure Centers (Research Technologies Division of University Information Technology Services, National Center for Genome Assembly Support).}, bibtype = {techreport}, author = {Ping, Robert J and Miller, Therese and Plale, Beth and Stewart, Craig} }
@techreport{ title = {Economic development by the Indiana University Pervasive Technology Institute, Pervasive Technology Labs, and the Research Technologies Division of University Information Technology Services during FY 2012/2013}, type = {techreport}, year = {2014}, keywords = {Technical Report}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/18640}, month = {9}, day = {6}, id = {a456f47a-892e-3575-bcbc-d17481450d70}, created = {2020-09-11T16:53:46.551Z}, accessed = {2020-09-11}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-11T16:53:46.551Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Miller, Therese and Stewart, Craig A.} }
@techreport{ title = {Services and support for IU School of Medicine and Clinical Affairs Schools by the UITS/PTI Advanced Biomedical Information Technology Core and Research Technologies Division in FY 2013-Extended Version}, type = {techreport}, year = {2013}, websites = {http://hdl.handle.net/2022/17216}, publisher = {Indiana University: UITS/PTI ABITC and RT Division}, id = {85d9e22b-f5b8-31e4-ad98-802037200b82}, created = {2018-02-27T18:07:25.170Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T22:17:45.254Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2013p}, source_type = {RPRT}, private_publication = {false}, abstract = {The report presents information on services delivered in FY 2013 by ABITC and RT to the IU School of Medicine and the other Clinical Affairs schools that include the Schools of Nursing, Dentistry, Health and Rehabilitation Sciences, and Optometry; the Fairbanks School of Public Health at IUPUI; the School of Public Health at IU Bloomington; and the School of Social Work.}, bibtype = {techreport}, author = {Stewart, Craig A and Barnett, William K and Link, Matt R and Shankar, Ganesh and Miller, Therese and Michael, Scott and Henschel, Robert and Boyles, Mike J and Wernert, Eric and Quick, Robert} }
@misc{ title = {Visual exploration and analysis of human-robot interaction rules}, type = {misc}, year = {2013}, source = {IS&T/SPIE Electronic Imaging conference}, websites = {http://hdl.handle.net/2022/15307}, city = {Burlingame, CA}, id = {105b2d58-be73-3019-b5e5-cd4241ef853d}, created = {2018-02-27T18:07:26.021Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.021Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {misc}, private_publication = {false}, bibtype = {misc}, author = {Zhang, Hui and Boyles, Michael} }
@inproceedings{ title = {Science gateway security recommendations}, type = {inproceedings}, year = {2013}, keywords = {Cluster computing,Research,Research communities; Research data; Research resu}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84893618339&doi=10.1109%2FCLUSTER.2013.6702697&partnerID=40&md5=2e80780339d239a3e2703947890900ea}, city = {Indianapolis, IN}, id = {17dfaa3a-7134-3162-9dc5-8f78cce97c19}, created = {2018-02-27T18:07:27.052Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:27.052Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Basney2013}, source_type = {conference}, notes = {cited By 0; Conference of 15th IEEE International Conference on Cluster Computing, CLUSTER 2013 ; Conference Date: 23 September 2013 Through 27 September 2013; Conference Code:102435}, private_publication = {false}, abstract = {A science gateway is a web portal that provides a convenient interface to data and applications in support of a research community. Standard security concerns apply to science gateways, including confidentiality of pre-publication research data, integrity of research results, and availability of services provided to researchers. In this paper we identify existing science gateway security recommendations and provide our own perspective. © 2013 IEEE.}, bibtype = {inproceedings}, author = {Basney, J and Welch, V}, doi = {10.1109/CLUSTER.2013.6702697}, booktitle = {Proceedings - IEEE International Conference on Cluster Computing, ICCC} }
@techreport{ title = {XSEDE Campus Bridging–Cluster software distribution strategy and tactics}, type = {techreport}, year = {2013}, id = {fee9ab35-d55e-3355-923d-1476fd771bc5}, created = {2018-02-27T18:07:27.483Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.235Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hazlewood2013a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Hazlewood, Victor and Knepper, Richard and Lee, Steven and Lifka, David and Navarro, J P and Stewart, Craig A} }
@inproceedings{ title = {Identity management for virtual organizations: An experience-based model}, type = {inproceedings}, year = {2013}, keywords = {Collaboration; Identity management; Security; Tru,Computation theory,Risks}, pages = {278-284}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84893499173&doi=10.1109%2FeScience.2013.47&partnerID=40&md5=5f290ab32369f2caa599bf22f6c9aa1c}, publisher = {IEEE Computer Society}, city = {Beijing}, id = {1eafb101-119f-3f24-9f41-2804ddec2d3e}, created = {2018-02-27T18:07:28.285Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.285Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Cowles2013278}, source_type = {conference}, notes = {cited By 0; Conference of 9th IEEE International Conference on e-Science, e-Science 2013 ; Conference Date: 22 October 2013 Through 25 October 2013; Conference Code:102409}, private_publication = {false}, abstract = {In this paper we present our Virtual Organization (VO) Identity Management (IdM) Model, an overview of 14 interviews that informed it, and preliminary analysis of the factors that guide VOs and Resource Providers (RPs) to choose a particular IdM implementation. This model will serve both existing and future VOs and RPs to more effectively understand and implement their IdM relationships. The Virtual Organization has emerged as a fundamental way of structuring modern scientific collaborations and has shaped the computing infrastructure that supports those collaborations. One key aspect of this infrastructure is identity management, and the emergence of VOs introduces challenges regarding how much of the IdM process should be delegated from the RP to the VO. Many different implementation choices have been made; we conducted semi-structured interviews with 14 different VOs or RPs regarding their IdM choices and the bases behind those decisions. We analyzed the interview results to extract common parameters and values, which we used to inform our VO IdM Model. Copyright © 2013 by The Institute of Electrical and Electronics Engineers, Inc.}, bibtype = {inproceedings}, author = {Cowles, R and Jackson, C and Welch, V}, doi = {10.1109/eScience.2013.47}, booktitle = {Proceedings - IEEE 9th International Conference on e-Science, e-Science 2013} }
@inproceedings{ title = {XSEDE-enabled high-throughput lesion activity assessment}, type = {inproceedings}, year = {2013}, keywords = {2013,pti}, pages = {1}, websites = {http://doi.acm.org/10.1145/2484762.2484783,http://dl.acm.org/citation.cfm?doid=2484762.2484783}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {f2284ca1-0435-31fd-a99e-61a3d2fb1a3a}, created = {2018-02-27T18:07:28.300Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-19T17:50:12.157Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {Zhang, Hui and Boyles, Michael J and Ruan, Guangchen and Li, Huian and Shen, Hongwei and Ando, Masatoshi}, doi = {10.1145/2484762.2484783}, booktitle = {Proceedings of the Conference on Extreme Science and Engineering Discovery Environment Gateway to Discovery - XSEDE '13} }
@article{ title = {De novo transcript sequence reconstruction from RNA-seq using the Trinity platform for reference generation and analysis}, type = {article}, year = {2013}, keywords = {Base Sequence,Gene Expression Profiling,RNA,Software,Transcriptome,accuracy,article,clinical protocol,data analysis}, pages = {1494-1512}, volume = {8}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84880266648&doi=10.1038%2Fnprot.2013.084&partnerID=40&md5=4b1c5151feb7cf22e24b0aa2a10a5bdb}, series = {NATURE PROTOCOLS}, id = {8c35cf8d-d569-3856-8be3-e5d60be21ffa}, created = {2018-02-27T18:07:28.405Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.405Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {hpygbbcellmoopswwwdh14}, source_type = {article}, notes = {cited By 1139}, private_publication = {false}, abstract = {De novo assembly of RNA-seq data enables researchers to study transcriptomes without the need for a genome sequence; this approach can be usefully applied, for instance, in research on 'non-model organisms' of ecological and evolutionary importance, cancer samples or the microbiome. In this protocol we describe the use of the Trinity platform for de novo transcriptome assembly from RNA-seq data in non-model organisms. We also present Trinity-supported companion utilities for downstream applications, including RSEM for transcript abundance estimation, R/Bioconductor packages for identifying differentially expressed transcripts across samples and approaches to identify protein-coding genes. In the procedure, we provide a workflow for genome-independent transcriptome analysis leveraging the Trinity platform. The software, documentation and demonstrations are freely available from http://trinityrnaseq.sourceforge.net. The run time of this protocol is highly dependent on the size and complexity of data to be analyzed. The example data set analyzed in the procedure detailed herein can be processed in less than 5 h.}, bibtype = {article}, author = {Haas, B J and Papanicolaou, A and Yassour, M and Grabherr, M and Blood, P D and Bowden, J and Couger, M B and Eccles, D and Li, B and Lieber, M and MacManes, M D and Ott, M and Orvis, J and Pochet, N and Strozzi, F and Weeks, N and Westerman, R and William, T and Dewey, C N and Henschel, R and Leduc, R D and Friedman, N and Regev, A}, doi = {10.1038/nprot.2013.084}, journal = {Nature Protocols}, number = {8} }
@techreport{ title = {Usage of Indiana University computation and data cyberinfrastructure in FY 2011/2012 and assessment of future needs}, type = {techreport}, year = {2013}, id = {58b8ce24-d76f-34e0-b045-ca34a54715b4}, created = {2018-02-27T18:07:28.731Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.744Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Link2013}, private_publication = {false}, bibtype = {techreport}, author = {Link, Matthew R and Hancock, David Y and Seiffert, Kurt and Simms, Stephen and Michael, Scott and Stewart, Craig A} }
@techreport{ title = {Discipline Categories (for Cyberinfrastructure) at Indiana University}, type = {techreport}, year = {2013}, id = {4d40c88e-3854-3d87-8147-b05f9271ef3e}, created = {2018-02-27T18:07:28.843Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.722Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2013q}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A} }
@article{ title = {Light curve of CR Bootis 1990-2012 from the Indiana long-term monitoring program}, type = {article}, year = {2013}, pages = {126-142}, volume = {125}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874240050&doi=10.1086%2F669542&partnerID=40&md5=d77f646e81a8194e5ff1ac389b42d370}, id = {a0741fdf-80c7-366e-b0c2-f8e58de4e2fb}, created = {2018-02-27T18:07:29.181Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.181Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Honeycutt2013126}, source_type = {article}, notes = {cited By 7}, private_publication = {false}, abstract = {Two telescopes are used at the Morgan-Monroe Observatory of Indiana University for autonomous long-term photometric monitoring of stellar sources, mostly cataclysmic variable stars. The instrumentation is designed and implemented to be appropriate for multiyear automated monitoring. The capabilities and limitations of the equipment are described, along with accounts of the software, the reduction procedures, the motivations for the scientific programs, and the execution of the observing campaigns. Data on the AM CVn-type cataclysmic variable CR Boo are presented and discussed as an example of the kinds of light curves generated at this facility. The He-rich disk in CR Boo has SU UMa-type outburst behavior, with both superoutbursts and what appear to be dwarf nova outbursts. However, the light curve is quite irregular and displays a wide variety of unusual features such as switching among several superoutburst recurrence intervals, and having intervals of dwarf nova-like outbursts that seem to come and go. We discuss the likelihood that deterministic chaos is responsible for these irregularities. © 2013. The Astronomical Society of the Pacific. All rights reserved.}, bibtype = {article}, author = {Honeycutt, R K and Adams, B R and Turner, G W and Robertson, J W and Ost, E M and Maxwell, J E}, doi = {10.1086/669542}, journal = {Publications of the Astronomical Society of the Pacific}, number = {924} }
@article{ title = {Initial findings from a study of best practices and models for cyberinfrastructure software sustainability}, type = {article}, year = {2013}, id = {b763c038-fcb7-3616-9fdc-46fe346704e5}, created = {2018-02-27T18:07:32.088Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.864Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2013r}, source_type = {JOUR}, private_publication = {false}, bibtype = {article}, author = {Stewart, Craig A and Wernert, Julie and Wernert, Eric A and Barnett, William K and Welch, Von}, journal = {arXiv preprint arXiv:1309.1817} }
@techreport{ title = {Storage Briefing: Trends and IU}, type = {techreport}, year = {2013}, id = {528ad600-38b6-3410-9c21-8835431ec3b1}, created = {2018-02-27T18:07:32.168Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.552Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Floyd2013a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Floyd, Mike J and Seiffert, Kurt and Stewart, Craig A and Turner, George and Cromwell, Dennis and Hancock, Dave and Kallback-Rose, Kristy and Link, Matthew R and Simms, Steve and Williams, Troy} }
@inproceedings{ title = {Rockhopper, a true HPC system built with cloud concepts}, type = {inproceedings}, year = {2013}, websites = {http://ieeexplore.ieee.org/abstract/document/6702658/}, publisher = {IEEE}, id = {eab68cfe-5f47-38db-9b3a-59f3c42e451c}, created = {2018-02-27T18:07:32.283Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.690Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper2013g}, source_type = {article}, private_publication = {false}, abstract = {A number of services for scientific computing based on cloud resources have recently drawn significant attention in both research and infrastructure provider communities. Most cloud resources currently available lack true high performance characteristics, such as high-speed interconnects or storage. Researchers studying cloud systems have pointed out that many cloud services do not provide service level agreements that may meet the needs of the research community. Furthermore, the lack of location information provided to the user and the shared nature of the systems may create risk for users, in the event that their data is moved to an unknown location with an unknown level of security. © 2013 IEEE.}, bibtype = {inproceedings}, author = {Knepper, Richard and Hallock, Barbara and Stewart, Craig A and Link, Matthew R and Jacobs, M.}, doi = {10.1109/CLUSTER.2013.6702658}, booktitle = {Cluster Computing (CLUSTER), 2013 IEEE International Conference on} }
@inproceedings{ title = {Making campus bridging work for researchers: A case study with mlRho}, type = {inproceedings}, year = {2013}, keywords = {Application programs,Bigjob,Employment,Genes,Genetics,High-throughput,Job analysis,MlRho,Optimization,Performance}, pages = {8}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84882383550&doi=10.1145%2F2484762.2484803&partnerID=40&md5=67cc69a3e36848a9909f0d5b80ef338c}, city = {San Diego, CA}, id = {b4381938-7fa7-3789-860a-ba5a2b65b15e}, created = {2018-02-27T18:07:33.065Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-26T19:07:38.566Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Thota2013}, source_type = {conference}, notes = {cited By 0; Conference of Conference on Extreme Science and Engineering Discovery Environment, XSEDE 2013 ; Conference Date: 22 July 2013 Through 25 July 2013; Conference Code:98539}, private_publication = {false}, abstract = {The computational demands of an increasing number of biologists have outgrown the capacity of desktop workstations, and these researchers are turning to supercomputers to run their simulations and calculations. Many of today's computational problems, however, require larger resource commitments than even individual universities can provide. XSEDE is one of the first places researchers turn to when they outgrow their campus resources. XSEDE machines are far larger (by at least an order of magnitude) than what most universities offer. Transitioning from a campus resource to an XSEDE resource is seldom a trivial task. XSEDE has taken many steps to make this easier, including the Campus Bridging initiative, the Campus Champions program, the Extended Collaborative Support Service (ECSS) [1] program, and through education and outreach. In this paper, our team of biologists and application support analysts (including a Campus Champion) dissects a computationally intensive biology project and shares the insights we gain to help strengthen the programs mentioned above. We worked on a project to calculate population mutation and recombination rates of tens of genome profiles using mlRho [2], a serial, open-source genome analysis code. For the initial investigation, we estimated that we would need 6.3 million service units (SUs) on the Ranger system. Three of the most important places where the biologists needed help in transitioning to XSEDE were (i) preparing the proposal for 6.3 million SUs on XSEDE, (ii) scaling up the existing workflow to hundreds of cores and (iii) performance optimization. The Campus Bridging initiative makes all of these tasks easier by providing tools and a consistent software stack across centers. Ideally, Campus Champions are able to provide support on (i), (ii) and (iii), while ECSS staff can assist with (ii) and (iii). But (i), (ii) and (iii) are often not part of a Campus Champion's regular job description. To someone writing an XSEDE proposal for the first time, a link to the guidelines and a few pointers may not always be enough for a successful application. In this paper we describe a new role for a campus bridging expert to play in closing the gaps between existing programs and present mlRho as a case study. © 2013 by the Association for Computing Machinery, Inc.}, bibtype = {inproceedings}, author = {Thota, A and Michael, S and Xu, S and Haubold, B and Doak, T and Henschel, R}, doi = {10.1145/2484762.2484803}, booktitle = {Proceedings of the Conference on Extreme Science and Engineering Discovery Environment: Gateway to Discovery (XSEDE '13)} }
@inproceedings{ title = {Performance evaluation of R with Intel Xeon Phi Coprocessor}, type = {inproceedings}, year = {2013}, keywords = {2013,avl,pti,rt,rtv}, websites = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=6691695&abstractAccess=no&userType=inst}, month = {10}, city = {Silicon Valley, CA, USA}, id = {36310b24-afb4-3483-906b-1dbe4f91042a}, created = {2018-02-27T18:07:33.940Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.940Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {proceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {El-Khamra, Y and Gaffney, Neil and Walling, D and Wernert, Eric A and Xu, W and Zhang, Hui}, booktitle = {2013 IEEE International Conference on Big Data (IEEE Big Data 2013)} }
@article{ title = {Performance and quality of service of data and video movement over a 100 Gbps testbed}, type = {article}, year = {2013}, keywords = {High performance computing,Lustre,Networking,Performance analysis}, pages = {230-240}, volume = {29}, websites = {https://linkinghub.elsevier.com/retrieve/pii/S0167739X12001380}, month = {1}, id = {bfa40696-c300-360a-b4aa-46c916d7e08b}, created = {2019-08-29T18:58:49.399Z}, accessed = {2019-08-29}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:22:55.634Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Digital instruments and simulations are creating an ever-increasing amount of data. The need for institutions to acquire these data and transfer them for analysis, visualization, and archiving is growing as well. In parallel, networking technology is evolving, but at a much slower rate than our ability to create and store data. Single fiber 100 Gbps networking solutions have recently been deployed as national infrastructure. This article describes our experiences with data movement and video conferencing across a networking testbed, using the first commercially available single fiber 100 Gbps technology. The testbed is unique in its ability to be configured for a total length of 60, 200, or 400 km, allowing for tests with varying network latency. We performed low-level TCP tests and were able to use more than 99.9% of the theoretical available bandwidth with minimal tuning efforts. We used the Lustre file system to simulate how end users would interact with a remote file system over such a high performance link. We were able to use 94.4% of the theoretical available bandwidth with a standard file system benchmark, essentially saturating the wide area network. Finally, we performed tests with H.323 video conferencing hardware and quality of service (QoS) settings, showing that the link can reliably carry a full high-definition stream. Overall, we demonstrated the practicality of 100 Gbps networking and Lustre as excellent tools for data management. © 2012 Elsevier B.V. All rights reserved.}, bibtype = {article}, author = {Kluge, Michael and Simms, Stephen and William, Thomas and Henschel, Robert and Georgi, Andy and Meyer, Christian and Mueller, Matthias S. and Stewart, Craig A. and Wünsch, Wolfgang and Nagel, Wolfgang E.}, doi = {10.1016/j.future.2012.05.028}, journal = {Future Generation Computer Systems}, number = {1} }
@techreport{ title = {XSEDE Cloud Survey Report}, type = {techreport}, year = {2013}, keywords = {Technical Report}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/25307}, id = {9667fea0-a735-370c-ba4f-47d0eec547db}, created = {2020-09-10T21:54:53.365Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T21:54:53.365Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {A National Science Foundation-sponsored cloud user survey was conducted from September 2012 to April 2013 by the XSEDE Cloud Integration Investigation Team to better understand how cloud is used across a wide variety of scientific fields and the humanities, arts, and social sciences. Data was collected from 80 cloud users from around the globe. The project descriptions in this report illustrate the potential of cloud in accelerating research, enhancing collaboration, and enriching education. Cloud users provided extensive data on core usage, preferred storage, bandwidth, etc. and described cloud benefits and limitations for their specific use cases. Educators, research administrators, CIOs, and research computing practitioners may find value in this data when considering the use and/or deployment of public, private, or hybrid clouds to complement current cyberinfrastructure.}, bibtype = {techreport}, author = {Lifka, David and Foster, Ian and Mehringer, Susan and Parashar, Manish and Redfern, Paul and Stewart, Craig and Tuecke, Steve} }
@techreport{ title = {Final report on accomplishments of a Task Force on Campus Bridging sponsored workshop: Campus Leadership Engagement in Building a Coherent Campus Cyberinfrastructure}, type = {techreport}, year = {2013}, keywords = {Technical Report}, websites = {http://hdl.handle.net/2022/15467}, id = {3e483123-435e-3040-8f91-5df0e5f79a5f}, created = {2020-09-10T22:17:43.725Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T22:17:43.725Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {In 2010, the National Science Foundation (NSF) awarded a grant of $49,840 to the University of North Carolina Chapel Hill to organize a workshop on the topic of campus cyberinfrastructure with the title “Campus Bridging Taskforce Sponsored Workshop: Campus Leadership Engagement in Building a Coherent Campus Cyberinfrastructure.” This report discusses the contents of the full workshop report to the NSF as well as the accomplishments and outcomes reported via the NSF’s online reporting system.}, bibtype = {techreport}, author = {Dreher, Patrick and Ahalt, Stanley C and Stewart, Craig A and Pepin, James M and Almes, Guy T and Mundrane, Michael} }
@techreport{ title = {Report about the collaboration between UITS/Research Technologies at Indiana University and the Center for Information Services and High Performance Computing at Technische Universität Dresden, Germany (2011-2012)}, type = {techreport}, year = {2013}, keywords = {100Gbps network testbed,Technical Report,collaboration,futuregrid,memorandum of understanding,vampir}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/16670}, id = {aa7c9d61-e99d-3703-807d-3ad22a5edb48}, created = {2020-09-10T23:02:59.550Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T23:02:59.550Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This report lists the activities and outcomes for July 2011-June 2012 of the collaboration between Research Technologies, a division of University Information Technology Services at Indiana University (IU), and the Center for Information Services and High Performance Computing (ZIH) at Technische Universität Dresden.}, bibtype = {techreport}, author = {Henschel, Robert and Stewart, Craig A and William, Thomas and Müller, M and Nagel, Wolfgang} }
@article{ title = {The event notification and alarm system for the Open Science Grid operations center}, type = {article}, year = {2012}, keywords = {Alarm systems,Automated monitoring; Error condition; Event notif,Nuclear physics}, volume = {396}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84873302521&doi=10.1088%2F1742-6596%2F396%2F3%2F032105&partnerID=40&md5=7b5d9c45549428cd1c7c4fc8e5cdb164}, city = {New York, NY}, id = {51f38d1f-0fd3-3ab8-bc93-57f45ffbabb7}, created = {2018-02-27T18:07:25.542Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:25.542Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hayashi2012}, source_type = {article}, notes = {cited By 1; Conference of International Conference on Computing in High Energy and Nuclear Physics 2012, CHEP 2012 ; Conference Date: 21 May 2012 Through 25 May 2012; Conference Code:95155}, private_publication = {false}, abstract = {The Open Science Grid (OSG) Operations Team operates a distributed set of services and tools that enable the utilization of the OSG by several HEP projects. Without these services, users of the OSG would not be able to run jobs, locate resources, obtain information about the status of systems, or generally use the OSG. For this reason these services must be highly available. This paper describes the automated monitoring and notification systems used to diagnose and report problems. Described here are the means used by OSG Operations to monitor systems such as physical facilities, network operations, server health, service availability and software error events. Once detected, an error condition generates a message sent, for example, to email, SMS, Twitter, or an instant message server. The mechanism being developed to integrate these monitoring systems into a prioritized and configurable alarming system is emphasized. © Published under licence by IOP Publishing Ltd.}, bibtype = {article}, author = {Hayashi, S and Teige, S and Quick, R}, doi = {10.1088/1742-6596/396/3/032105}, journal = {Journal of Physics: Conference Series}, number = {PART 3} }
@article{ title = {SPEC OMP2012 - An application benchmark suite for parallel systems using OpenMP}, type = {article}, year = {2012}, keywords = {Application programming interfaces (API),Benchmark,Benchmark suites,Benchmarking,Energy,OpenMP,Parallel appl}, pages = {223-236}, volume = {7312 LNCS}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84862186431&doi=10.1007%2F978-3-642-30961-8_17&partnerID=40&md5=208136d8f3921b3406eff77bf27bb605}, city = {Rome}, id = {53abe3c0-fba5-3478-bc1c-680da6ca2df7}, created = {2018-02-27T18:07:25.960Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:25.960Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Müller2012223}, source_type = {article}, notes = {cited By 17; Conference of 8th International Workshop on OpenMP, IWOMP 2012 ; Conference Date: 11 June 2012 Through 13 June 2012; Conference Code:90014}, private_publication = {false}, abstract = {This paper describes SPEC OMP2012, a benchmark developed by the SPEC High Performance Group. It consists of 15 OpenMP parallel applications from a wide range of fields. In addition to a performance metric based on the run time of the applications the benchmark adds an optional energy metric. The accompanying run rules detail how the benchmarks are executed and the results reported. They also cover the energy measurements. The first set of results demonstrates scalability on three different platforms. © 2012 Springer-Verlag.}, bibtype = {article}, author = {Müller, M S and Baron, J and Brantley, W C and Feng, H and Hackenberg, D and Henschel, R and Jost, G and Molka, D and Parrott, C and Robichaux, J and Shelepugin, P and Van Waveren, M and Whitney, B and Kumaran, K}, doi = {10.1007/978-3-642-30961-8_17}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Demonstrating Lustre over a 100Gbps wide area network of 3,500km}, type = {inproceedings}, year = {2012}, pages = {1-8}, websites = {http://dl.acm.org/citation.cfm?id=2388996.2389005,http://ieeexplore.ieee.org/document/6468445/}, month = {11}, publisher = {IEEE}, city = {Los Alamitos, CA, USA}, series = {SC '12}, id = {fa647ac9-13c4-3a6a-914d-3004e18b1b2c}, created = {2018-02-27T18:07:26.449Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:22:55.637Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, abstract = {As part of the SCinet Research Sandbox at the Supercomputing 2011 conference, Indiana University (IU) demonstrated use of the Lustre high performance parallel file system over a dedicated 100 Gbps wide area network (WAN) spanning more than 3,500 km (2,175 mi). This demonstration functioned as a proof of concept and provided an opportunity to study Lustre's performance over a 100 Gbps WAN. To characterize the performance of the network and file system, low level iperf network tests, file system tests with the IOR benchmark, and a suite of real-world applications reading and writing to the file system were run over a latency of 50.5 ms. In this article we describe the configuration and constraints of the demonstration and outline key findings.}, bibtype = {inproceedings}, author = {Henschel, Robert and Simms, Stephen and Hancock, David and Michael, Scott and Johnson, Tom and Heald, Nathan and William, Thomas and Berry, Donald and Allen, Matt and Knepper, Richard and Davy, Matthew and Link, Matthew and Stewart, Craig A}, doi = {10.1109/SC.2012.43}, booktitle = {2012 International Conference for High Performance Computing, Networking, Storage and Analysis} }
@inproceedings{ title = {Using stereoscopic 3D videos to inform the public about the benefits of computational science}, type = {inproceedings}, year = {2012}, keywords = {3D video; Computational science; outreach; Public,Display devices; Flow visualization; Three dimens,Three dimensional computer graphics}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84865321390&doi=10.1145%2F2335755.2335856&partnerID=40&md5=daeb30506c7d094b778db08ad2011a0d}, city = {Chicago, IL}, id = {46fa72ee-f0dd-3fbb-a0fe-fd9e8fb353c2}, created = {2018-02-27T18:07:26.934Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.934Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Boyles2012}, source_type = {conference}, notes = {cited By 0; Conference of 1st Conference of the Extreme Science and Engineering Discovery Environment: Bridging from the eXtreme to the Campus and Beyond, XSEDE12 ; Conference Date: 16 July 2012 Through 19 July 2012; Conference Code:92061}, private_publication = {false}, abstract = {This paper describes an effort to create and disseminate a series of stereoscopic 3D videos that raise awareness about the value of computational science. While the videos target the general population, including the K-12 community, the audience for this paper includes scientific or technical peers who may be interested in sharing or demonstrating their own work more broadly. After outlining the motivation and goals of the project, the authors describe the visual content and computational science behind each of the videos. We then discuss our highly collaborative production workflow that has evolved over the past decade, as well as our distribution mechanisms. We include a summary of the most relevant and appropriate stereoscopic display technologies for the intended audience. Lastly, we analyze and compare this work to other forms of engagement, summarize best practices, and describe potential improvements to future stereoscopic 3D video production. © 2012 ACM.}, bibtype = {inproceedings}, author = {Boyles, M J and Frend, C and William, A M and Eller, C}, doi = {10.1145/2335755.2335856}, booktitle = {ACM International Conference Proceeding Series} }
@inproceedings{ title = {What is campus bridging and what is XSEDE doing about it?}, type = {inproceedings}, year = {2012}, pages = {1}, websites = {http://dl.acm.org/citation.cfm?doid=2335755.2335844}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {1f0cfa3b-2f15-39e0-b66a-3a5bcecf1519}, created = {2018-02-27T18:07:27.915Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:22:56.534Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2012x}, private_publication = {false}, abstract = {The term "campus bridging" was first used in the charge given to an NSF Advisory Committee for Cyberinfrastructure task force. That task force developed this description of campus bridging: "Campus bridging is the seamlessly integrated use of cyberinfrastructure operated by a scientist or engineer with other cyberinfrastructure on the scientist's campus, at other campuses, and at the regional, national, and international levels as if they were proximate to the scientist, and when working within the context of a Virtual Organization (VO) make the 'virtual' aspect of the organization irrelevant (or helpful) to the work of the VO." Campus bridging is more a viewpoint and a set of approaches to usability, software, and information concerns than a particular set of tools or software. We outline here several specific use cases that have been identified as priorities for XSEDE in the next four years. These priorities include documentation, deployment of software used entirely outside of XSEDE, and software that helps bridge from individual researcher to campus to XSEDE cyberinfrastructure. We also describe early pilot tests and means by which the user community may stay informed of campus bridging activities and participate in the implementation of Campus Bridging tools created by XSEDE. Metrics are still being developed, and will include (1) the number of campuses that adopt and use Campus Bridging tools developed by XSEDE and (2) the number of and extent to which XSEDE-developed Campus Bridging tools are adopted among other CI projects. © 2012 ACM.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Bachmann, Felix and Hazlewood, Victor and Knepper, Richard and Foster, Ian and Ferguson, James and Grimshaw, Andrew and Lifka, David}, doi = {10.1145/2335755.2335844}, booktitle = {Proceedings of the 1st Conference of the Extreme Science and Engineering Discovery Environment on Bridging from the eXtreme to the campus and beyond - XSEDE '12} }
@article{ title = {The benefits and challenges of sharing glidein factory operations across nine time zones between OSG and CMS}, type = {article}, year = {2012}, keywords = {Common operations,Fermilab,Glideins,Indiana Uni,Nuclear physics,Problem solving}, volume = {396}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84873292528&doi=10.1088%2F1742-6596%2F396%2F3%2F032103&partnerID=40&md5=a7f3ed89f9bced18803b2247bbc554fb}, city = {New York, NY}, id = {747bf013-fd7c-3ab9-97a5-4a28a48b4edf}, created = {2018-02-27T18:07:28.668Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.668Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Sfiligoi2012}, source_type = {article}, notes = {cited By 0; Conference of International Conference on Computing in High Energy and Nuclear Physics 2012, CHEP 2012 ; Conference Date: 21 May 2012 Through 25 May 2012; Conference Code:95155}, private_publication = {false}, abstract = {For the past few years, OSG has operated a glideinWMS factory at UCSD for several scientific communities, including CMS analysis, HCC and GLOW. This setup worked well, but it had become a single point of failure. OSG thus recently added another instance at Indiana University, serving the same user communities. Similarly, CMS has been operating a glidein factory dedicated to reprocessing activities at Fermilab, with similar results. Recently, CMS decided to host another glidein factory at CERN to increase the availability of the system for analysis, MC and reprocessing jobs. Given the large overlap between this new factory and the three factories in the US, and given that CMS represents a significant fraction of glideins going through the OSG factories, CMS and OSG formed a common operations team that operates all of the above factories. The reasoning behind this arrangement is that most operational issues stem from Grid-related problems, and are very similar for all the factory instances. Solving a problem in one instance thus very often solves the problem for all of them. This paper presents the operational experience of how we address both the social and technical issues of running multiple instances of a glideinWMS factory with operations staff spanning multiple time zones on two continents. © Published under licence by IOP Publishing Ltd.}, bibtype = {article}, author = {Sfiligoi, I and Dost, J M and Zvada, M and Butenas, I and Holzman, B and Wuerthwein, F and Kreuzer, P and Teige, S W and Quick, R and Hernández, J M and Flix, J}, doi = {10.1088/1742-6596/396/3/032103}, journal = {Journal of Physics: Conference Series}, number = {PART 3} }
@inproceedings{ title = {The Lustre File System and 100 Gigabit Wide Area Networking: An Example Case from SC11}, type = {inproceedings}, year = {2012}, pages = {260-267}, websites = {http://ieeexplore.ieee.org/document/6310901/}, month = {6}, publisher = {IEEE}, id = {22798298-2a3e-33e6-8116-675f093ac720}, created = {2018-02-27T18:07:28.972Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.972Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {Knepper, Richard and Michael, Scott and Johnson, William and Henschel, Robert and Link, Matthew}, doi = {10.1109/NAS.2012.36}, booktitle = {2012 IEEE Seventh International Conference on Networking, Architecture, and Storage} }
@techreport{ title = {Technical Report: Acceptance Test for FutureGrid Cray XT5m at Indiana University (Xray)}, type = {techreport}, year = {2012}, id = {256db36a-73bf-3e9c-8388-cf7f162ddc32}, created = {2018-02-27T18:07:29.236Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.889Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2012y}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Link, Matthew R and McCaulay, Scott and Henschel, Robert and Hancock, David} }
@techreport{ title = {48 Month Program Report}, type = {techreport}, year = {2012}, id = {950a18aa-0209-3984-93cf-78009247a1b1}, created = {2018-02-27T18:07:29.643Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.345Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {McRobbie2012}, source_type = {JOUR}, private_publication = {false}, bibtype = {techreport}, author = {McRobbie, Michael A and Wheeler, Bradley C and Plale, Beth A and Stewart, Craig A} }
@techreport{ title = {Technical Report: Report on Lustre use across an experimental 100Gb network spanning 2,175 mi}, type = {techreport}, year = {2012}, id = {0a56785a-a893-3ea7-a0f7-0e4c85fc412f}, created = {2018-02-27T18:07:29.873Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.556Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Henschel2012g}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Henschel, Robert and Simms, Stephen C and Hancock, David Y and Michael, Scott and Johnson, Tom and Heald, Nathan and William, Thomas and Berry, Donald and Allen, Matt and Knepper, Richard} }
@inproceedings{ title = {Conducting K-12 outreach to evoke early interest in IT, science, and advanced technology}, type = {inproceedings}, year = {2012}, pages = {1}, websites = {http://dl.acm.org/citation.cfm?doid=2335755.2335853,http://hdl.handle.net/2022/14807}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {17e0d245-c9ec-3bbf-a794-73db3f5845c3}, created = {2018-02-27T18:07:29.962Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:22:55.646Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The Indiana University Pervasive Technology Institute has engaged for several years in K-12 Education, Outreach and Training (EOT) events related to technology in general and computing in particular. In each event we strive to positively influence children's perception of science and technology. We view K-12 EOT as a channel for technical professionals to engage young people in the pursuit of scientific and technical understanding. Our goal is for students to see these subjects as interesting, exciting and worth further pursuit. By providing opportunities for pre-college students to engage in science, technology, engineering and mathematics (STEM) activities first hand, we hope to influence their choices of careers and field-of-study later in life. In this paper we give an account of our experiences with providing EOT: we describe several of our workshops and events; we provide details regarding techniques that we found to be successful in working with both students and instructors; we discuss program costs and logistics; and we describe our plans for the future. © 2012 ACM.}, bibtype = {inproceedings}, author = {Kallback-Rose, Kristy and Seiffert, Kurt and Antolovic, Danko and Miller, Therese and Ping, Robert and Stewart, Craig}, doi = {10.1145/2335755.2335853}, booktitle = {Proceedings of the 1st Conference of the Extreme Science and Engineering Discovery Environment on Bridging from the eXtreme to the campus and beyond - XSEDE '12} }
@techreport{ title = {A common path forward for the immersive visualization community}, type = {techreport}, year = {2012}, publisher = {Idaho National Laboratory (INL)}, id = {325867b3-887c-34ff-a202-f4d92f052894}, created = {2018-02-27T18:07:30.366Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.605Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert2012}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Wernert, Eric A and Sherman, William R and O'Leary, Patrick and Whiting, Eric} }
@article{ title = {Open Science Grid (OSG) ticket synchronization: Keeping your home field advantage in a distributed environment}, type = {article}, year = {2012}, keywords = {Distributed environments; Error prones; Exchange o,Grid computing; Nuclear physics; Ticket issuing m,Synchronization}, volume = {396}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84873307264&doi=10.1088%2F1742-6596%2F396%2F6%2F062009&partnerID=40&md5=dc44b304dd32216e31a7a4494191fa02}, city = {New York, NY}, id = {a143d17e-735d-365c-aeab-4fc4905f6784}, created = {2018-02-27T18:07:30.481Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.481Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Gross2012}, source_type = {article}, notes = {cited By 1; Conference of International Conference on Computing in High Energy and Nuclear Physics 2012, CHEP 2012 ; Conference Date: 21 May 2012 Through 25 May 2012; Conference Code:95155}, private_publication = {false}, abstract = {Large distributed computing collaborations, such as the Worldwide LHC Computing Grid (WLCG), face many issues when it comes to providing a working grid environment for their users. One of these is exchanging tickets between various ticketing systems in use by grid collaborations. Ticket systems such as Footprints, RT, Remedy, and ServiceNow all have different schema that must be addressed in order to provide a reliable exchange of information between support entities and users in different grid environments. To combat this problem, OSG Operations has created a ticket synchronization interface called GOC-TX that relies on web services instead of error-prone email parsing methods of the past. Synchronizing tickets between different ticketing systems allows any user or support entity to work on a ticket in their home environment, thus providing a familiar and comfortable place to provide updates without having to learn another ticketing system. The interface is built in a way that it is generic enough that it can be customized for nearly any ticketing system with a web-service interface with only minor changes. This allows us to be flexible and rapidly bring new ticket synchronization online. Synchronization can be triggered by different methods including mail, web services interface, and active messaging. GOC-TX currently interfaces with Global Grid User Support (GGUS) for WLCG, Remedy at Brookhaven National Lab (BNL), and Request Tracker (RT) at the Virtual Data Toolkit (VDT). Work is progressing on the Fermi National Accelerator Laboratory (FNAL) ServiceNow synchronization. This paper will explain the problems faced by OSG and how they led OSG to create and implement this ticket synchronization system along with the technical details that allow synchronization to be performed at a production level.}, bibtype = {article}, author = {Gross, K and Hayashi, S and Teige, S and Quick, R}, doi = {10.1088/1742-6596/396/6/062009}, journal = {Journal of Physics: Conference Series}, number = {PART 6} }
@techreport{ title = {XSEDE campus bridging use cases}, type = {techreport}, year = {2012}, id = {febd89c7-e11b-39c3-91c0-ca5d7c04f5f5}, created = {2018-02-27T18:07:30.862Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.578Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2012z}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Knepper, Richard and Grimshaw, Andrew and Foster, Ian and Bachmann, Felix and Lifka, David and Riedel, Morris and Tuecke, Steven} }
@inproceedings{ title = {A large scale evolutionary analysis of the internet autonomous system network}, type = {inproceedings}, year = {2012}, id = {209460a1-4783-3cf7-8962-fb003ffd6469}, created = {2018-02-27T18:07:31.069Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.069Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {In this paper we present a large scale longitudinal analysis of the ASN network, particularly studying the evolution of its size, degree distribution and clustering. The analyzed ASN snapshot encompasses information collected from a variety of sources over the course of five years. The study reveals several interesting trends about the evolutionary state of the ASN. © 2012 IEEE.}, bibtype = {inproceedings}, author = {Stewart, C. and Khan, J.I.}, doi = {10.1109/ITNG.2012.25}, booktitle = {Proceedings of the 9th International Conference on Information Technology, ITNG 2012} }
@techreport{ title = {Campus Bridging Use Case Quality Attribute Scenarios}, type = {techreport}, year = {2012}, id = {c88273d0-ca4b-307a-bf02-da088060f078}, created = {2018-02-27T18:07:31.212Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.604Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2012ba}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Knepper, Richard and Grimshaw, Andrew and Foster, Ian and Bachmann, Felix and Lifka, David and Riedel, Morris and Tuecke, Steven} }
@techreport{ title = {The IQ-wall and IQ-station--harnessing our collective intelligence to realize the potential of ultra-resolution and immersive visualization}, type = {techreport}, year = {2012}, publisher = {Idaho National Laboratory (INL)}, id = {f2f7330a-79a2-3cdb-b124-beead7c02671}, created = {2018-02-27T18:07:31.471Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.743Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert2012a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Wernert, Eric A and Sherman, William R and Eller, Chris and Reagan, David and Beard, Patrick D and Whiting, Eric T and O'Leary, Patrick} }
@article{ title = {Study of nuclear decays during a solar eclipse: Thule Greenland 2008}, type = {article}, year = {2012}, volume = {342}, id = {424929d5-c66f-38cd-abab-73759d0eabc7}, created = {2018-02-27T18:07:31.819Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.819Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Recent efforts to determine the cause of anomalous experimental nuclear decay fluctuations suggest a possible solar influence. Here we report on the results from several nuclear decay experiments performed at Thule Air Base in Greenland during the solar eclipse on 1 August 2008. Thule was ideal for this experiment due to its proximity to the magnetic north pole which amplified changes in the charged particle flux and provided relatively stabilized conditions for nearly all environmental factors. An exhaustive list of relevant factors was monitored during the eclipse to help rule out possible systematic effects in the event of unexpected results. We included measurements of temperature, pressure, and humidity as well as power supply outputs, neutron count rates, and the Earth's local electric and magnetic fields. Nuclear decay measurements of 14C, 90Sr, 99Tc, 210Bi, 234Pa, and 241Am were made using Geiger-Müller (GM) ionization chambers. Although our data exhibit no evidence for a statistically significant change in the decay rate of any nuclide measured during the 1 August 2008 solar eclipse, small anomalies remain to be understood. © 2012 US Government.}, bibtype = {article}, author = {Javorsek II, D. and Brewer, M.C. and Buncher, J.B. and Fischbach, E. and Gruenwald, J.T. and Heim, J. and Hoft, A.W. and Horan, T.J. and Kerford, J.L. and Kohler, M. and Lau, J.J. and Longman, A. and Mattes, J.J. and Mohsinally, T. and Newport, J.R. and Petrelli, M.A. and Stewart, C.A. and Jenkins, J.H. and Lee, R.H. and Morreale, B. and Morris, D.B. and Mudry, R. and O'Keefe, D. and Terry, B. and Silver, M.A. and Sturrock, P.A.}, doi = {10.1007/s10509-012-1148-9}, journal = {Astrophysics and Space Science}, number = {1} }
@inproceedings{ title = {Reordering virtual reality: Recording and recreating real-time experiences}, type = {inproceedings}, year = {2012}, keywords = {3d stereos; Archive; Dissemination; Interactive a,Data storage equipment; Digital libraries; Informa,Virtual reality}, volume = {8289}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84899055835&doi=10.1117%2F12.912053&partnerID=40&md5=d6833bd4ddac2771f76ab08e523ba9a6}, publisher = {SPIE}, city = {Burlingame, CA}, id = {9ef3c13a-fa7d-3e1a-8946-8287b5b57f01}, created = {2018-02-27T18:07:32.088Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.088Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Dolinsky2012}, source_type = {conference}, notes = {cited By 3; Conference of The Engineering Reality of Virtual Reality 2012 ; Conference Date: 24 January 2012 Through 25 January 2012; Conference Code:104653}, private_publication = {false}, abstract = {The proliferation of technological devices and artistic strategies has brought about an urgent and justifiable need to capture site-specific time-based virtual reality experiences. Interactive art experiences are specifically dependent on the orchestration of multiple sources including hardware, software, site-specific location, visitor inputs and 3D stereo and sensory interactions. Although a photograph or video may illustrate a particular component of the work, such as an illustration of the artwork or a sample of the sound, these only represent a fraction of the overall experience. This paper seeks to discuss documentation strategies that combine multiple approaches and capture the interactions between art projection, acting, stage design, sight movement, dialogue and audio design. © 2012 SPIE-IS&T.}, bibtype = {inproceedings}, author = {Dolinsky, M and Sherman, W and Wernert, E and Chi, Y C}, doi = {10.1117/12.912053}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering} }
@inproceedings{ title = {XSEDE12 panel: Security for science gateways and campus bridging}, type = {inproceedings}, year = {2012}, id = {9cbf4878-9ead-3d64-9c11-3266639f91d8}, created = {2018-02-27T18:07:32.901Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.901Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {The XSEDE science gateway and campus bridging programs share a mission to expand access to cyberinfrastructure, for scientific communities and campus researchers. Since the TeraGrid science gateway program began in 2003, science gateways have served researchers in a wide range of scientific disciplines, from astronomy to seismology. In its 2011 report, the NSF ACCI Task Force on Campus Bridging identified the critical need for seamless integration of cyberinfrastructure from the scientist's desktop to the local campus, to other campuses, and to regional, national, and international cyberinfrastructure. To effectively expand access to cyberinfrastructure across communities and campuses, XSEDE must address security challenges in areas such as identity/access management, accounting, risk assessment, and incident response. Interoperable authentication, as provided by the InCommon federation, enables researchers to conveniently sign on to access cyberinfrastructure across campus and across the region/nation/world. Coordinated operational protection and response, as provided by REN-ISAC, maintains the availability and integrity of highly connected cyberinfrastructure. Serving large communities of researchers across many campuses requires security mechanisms, processes, and policies to scale to new levels. This panel will discuss the security challenges introduced by science gateways and campus bridging, potential approaches for addressing these challenges (for example, leveraging InCommon and REN-ISAC), and plans for the future. Panelists will solicit requirements and recommendations from attendees as input to future work. © 2012 Authors.}, bibtype = {inproceedings}, author = {Basney, J. and Butler, R. and Fraser, D. and Marru, S. and Stewart, C.}, doi = {10.1145/2335755.2335863}, booktitle = {ACM International Conference Proceeding Series} }
@inproceedings{ title = {Trinity RNA-Seq assembler performance optimization}, type = {inproceedings}, year = {2012}, keywords = {Application performance,Biological materials,Critical parts,DNA seque,Management,Optimization,RNA}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84865314650&doi=10.1145%2F2335755.2335842&partnerID=40&md5=30fc3625a3985dfa9953003f6cd44c42}, city = {Chicago, IL}, id = {5923c630-d786-3caf-b5ae-cad42d948b40}, created = {2018-02-27T18:07:33.633Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.633Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Henschel2012}, source_type = {conference}, notes = {cited By 0; Conference of 1st Conference of the Extreme Science and Engineering Discovery Environment: Bridging from the eXtreme to the Campus and Beyond, XSEDE12 ; Conference Date: 16 July 2012 Through 19 July 2012; Conference Code:92061}, private_publication = {false}, abstract = {RNA-sequencing is a technique to study RNA expression in biological material. It is quickly gaining popularity in the field of transcriptomics. Trinity is a software tool that was developed for efficient de novo reconstruction of transcriptomes from RNA-Seq data. In this paper we first conduct a performance study of Trinity and compare it to previously published data from 2011. The version from 2011 is much slower than many other de novo assemblers and biologists have thus been forced to choose between quality and speed. We examine the runtime behavior of Trinity as a whole as well as its individual components and then optimize the most performance critical parts. We find that standard best practices for HPC applications can also be applied to Trinity, especially on systems with large amounts of memory. When combining best practices for HPC applications along with our specific performance optimization, we can decrease the runtime of Trinity by a factor of 3.9. This brings the runtime of Trinity in line with other de novo assemblers while maintaining superior quality. The purpose of this paper is to describe a series of improvements to Trinity, quantify the execution improvements achieved, and document the new version of the software. © 2012 ACM.}, bibtype = {inproceedings}, author = {Henschel, R and Nista, P M and Lieber, M and Haas, B J and Wu, L.-S. and Leduc, R D}, doi = {10.1145/2335755.2335842}, booktitle = {ACM International Conference Proceeding Series} }
@inproceedings{ title = {Exploiting HPC resources for the 3D-time series analysis of caries lesion activity}, type = {inproceedings}, year = {2012}, keywords = {3D models,Activity-based,CT Imag,Caries lesions,Computerized tomography,Content based retrieval,Three dimensional}, pages = {8}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84865327178&doi=10.1145%2F2335755.2335815&partnerID=40&md5=3d17f317032b8023fb6a20dec8aa3163}, city = {Chicago, IL}, id = {4a83fdac-c754-3018-8a07-2efd009c87ce}, created = {2018-02-27T18:07:33.784Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T18:55:43.751Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Zhang2012}, source_type = {conference}, notes = {cited By 0; Conference of 1st Conference of the Extreme Science and Engineering Discovery Environment: Bridging from the eXtreme to the Campus and Beyond, XSEDE12 ; Conference Date: 16 July 2012 Through 19 July 2012; Conference Code:92061}, private_publication = {false}, abstract = {We present a research framework to analyze 3D-time series caries lesion activity based on collections of SkyScan® μ-CT images taken at different times during the dynamic caries process. Analyzing caries progression (or reversal) is data-driven and computationally demanding. It involves segmenting high-resolution μ-CT images, constructing 3D models suitable for interactive visualization, and analyzing 3D and 4D (3D + time) dental images. Our development exploits XSEDE's supercomputing, storage, and visualization resources to facilitate the knowledge discovery process. In this paper, we describe the required image processing algorithms and then discuss the parallelization of these methods to utilize XSEDE's high performance computing resources. We then present a workflow for visualization and analysis using ParaView. This workflow enables quantitative analysis as well as three-dimensional comparison of multiple temporal datasets from the longitudinal dental research studies. Such quantitative assessment and visualization can help us to understand and evaluate the underlying processes that arise from dental treatment, and therefore can have significant impact in the clinical decision-making process and caries diagnosis. © 2012 ACM.}, bibtype = {inproceedings}, author = {Zhang, H and Henschel, R and Li, H and Kohara, E K and Boyles, M J and Ando, M}, doi = {10.1145/2335755.2335815}, booktitle = {Proceedings of the 1st Conference of the Extreme Science and Engineering Discovery Environment: Bridging from the eXtreme to the campus and beyond (XSEDE '12)} }
@article{ title = {Distributed Monitoring Infrastructure for Worldwide LHC Computing Grid}, type = {article}, year = {2012}, pages = {032002}, volume = {396}, city = {New York, NY}, id = {5427e4e2-cc79-3353-8a41-e5ab5d576971}, created = {2018-02-27T18:07:35.901Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-26T19:07:38.643Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Andrade2012}, source_type = {article}, notes = {cited By 2; Conference of International Conference on Computing in High Energy and Nuclear Physics 2012, CHEP 2012 ; Conference Date: 21 May 2012 Through 25 May 2012; Conference Code:95155}, private_publication = {false}, bibtype = {article}, author = {Andrade, P and Babik, M and Bhatt, K and Chand, P and Collados, D and Duggal, V and Fuente, P and Hayashi, Soichi and Imamagic, E and Joshi, P and Kalmady, R and Karnani, U and Kumar, V and Lapka, W and Quick, R and Tarragon, J and Teige, S and Triantafyllidis, C}, doi = {10.1088/1742-6596/396/3/032002}, journal = {Journal of Physics: Conference Series}, number = {3} }
@techreport{ title = {Indiana University Pervasive Technology Institute – Research Technologies: XSEDE Service Provider and XSEDE subcontract report (PY1: 1 July 2011 to 30 June 2012)}, type = {techreport}, year = {2012}, websites = {http://hdl.handle.net/2022/14702}, institution = {Indiana University}, id = {eba330d3-b8ef-3c66-b39e-706643470d76}, created = {2019-08-27T19:22:41.103Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-27T19:22:41.103Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, citation_key = {Stewart2012}, private_publication = {false}, abstract = {This document is a summary of the activities of the Research Technologies division of UITS, a Service & Cyberinfrastructure Center affiliated with the Indiana University Pervasive Technology Institute, as part of the eXtreme Science and Engineering Discovery Environment (XSEDE) during XSEDE Program Year 1 (1 July 2011 – 30 June 2012). This document consists of three parts: - Section 2 of this document describes IU’s activities as an XSEDE Service Provider, using the format prescribed by XSEDE for reporting such activities. - Section 3 of this document describes IU’s activities as part of XSEDE management, operations, and support activities funded under a subcontract from the National Center for Supercomputer Applications (NCSA), the lead organization for XSEDE. This section is organized by the XSEDE Work Breakdown Structure (WBS) plan. - Appendix 1 is a summary table of IU’s education, outreach, and training events funded and supported in whole or in part by IU’s subcontract from NCSA as part of XSEDE.}, bibtype = {techreport}, author = {Stewart, C.A. and Miller, T and Hancock, D.Y and Marru, S and Pierce, M and Link, M and Simms, SC and Seiffert, K and Wernert, J and Bolte, J} }
@techreport{ title = {2012 Annual Report - Advanced Biomedical Information Technology Core}, type = {techreport}, year = {2012}, keywords = {Technical Report}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/15229}, id = {92a14ef4-9944-3265-ab2b-b867cd54fca8}, created = {2020-09-10T21:44:59.455Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T21:44:59.455Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, bibtype = {techreport}, author = {Barnett, William K and Shankar, Ganesh and Hancock, David Y and Allen, Matt and Seiffert, Kurt and Boyles, Mike and Rogers, Jeffrey L and Wernert, Eric and Link, Matthew R and Stewart, Craig A} }
@techreport{ title = {Information technology in support of research, scholarship, and creative activities: A strategic plan for Research Technologies, a division of UITS and a PTI Service and Cyberinfrastructure Center}, type = {techreport}, year = {2012}, keywords = {Technical Report,research technologies,strategic plan}, websites = {http://hdl.handle.net/2022/14596}, id = {589ff963-04e2-3ce0-a00a-f3d9ad956d59}, created = {2020-09-10T22:23:09.351Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T22:23:09.351Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {IU is currently executing its second information technology strategic plan – Empowering People: Indiana University's Strategic Plan for Information Technology 2009 (hereafter referred to as Empowering People). In this document, we set out long-term goals for the Research Technologies (RT) division of UITS, reaffirm specific goals set for RT for 2019, describe Actions within Empowering People for which RT is responsible, and describe the new internal structure of Research Technologies. The mission of the Research Technologies division of UITS is to develop, deliver, and support advanced technology solutions that improve productivity of and enable new possibilities in research, scholarly endeavors, and creative activity at Indiana University and beyond; and to complement this with education and technology translation activities to improve the quality of life of people in Indiana, the nation, and the world.}, bibtype = {techreport}, author = {Stewart, Craig A and Link, Matthew R and Wernert, Eric and Barnett, William K and Miller, Therese} }
@techreport{ title = {Benchmarking an HP DL580 cluster at Indiana University (Mason)}, type = {techreport}, year = {2012}, keywords = {Technical Report,acceptance testing,ncgas}, websites = {http://hdl.handle.net/2022/14078}, id = {a41cf616-2758-3983-bc62-0a19cb1d7d63}, created = {2020-09-10T22:34:12.960Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T22:34:12.960Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Detailed system description and benchmark performance of Mason, an HP DL580 system installed in 2011.}, bibtype = {techreport}, author = {Stewart, Craig A and Link, Matthew R and Henschel, Robert and Hancock, David and Li, Huian} }
@article{ title = {A Science Driven Production Cyberinfrastructure-the Open Science Grid}, type = {article}, year = {2011}, keywords = {Arts computing; Throughput,Data-intensive computing; Distributed computing; G,Distributed computer systems}, pages = {201-218}, volume = {9}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79955721899&doi=10.1007%2Fs10723-010-9176-6&partnerID=40&md5=b00fc69c326b2a67098e77e565671776}, id = {6dd30ee3-369a-35dd-b642-b3bdefdde050}, created = {2018-02-27T18:07:25.298Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:25.298Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Altunay2011201}, source_type = {article}, notes = {cited By 21}, private_publication = {false}, abstract = {This article describes the Open Science Grid, a large distributed computational infrastructure in the United States which supports many different high-throughput scientific applications, and partners (federates) with other infrastructures nationally and internationally to form multi-domain integrated distributed systems for science. The Open Science Grid consortium not only provides services and software to an increasingly diverse set of scientific communities, but also fosters a collaborative team of practitioners and researchers who use, support and advance the state of the art in large-scale distributed computing. The scale of the infrastructure can be expressed by the daily throughput of around seven hundred thousand jobs, just under a million hours of computing, a million file transfers, and half a petabyte of data movement. In this paper we introduce and reflect on some of the OSG capabilities, usage and activities. © 2010 Springer Science+Business Media B.V. (outside the USA).}, bibtype = {article}, author = {Altunay, M and Avery, P and Blackburn, K and Bockelman, B and Ernst, M and Fraser, D and Quick, R and Gardner, R and Goasguen, S and Levshina, T and Livny, M and McGee, J and Olson, D and Pordes, R and Potekhin, M and Rana, A and Roy, A and Sehgal, C and Sfiligoi, I and Wuerthwein, F}, doi = {10.1007/s10723-010-9176-6}, journal = {Journal of Grid Computing}, number = {2} }
@techreport{ title = {Publications, presentations, and news pertaining to National Science Foundation grant number 0521433–MRI: Acquisition of a High-Speed, High Capacity Storage System to Support Scientific Computing: The Data Capacitor}, type = {techreport}, year = {2011}, id = {3ee22a6a-294e-34e7-b324-c08692462d75}, created = {2018-02-27T18:07:25.983Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.059Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2011t}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Simms, Stephen C} }
@inproceedings{ title = {A roadmap for using NSF cyberinfrastructure with InCommon}, type = {inproceedings}, year = {2011}, pages = {1}, websites = {http://dl.acm.org/citation.cfm?doid=2016741.2016771}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {39c240bf-61aa-3fcf-ac89-d4284e0a96e0}, created = {2018-02-27T18:07:26.243Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-11T16:36:20.624Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {The "Roadmap for Using NSF Cyberinfrastructure with InCommon" provides an in-depth discussion of benefits, challenges and practices for using the InCommon identity federation with National Science Foundation (NSF) cyberinfrastructure. In this abstract, we provide a summary of the Roadmap, the complete version of which can be found online [1]. © 2011 Authors.}, bibtype = {inproceedings}, author = {Welch, Von and Walsh, Alan and Barnett, William and Stewart, Craig A.}, doi = {10.1145/2016741.2016771}, booktitle = {Proceedings of the 2011 TeraGrid Conference on Extreme Digital Discovery - TG '11} }
@techreport{ title = {A Roadmap for Using NSF Cyberinfrastructure with InCommon: Abbreviated Version}, type = {techreport}, year = {2011}, id = {ec7c5a5b-491c-3288-9ed8-28b93161bd75}, created = {2018-02-27T18:07:27.157Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.714Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Barnett2011c}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Barnett, William and Welch, Von and Walsh, Alan and Stewart, Craig A} }
@techreport{ title = {Indiana University's Advanced Cyberinfrastructure}, type = {techreport}, year = {2011}, id = {d74087a7-f50d-3776-8a7d-8080cd6ecda5}, created = {2018-02-27T18:07:27.381Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-11T16:53:48.232Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Arenson2011}, source_type = {RPRT}, private_publication = {false}, abstract = {The purpose of this document is to introduce researchers to Indiana University’s cyberinfrastructure – to clarify what these facilities make possible, to discuss how to use them and the professional staff available to work with you. The resources described here are complex and varied, among the most advanced in the world. The intended audience is anyone unfamiliar with IU’s cyberinfrastructure.}, bibtype = {techreport}, author = {Arenson, Andrew and Boyles, Michael and Cruise, Robert and Gopu, Arvind and Hart, David and Lindenlaub, Peg and Papakhian, Mary and Samuel, John and Seiffert, Kurt and Shankar, Anurag} }
@inproceedings{ title = {Electronic poster: Performance studies of a molecular dynamics code}, type = {inproceedings}, year = {2011}, keywords = {Application programming interfaces (API); Stars,Code analysis; Configurable; Dresdens; Electronic,Scalability}, pages = {105-106}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84859019048&doi=10.1145%2F2148600.2148654&partnerID=40&md5=43c9fa0c4131be323c148c2a14186a2d}, city = {Seattle, WA}, id = {210dbd9b-22c8-38b6-b495-0f46ddd17bda}, created = {2018-02-27T18:07:27.410Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:27.410Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {William2011105}, source_type = {conference}, notes = {cited By 0; Conference of 2011 High Performance Computing Networking, Storage and Analysis, SC'11, Co-located with SC'11 ; Conference Date: 12 November 2011 Through 18 November 2011; Conference Code:89149}, private_publication = {false}, abstract = {A molecular dynamics code simulating the diffusion in dense nuclear matter in white dwarf stars is analyzed in this collaboration between PTI (Indiana University) and ZIH (Technische Universität Dresden). The code is highly configurable allowing MPI, OpenMP, or hybrid runs and additional fine-tuning with a range of parameters. The first step in the code analysis is to identify the best performing parameter set of the serial version. This configuration represents the most promising candidate for further studies. Aim of the parallel analysis is then to measure the scalability limits of the different parallel code implementations and to detect bottlenecks possibly preventing higher parallel efficiency. This work has been done with the parallel analysis framework Vampir.}, bibtype = {inproceedings}, author = {William, T and Berry, D K and Henschel, R}, doi = {10.1145/2148600.2148654}, booktitle = {SC'11 - Proceedings of the 2011 High Performance Computing Networking, Storage and Analysis Companion, Co-located with SC'11} }
@inproceedings{ title = {The shape of the TeraGrid: Analysis of TeraGrid users and projects as an affiliation network}, type = {inproceedings}, year = {2011}, keywords = {Centralized networks; Competition and cooperation;,Electric network analysis}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80052320504&doi=10.1145%2F2016741.2016799&partnerID=40&md5=6d956e6bc834b73b986d75768b8df1b3}, city = {Salt Lake City, UT}, id = {4f5d5a28-7d0d-38a9-80cb-e646a9279110}, created = {2018-02-27T18:07:27.746Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:27.746Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper2011}, source_type = {conference}, notes = {cited By 2; Conference of TeraGrid 2011 Conference: Extreme Digital Discovery, TG'11 ; Conference Date: 18 July 2011 Through 21 July 2011; Conference Code:86285}, private_publication = {false}, abstract = {I examine the makeup of the users and projects of the TeraGrid using social network analysis techniques. Analyzing the TeraGrid as an affiliation (two-mode) network allows for understanding the relationship between types of users and field of science and allocation size of projects. The TeraGrid data shows that while less than half of TeraGrid users are involved in projects that are connected to each other, a considerable core of the TeraGrid emerges that constitutes the most-commonly-related projects. The largest complete subgraph of TeraGrid users and projects constitutes a more dense and more centralized network core of TeraGrid users. I perform social network analysis on the largest complete subgraph in order to identify additional groupings of projects and users within the TeraGrid. This analysis of users and projects provides substantive information about the connections of individual scientists, projects groups, and fields of science in a large-scale environment that incorporates both competition and cooperation between actors. © 2011 Author.}, bibtype = {inproceedings}, author = {Knepper, R}, doi = {10.1145/2016741.2016799}, booktitle = {Proceedings of the TeraGrid 2011 Conference: Extreme Digital Discovery, TG'11} }
@techreport{ title = {Technical Report: Survey of cyberinfrastructure needs and interests of NSF-funded principal investigators}, type = {techreport}, year = {2011}, id = {0366026c-9153-3630-9044-e5e915cd2a3b}, created = {2018-02-27T18:07:28.733Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:11.868Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2011s}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Katz, Daniel S and Hart, David L and Lantrip, Dale and McCaulay, D Scott and Moore, Richard L} }
@inproceedings{ title = {Cyberinfrastructure Usage Modalities on the TeraGrid}, type = {inproceedings}, year = {2011}, keywords = {Component,Cyber infrastructures,Distributed parameter networks,Production grid}, pages = {932-939}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-83455229513&doi=10.1109%2FIPDPS.2011.239&partnerID=40&md5=1e102f58325dedaab0719725dcafbc9a,http://ieeexplore.ieee.org/document/6008940/}, month = {5}, publisher = {IEEE}, city = {Anchorage, AK}, id = {85d5f271-318f-3975-a733-5e772ff5c884}, created = {2018-02-27T18:07:29.528Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T18:48:09.611Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Katz2011932}, source_type = {conference}, notes = {cited By 7; Conference of 25th IEEE International Parallel and Distributed Processing Symposium, Workshops and Phd Forum, IPDPSW 2011 ; Conference Date: 16 May 2011 Through 20 May 2011; Conference Code:87731}, private_publication = {false}, abstract = {This paper is intended to explain how the TeraGrid would like to be able to measure "usage modalities." We would like to (and are beginning to) measure these modalities to understand what objectives our users are pursuing, how they go about achieving them, and why, so that we can make changes in the TeraGrid to better support them. © 2011 IEEE.}, bibtype = {inproceedings}, author = {Katz, Daniel S and Hart, David and Jordan, Chris and Majumdar, Amit and Navarro, J.P. and Smith, Warren and Towns, John and Welch, Von and Wilkins-Diehr, Nancy}, doi = {10.1109/IPDPS.2011.239}, booktitle = {Proceedings of the 2011 IEEE International Symposium on Parallel and Distributed Processing Workshops and PhD Forum (IPDPSW '11)} }
@techreport{ title = {Campus Bridging: Data and Networking Issues Workshop Report}, type = {techreport}, year = {2011}, id = {2c51ba8c-a431-3e91-bf04-89ee26c5f632}, created = {2018-02-27T18:07:29.604Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.013Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Almes2011a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Almes, Guy T and Jent, David and Stewart, Craig A} }
@article{ title = {HMMerthread: Detecting remote, functional conserved domains in entire genomes by combining relaxed sequence-database searches with fold recognition}, type = {article}, year = {2011}, keywords = {A kinase anchor protein 10; adaptor protein; prote,Algorithms; Amino Acid Sequence; Animals; Cell Di,Protein; Disease; Genome; Humans; Molecular Seque,Protein; Software,Secondary; Protein Structure,Tertiary; Proteome; Sequence Alignment; Sequence,accuracy; algorithm; article; carboxy terminal se}, volume = {6}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79952521511&doi=10.1371%2Fjournal.pone.0017568&partnerID=40&md5=3d7d36d8e9f6cb0ff30f854430ada610}, id = {cfe26a3d-1320-3897-b89f-8994684e685f}, created = {2018-02-27T18:07:29.703Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.703Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Bradshaw2011}, source_type = {article}, notes = {cited By 1}, private_publication = {false}, abstract = {Conserved domains in proteins are one of the major sources of functional information for experimental design and genome-level annotation. Though search tools for conserved domain databases such as Hidden Markov Models (HMMs) are sensitive in detecting conserved domains in proteins when they share sufficient sequence similarity, they tend to miss more divergent family members, as they lack a reliable statistical framework for the detection of low sequence similarity. We have developed a greatly improved HMMerThread algorithm that can detect remotely conserved domains in highly divergent sequences. HMMerThread combines relaxed conserved domain searches with fold recognition to eliminate false positive, sequence-based identifications. With an accuracy of 90%, our software is able to automatically predict highly divergent members of conserved domain families with an associated 3-dimensional structure. We give additional confidence to our predictions by validation across species. We have run HMMerThread searches on eight proteomes including human and present a rich resource of remotely conserved domains, which adds significantly to the functional annotation of entire proteomes. We find ~4500 cross-species validated, remotely conserved domain predictions in the human proteome alone. As an example, we find a DNA-binding domain in the C-terminal part of the A-kinase anchor protein 10 (AKAP10), a PKA adaptor that has been implicated in cardiac arrhythmias and premature cardiac death, which upon stress likely translocates from mitochondria to the nucleus/nucleolus. Based on our prediction, we propose that with this HLH-domain, AKAP10 is involved in the transcriptional control of stress response. Further remotely conserved domains we discuss are examples from areas such as sporulation, chromosome segregation and signalling during immune response. The HMMerThread algorithm is able to automatically detect the presence of remotely conserved domains in proteins based on weak sequence similarity. Our predictions open up new avenues for biological and medical studies. Genome-wide HMMerThread domains are available at http://vm1-hmmerthread.age.mpg.de. © 2011 Bradshaw et al.}, bibtype = {article}, author = {Bradshaw, C R and Surendranath, V and Henschel, R and Mueller, M S and Habermann, B H}, doi = {10.1371/journal.pone.0017568}, journal = {PLoS ONE}, number = {3} }
@techreport{ title = {Technical Report: Acceptance Test for FutureGrid IBM iDataPlex at Indiana University (India)}, type = {techreport}, year = {2011}, id = {8f6c4174-3b0c-3d7d-ab00-b121e6ef9b6b}, created = {2018-02-27T18:07:29.744Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.222Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Henschel2011}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Henschel, Robert and Stewart, Craig A and Link, Matthew R and McCaulay, D Scott and Hancock, David Y} }
@techreport{ title = {Economic development by the Indiana University Pervasive Technology Institute, Pervasive Technology Labs, and the Research Technologies Division of University Information Technology Services September 1999–June 2011: a public report}, type = {techreport}, year = {2011}, id = {920b53a1-0c18-3ece-bd5a-b6b8b7af4da2}, created = {2018-02-27T18:07:30.779Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.845Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2011u}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Miller, Therese} }
@article{ title = {GOC-TX: A reliable ticket synchronization application for the open science grid}, type = {article}, year = {2011}, keywords = {Algorithms; Electronic mail; Grid computing; High,Collaboration; Configurable; Fermilab; Grid users;,Open systems}, volume = {331}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84858144152&doi=10.1088%2F1742-6596%2F331%2F8%2F082013&partnerID=40&md5=d659728d05615d329dbe863bda2dd1b6}, city = {Taipei}, id = {77b6a685-8683-31f8-9ee6-49bd5f8cfd4d}, created = {2018-02-27T18:07:31.328Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.328Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hayashi2011}, source_type = {article}, notes = {cited By 3; Conference of International Conference on Computing in High Energy and Nuclear Physics, CHEP 2010 ; Conference Date: 18 October 2010 Through 22 October 2010; Conference Code:88870}, private_publication = {false}, abstract = {One of the major operational issues faced by large multi-institutional collaborations is permitting its users and support staff to use their native ticket tracking environment while also exchanging these tickets with collaborators. After several failed attempts at email-parser based ticket exchanges, the OSG Operations Group has designed a comprehensive ticket synchronizing application. The GOC-TX application uses web-service interfaces offered by various commercial, open source and other homegrown ticketing systems, to synchronize tickets between two or more of these systems. GOC-TX operates independently from any ticketing system. It can be triggered by one ticketing system via email, active messaging, or a web-services call to check for current sync-status, pull applicable recent updates since prior synchronizations to the source ticket, and apply the updates to a destination ticket. The currently deployed production version of GOC-TX is able to synchronize tickets between the Numara Footprints ticketing system used by the OSG and the following systems: European Grid Initiative's system Global Grid User Support (GGUS) and the Request Tracker (RT) system used by Brookhaven. Additional interfaces to the BMC Remedy system used by Fermilab, and to other instances of RT used by other OSG partners, are expected to be completed in summer 2010. A fully configurable open source version is expected to be made available by early autumn 2010. This paper will cover the structure of the GOC-TX application, its evolution, and the problems encountered by OSG Operations group with ticket exchange within the OSG Collaboration.}, bibtype = {article}, author = {Hayashi, S and Gopu, A and Quick, R}, doi = {10.1088/1742-6596/331/8/082013}, journal = {Journal of Physics: Conference Series}, number = {PART 8} }
@techreport{ title = {Campus Bridging: Campus Leadership Engagement in Building a Coherent Campus Cyberinfrastructure Workshop Report}, type = {techreport}, year = {2011}, id = {8c594b9d-1cdf-3e21-80fe-32dece04d2cb}, created = {2018-02-27T18:07:31.808Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.358Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Dreher2011c}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Dreher, Patrick and Ahalt, Stan and Almes, Guy and Mundrane, Michael and Pepin, James and Stewart, Craig A} }
@techreport{ title = {Technical Report: TeraGrid eXtreme Digital Campus Cyberinfrastructure and Campus Bridging Requirements Elicitation Meeting}, type = {techreport}, year = {2011}, id = {9af9d6e3-3f0e-3f14-97e8-3a62af999309}, created = {2018-02-27T18:07:32.353Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.671Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2011v}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and McCaulay, D Scott and Jul, Susanne and Moore, Richard L} }
@techreport{ title = {Campus Bridging Birds-of-a-Feather Session at TeraGrid 2011 Conference}, type = {techreport}, year = {2011}, id = {916371f9-5461-326e-9a62-6bda1df4bfe8}, created = {2018-02-27T18:07:32.545Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.207Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Knepper2011d}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Knepper, Richard and Stewart, Craig A} }
@inproceedings{ title = {Incorporating an advanced maintenance strategy improves equipment reliability and reduces cement plant costs}, type = {inproceedings}, year = {2011}, id = {a1b221cd-f949-302d-9e4c-57687021a88c}, created = {2018-02-27T18:07:32.939Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.939Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Deployment of a proactive maintenance approach and predictive maintenance technology with existing preventive and corrective maintenance actions has improved reliability of assets and stabilized maintenance costs at a 58-year-old cement production facility. This advanced maintenance strategy with identification of a bad actors list was adopted for crucial cement plant clinker burning and milling operations to reduce failure rates and increase equipment production time. The bad actors list led the plant to a more pronounced reliability-centered predictive maintenance program enabled through the computerized maintenance management system. The new program has quality results attached to the PMs along with measurable results to track failure rates and to predict corrective actions for equipment health. Failure rates associated with the key performance indicators in the kiln, raw mill and finish mill operations have trended lower with each successive year of advanced maintenance strategy implementation. Equipment reliability remains improved even with budgetary changes associated with a recessed economy. This adoption of a proactive maintenance approach with predictive maintenance technologies led to a decreasing failure rate of all bearing types utilized throughout the plant. The average dollar savings in replacement bearing costs from 2006 through 2009, using 2005 as a baseline year, has been $96,000 per year. © 2011 IEEE.}, bibtype = {inproceedings}, author = {Conklin, C. and Stewart, C. and Kurosky, J.}, doi = {10.1109/CITCON.2011.5934564}, booktitle = {IEEE Cement Industry Technical Conference (Paper)} }
@techreport{ title = {A roadmap for using NSF cyberinfrastructure with InCommon}, type = {techreport}, year = {2011}, websites = {http://hdl.handle.net/2022/13024}, id = {b5a541ef-b1ff-3927-9545-1ed06c694441}, created = {2018-02-27T18:07:32.950Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-11T16:36:20.824Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Barnett2011d}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Barnett, William and Welch, Von and Walsh, Alan and Stewart, Craig A} }
@inproceedings{ title = {Analysis of virtualization technologies for high performance computing environments}, type = {inproceedings}, year = {2011}, keywords = {Cloud computing; Computer software selection and evaluation; Distributed systems; High performance computing}, pages = {9-16}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80053156886&doi=10.1109%2FCLOUD.2011.29&partnerID=40&md5=9b37566e08b56cb54c83c9be6b8f615d}, city = {Washington, DC}, id = {142d8646-84d5-3570-ad67-cb00bd7ac91a}, created = {2018-02-27T18:07:33.611Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.611Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Younge20119}, source_type = {conference}, notes = {cited By 99; Conference of 2011 IEEE 4th International Conference on Cloud Computing, CLOUD 2011 ; Conference Date: 4 July 2011 Through 9 July 2011; Conference Code:86655}, private_publication = {false}, abstract = {As Cloud computing emerges as a dominant paradigm in distributed systems, it is important to fully understand the underlying technologies that make Clouds possible. One technology, and perhaps the most important, is virtualization. Recently virtualization, through the use of hypervisors, has become widely used and well understood by many. However, there is a large spread of different hypervisors, each with its own advantages and disadvantages. This paper provides an in-depth analysis of some of today's commonly accepted virtualization technologies, from feature comparison to performance analysis, focusing on the applicability to High Performance Computing environments using FutureGrid resources. The results indicate that virtualization sometimes introduces slight performance impacts depending on the hypervisor type; however, the benefits of such technologies are profound, and not all virtualization technologies are equal. From our experience, the KVM hypervisor is the optimal choice for supporting HPC applications within a Cloud infrastructure. © 2011 IEEE.}, bibtype = {inproceedings}, author = {Younge, A J and Henschel, R and Brown, J T and Von Laszewski, G and Qiu, J and Fox, G C}, doi = {10.1109/CLOUD.2011.29}, booktitle = {Proceedings - 2011 IEEE 4th International Conference on Cloud Computing, CLOUD 2011} }
@article{ title = {Power spectrum analyses of nuclear decay rates}, type = {article}, year = {2010}, volume = {34}, id = {67cc2e8f-8f06-3102-9d7d-161ee60802e4}, created = {2018-02-27T18:07:25.094Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:25.094Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {We provide the results from a spectral analysis of nuclear decay data displaying annually varying periodic fluctuations. The analyzed data were obtained from three distinct data sets: 32Si and 36Cl decays reported by an experiment performed at the Brookhaven National Laboratory (BNL), 56Mn decay reported by the Children's Nutrition Research Center (CNRC), but also performed at BNL, and 226Ra decay reported by an experiment performed at the Physikalisch-Technische Bundesanstalt (PTB) in Germany. All three data sets exhibit the same primary frequency mode consisting of an annual period. Additional spectral comparisons of the data to local ambient temperature, atmospheric pressure, relative humidity, Earth-Sun distance, and their reciprocals were performed. No common phases were found between the factors investigated and those exhibited by the nuclear decay data. This suggests that either a combination of factors was responsible, or that, if it was a single factor, its effects on the decay rate experiments are not a direct synchronous modulation. We conclude that the annual periodicity in these data sets is a real effect, but that further study involving additional carefully controlled experiments will be needed to establish its origin. © 2010 Elsevier B.V. All rights reserved.}, bibtype = {article}, author = {Javorsek, D. and Sturrock, P.A. and Lasenby, R.N. and Lasenby, A.N. and Buncher, J.B. and Fischbach, E. and Gruenwald, J.T. and Hoft, A.W. and Horan, T.J. and Jenkins, J.H. and Kerford, J.L. and Lee, R.H. and Longman, A. and Mattes, J.J. and Morreale, B.L. and Morris, D.B. and Mudry, R.N. and Newport, J.R. and O'Keefe, D. and Petrelli, M.A. and Silver, M.A. and Stewart, C.A. and Terry, B.}, doi = {10.1016/j.astropartphys.2010.06.011}, journal = {Astroparticle Physics}, number = {3} }
@inproceedings{ title = {A distributed workflow for an astrophysical OpenMP application: Using the data capacitor over WAN to enhance productivity}, type = {inproceedings}, year = {2010}, keywords = {Application programming interfaces (API); Data capacitor; File systems; Lustre; OpenMP; Wide area networks}, pages = {644-650}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78649988288&doi=10.1145%2F1851476.1851571&partnerID=40&md5=8b963adcab6d7eee29876b97e4e3da99}, city = {Chicago, IL}, id = {b917fdcd-4412-3b59-adcd-04217599d90e}, created = {2018-02-27T18:07:25.511Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:25.511Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, citation_key = {Henschel2010644}, source_type = {conference}, notes = {cited By 3; Conference of 19th ACM International Symposium on High Performance Distributed Computing, HPDC 2010 ; Conference Date: 21 June 2010 Through 25 June 2010; Conference Code:82622}, private_publication = {false}, abstract = {Astrophysical simulations of protoplanetary disks and gas giant planet formation are being performed with a variety of numerical methods. Some of the codes in use today have been producing scientifically significant results for several years, or even decades. Each must simulate millions of resolution elements for millions of time steps, capture and store output data, and rapidly and efficiently analyze this data. To do this effectively, a parallel code is needed that scales to tens or hundreds of processors. Furthermore, an efficient workflow for the transport, analysis, and interpretation of the output data is needed to achieve scientifically meaningful results. Since such simulations are usually performed on moderate to large parallel systems, the compute system is generally located at a remote institution. However, analysis of results is typically performed interactively, and due to the fact that most supercomputing centers do not offer dedicated interactive nodes, the transfer of simulation output data to local resources becomes necessary. Even if interactive resources were available, typical network latencies make X-forwarded displays nearly impossible to work with. Since data sets can be quite large and traditional transfer mechanisms such as scp and sftp offer relatively low throughput, this transfer of data sets becomes a bottleneck in the research workflow. In this article we measure the scalability of the Computational HYdrodynamics with MultiplE Radiation Algorithms (CHYMERA) code on the SGI Altix architecture. We find that it scales well up to 64 threads for moderate and large sized problems. We also present a novel approach to enable rapid transfer and analysis of simulation data via the Data Capacitor (DC) and Lustre WAN (Wide Area Network) [17]. The usage of a WAN file system to tie batch-system-operated compute resources and interactive analysis and visualization resources together is of general interest and can be applied broadly. Copyright 2010 ACM.}, bibtype = {inproceedings}, author = {Henschel, R and Michael, S and Simms, S}, doi = {10.1145/1851476.1851571}, booktitle = {HPDC 2010 - Proceedings of the 19th ACM International Symposium on High Performance Distributed Computing} }
@article{ title = {Implementation of a shared data repository and common data dictionary for fetal alcohol spectrum disorders research}, type = {article}, year = {2010}, pages = {643-647}, volume = {44}, websites = {https://linkinghub.elsevier.com/retrieve/pii/S0741832909001530,http://hdl.handle.net/2022/7194}, month = {11}, id = {5906d916-675e-3c43-bf64-b425c5fe5cc4}, created = {2018-02-27T18:07:26.818Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:06:46.297Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {Many previous attempts by fetal alcohol spectrum disorders researchers to compare data across multiple prospective and retrospective human studies have failed because of both structural differences in the collected data and difficulty in coming to agreement on the precise meaning of the terminology used to describe the collected data. Although some groups of researchers have an established track record of successfully integrating data, attempts to integrate data more broadly among different groups of researchers have generally faltered. Lack of tools to help researchers share and integrate data has also hampered data analysis. This situation has delayed improving diagnosis, intervention, and treatment before and after birth. We worked with various researchers and research programs in the Collaborative Initiative on Fetal Alcohol Spectrum Disorders (CI-FASD) to develop a set of common data dictionaries to describe the data to be collected, including definitions of terms and specification of allowable values. The resulting data dictionaries were the basis for creating a central data repository (CI-FASD Central Repository) and software tools to input and query data. Data entry restrictions ensure that only data that conform to the data dictionaries reach the CI-FASD Central Repository. The result is an effective system for centralized and unified management of the data collected and analyzed by the initiative, including a secure, long-term data repository. CI-FASD researchers are able to integrate and analyze data of different types, using multiple methods, and collected from multiple populations, and data are retained for future reuse in a secure, robust repository. © 2010 Elsevier Inc.}, bibtype = {article}, author = {Arenson, Andrew D. and Bakhireva, Ludmila N. and Chambers, Christina D. and Deximo, Christina A. and Foroud, Tatiana and Jacobson, Joseph L. and Jacobson, Sandra W. and Jones, Kenneth Lyons and Mattson, Sarah N. and May, Philip A. and Moore, Elizabeth S. and Ogle, Kimberly and Riley, Edward P. and Robinson, Luther K. and Rogers, Jeffrey and Streissguth, Ann P. and Tavares, Michel C. and Urbanski, Joseph and Yezerets, Yelena and Surya, Radha and Stewart, Craig A. and Barnett, William K.}, doi = {10.1016/j.alcohol.2009.08.007}, journal = {Alcohol}, number = {7-8} }
@inproceedings{ title = {A compelling case for a centralized filesystem on the TeraGrid: enhancing an astrophysical workflow with the data capacitor WAN as a test case}, type = {inproceedings}, year = {2010}, pages = {13}, publisher = {ACM}, id = {e7a5573c-9c4b-31d0-ba18-d4302840a915}, created = {2018-02-27T18:07:27.702Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.547Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Michael2010a}, source_type = {CONF}, private_publication = {false}, bibtype = {inproceedings}, author = {Michael, Scott and Simms, Stephen and Breckenridge III, W B and Smith, Roger and Link, Matthew}, doi = {10.1145/1838574.1838587}, booktitle = {Proceedings of the 2010 TeraGrid Conference} }
@article{ title = {Systems survey of endocytosis by multiparametric image analysis}, type = {article}, year = {2010}, keywords = {Bayesian analysis; gene; genome; image analysis; Computer-Assisted; Metabolic Networks and Pathways; Computing Methodologies; Endocytosis; Endosomes; Confocal; Phenotype; Protein Transport; RNA Interference; Bayes theorem; confocal microscopy; cell adhesion molecule; epidermal growth factor}, pages = {243-249}, volume = {464}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77949424084&doi=10.1038%2Fnature08779&partnerID=40&md5=ea02851128c73be61c7e47eb4f125acf}, id = {7801d0a7-4679-336b-b893-8d8080b9acf4}, created = {2018-02-27T18:07:28.010Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:28.010Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Collinet2010243}, source_type = {article}, notes = {cited By 236}, private_publication = {false}, abstract = {Endocytosis is a complex process fulfilling many cellular and developmental functions. Understanding how it is regulated and integrated with other cellular processes requires a comprehensive analysis of its molecular constituents and general design principles. Here, we developed a new strategy to phenotypically profile the human genome with respect to transferrin (TF) and epidermal growth factor (EGF) endocytosis by combining RNA interference, automated high-resolution confocal microscopy, quantitative multiparametric image analysis and high-performance computing. We identified several novel components of endocytic trafficking, including genes implicated in human diseases. We found that signalling pathways such as Wnt, integrin/cell adhesion, transforming growth factor (TGF)-β and Notch regulate the endocytic system, and identified new genes involved in cargo sorting to a subset of signalling endosomes. A systems analysis by Bayesian networks further showed that the number, size, concentration of cargo and intracellular position of endosomes are not determined randomly but are subject to specific regulation, thus uncovering novel properties of the endocytic system. © 2010 Macmillan Publishers Limited. All rights reserved.}, bibtype = {article}, author = {Collinet, C and Stöter, M and Bradshaw, C R and Samusik, N and Rink, J C and Kenski, D and Habermann, B and Buchholz, F and Henschel, R and Mueller, M S and Nagel, W E and Fava, E and Kalaidzidis, Y and Zerial, M}, doi = {10.1038/nature08779}, journal = {Nature}, number = {7286} }
@inproceedings{ title = {Preliminary results from nuclear decay experiments performed during the solar eclipse of August 1, 2008}, type = {inproceedings}, year = {2010}, volume = {1265}, id = {126eab8b-4048-374f-a90b-b022c98b0508}, created = {2018-02-27T18:07:28.916Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.999Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {JavorsekII2010}, private_publication = {false}, abstract = {Recent developments in efforts to determine the cause of anomalous experimental nuclear decay fluctuations suggest a possible solar influence. Here we report on the preliminary results from several nuclear decay experiments performed at Thule Air Base in Greenland during the Solar Eclipse that took place on 1 August 2008. Because of the high northern latitude and time of year, the Sun never set and thereby provided relatively stabilized conditions for nearly all environmental factors. An exhaustive list of relevant factors was monitored during the eclipse to help rule out possible systematic effects due to external influences. In addition to the normal temperature, pressure, humidity, and cloud cover associated with the outside ambient observations, we included similar measurements within the laboratory along with monitoring of the power supply output, local neutron count rates, and the Earth's local magnetic and electric fields. © 2010 American Institute of Physics.}, bibtype = {inproceedings}, author = {Javorsek II, D. and Kerford, J.L. and Stewart, C.A. and Buncher, J.B. and Fischbach, E. and Gruenwald, J.T. and Heim, J. and Hoft, A.W. and Horan, T.J. and Jenkins, J.H. and Kohler, M. and Lee, R.H. and Longman, A. and Mattes, J.J. and Mohsinally, T. and Morreale, B. and Morris, D.B. and Mudry, R. and Newport, J.R. and O'Keefe, D. and Petrelli, M.A. and Silver, M.A. and Sturrock, P.A. and Terry, B. and Willenberg, H.}, doi = {10.1063/1.3480162}, booktitle = {AIP Conference Proceedings} }
@inproceedings{ title = {A revolutionary new paradigm for the reduction and analysis of astronomical images}, type = {inproceedings}, year = {2010}, keywords = {Astronomical images; Cyber infrastructures; Astronomy; Data handling; Image analysis; Imaging; Pipeline processing systems}, pages = {168-175}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79951871691&doi=10.1109%2FeScience.2010.14&partnerID=40&md5=535e02f368c46446d0790af441fba2b1}, city = {Brisbane, QLD}, id = {2cc4aabc-7d29-39ad-9996-42e6d694ea0b}, created = {2018-02-27T18:07:29.014Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.014Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, citation_key = {Michael2010168}, source_type = {conference}, notes = {cited By 0; Conference of 2010 6th IEEE International Conference on e-Science, eScience 2010 ; Conference Date: 7 December 2010 Through 10 December 2010; Conference Code:83861}, private_publication = {false}, abstract = {In this article we propose a revolutionary new paradigm for the processing and analysis of astronomical image data. We describe a blueprint for a centralized data repository and processing system, which leverages national cyberinfrastructure. Included is a brief discussion of the current paradigm in astronomical image processing. The upcoming One Degree Imager (ODI) instrument, to be installed at the Wisconsin, Indiana, Yale and NOAO (WIYN) observatory in 2011, is examined as an ideal use case. Details on the major components and a detailed workflow in the case of data processing for the ODI instrument are highlighted. © 2010 IEEE.}, bibtype = {inproceedings}, author = {Michael, S and Knezek, P and Stobie, E and Henschel, R and Simms, S}, doi = {10.1109/eScience.2010.14}, booktitle = {Proceedings - 2010 6th IEEE International Conference on e-Science, eScience 2010} }
@inproceedings{ title = {Leveraging Pre-Existing Resources at Institutions of Higher Education for K-12 STEM Engagement}, type = {inproceedings}, year = {2010}, pages = {143-151}, publisher = {Association for the Advancement of Computing in Education (AACE)}, id = {07989324-bb61-3c9d-819f-3ee16bfd40a3}, created = {2018-02-27T18:07:29.305Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.080Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Boyles2010}, source_type = {CONF}, private_publication = {false}, bibtype = {inproceedings}, author = {Boyles, Michael and Frend, Chauney and Rogers, Jeff and William, Albert and Reagan, David and Wernert, Eric}, booktitle = {EdMedia: World Conference on Educational Media and Technology} }
@techreport{ title = {Cyberinfrastructure Software Sustainability and Reusability: Report from an NSF-funded workshop}, type = {techreport}, year = {2010}, publisher = {Indiana University}, id = {90f07e29-5266-3d22-b0df-e0512a86d566}, created = {2018-02-27T18:07:30.557Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.935Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2010m}, source_type = {JOUR}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Almes, Guy T and Wheeler, Bradley C} }
@inproceedings{ title = {What is cyberinfrastructure}, type = {inproceedings}, year = {2010}, pages = {37}, websites = {http://dx.doi.org/10.1145/1878335.1878347,http://portal.acm.org/citation.cfm?doid=1878335.1878347}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {853df8fe-3a0f-3109-8dfa-eddaa2111828}, created = {2018-02-27T18:07:32.448Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:06:46.287Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2010l}, source_type = {inproceedings}, private_publication = {false}, abstract = {Cyberinfrastructure is a word commonly used but lacking a single, precise definition. One recognizes intuitively the analogy with infrastructure, and the use of cyber to refer to thinking or computing - but what exactly is cyberinfrastructure as opposed to information technology infrastructure? Indiana University has developed one of the more widely cited definitions of cyberinfrastructure: Cyberinfrastructure consists of computing systems, data storage systems, advanced instruments and data repositories, visualization environments, and people, all linked together by software and high performance networks to improve research productivity and enable breakthroughs not otherwise possible. A second definition, more inclusive of scholarship generally and educational activities, has also been published and is useful in describing cyberinfrastructure: Cyberinfrastructure consists of computational systems, data and information management, advanced instruments, visualization environments, and people, all linked together by software and advanced networks to improve scholarly productivity and enable knowledge breakthroughs and discoveries not otherwise possible. In this paper, we describe the origin of the term cyberinfrastructure based on the history of the root word infrastructure, discuss several terms related to cyberinfrastructure, and provide several examples of cyberinfrastructure. © 2010 ACM.}, bibtype = {inproceedings}, author = {Stewart, Craig A and Simms, Stephen and Plale, Beth and Link, Matthew and Hancock, David Y. and Fox, Geoffrey C.}, doi = {10.1145/1878335.1878347}, booktitle = {Proceedings of the 38th annual fall conference on SIGUCCS - SIGUCCS '10} }
@inproceedings{ title = {Enabling Lustre WAN for production use on the TeraGrid: a lightweight UID mapping scheme}, type = {inproceedings}, year = {2010}, pages = {19}, publisher = {ACM}, id = {6ff7041c-04fb-3b73-b46a-2082b1e62fdd}, created = {2018-02-27T18:07:32.722Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.486Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Walgenbach2010a}, source_type = {CONF}, private_publication = {false}, bibtype = {inproceedings}, author = {Walgenbach, Joshua and Simms, Stephen C and Westneat, Kit and Miller, Justin P}, booktitle = {Proceedings of the 2010 TeraGrid Conference} }
@article{ title = {IQ-station: A low cost portable immersive environment}, type = {article}, year = {2010}, keywords = {Commercial off-the-shelf technology; Display systems; Cost benefit analysis; Costs; Helmet mounted displays; Virtual reality}, pages = {361-372}, volume = {6454 LNCS}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78650784289&doi=10.1007%2F978-3-642-17274-8_36&partnerID=40&md5=0aa7217fcdd7af6e8033a911aa071001}, city = {Las Vegas, NV}, id = {db7ef348-4c9e-3ae6-9cc4-6a2cf9bfba6b}, created = {2018-02-27T18:07:33.368Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.368Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Sherman2010361}, source_type = {article}, notes = {cited By 5; Conference of 6th International, Symposium on Visual Computing, ISVC 2010 ; Conference Date: 29 November 2010 Through 1 December 2010; Conference Code:83294}, private_publication = {false}, abstract = {The emergence of inexpensive 3D-TVs, affordable input and rendering hardware and open-source software has created a yeasty atmosphere for the development of low-cost immersive systems. A low cost system (here dubbed an IQ-station), fashioned from commercial off-the-shelf technology (COTS), coupled with targeted immersive applications can be a viable laboratory instrument for enhancing scientific workflow for exploration and analysis. The use of an IQ-station in a laboratory setting also has the potential of quickening the adoption of a more sophisticated immersive environment as a critical enabler in modern scientific and engineering workflows. Prior work in immersive environments generally required special purpose display systems, such as a head mounted display (HMD) or a large projector-based implementation, which have limitations in terms of cost, usability, or space requirements. The alternative platform presented here effectively addresses those limitations. This work brings together the needed hardware and software components to create a fully integrated immersive display and interface system that can be readily deployed in laboratories and common workspaces. By doing so, it is now feasible for immersive technologies to be included in researchers' day-to-day workflows. The IQ-station sets the stage for much wider adoption of immersive interfaces outside the small communities of virtual reality centers. In spite of this technical progress, the long-term success of these systems depends on resolving several important issues related to users and support. Key among these issues are: to what degree should hardware and software be customized; what applications and content are available; and how can a community be developed? © 2010 Springer-Verlag.}, bibtype = {article}, author = {Sherman, W R and O'Leary, P and Whiting, E T and Grover, S and Wernert, E A}, doi = {10.1007/978-3-642-17274-8_36}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, number = {PART 2} }
@techreport{ title = {Progress Report on Implementation of Recommendations from the Indiana University Cyberinfrastructure Research Taskforce}, type = {techreport}, year = {2010}, keywords = {Other,cyberinfrastructure,digital data,empowering people,strategy,supercomputing}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/469}, id = {8993ad5f-b6cc-396a-ad3e-fbec61f326bf}, created = {2020-09-10T23:12:46.084Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T23:12:46.084Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This document refers to the Final Report of the Indiana University Cyberinfrastructure Research Taskforce at: http://hdl.handle.net/2022/469. The summary that follows conveys progress-to-date on the ten recommendations in the CRT final report. Even as we face unprecedented budgetary challenges, IU aims to maintain "excellent facilities for research and education," as stated by President McRobbie. In this memo I summarize some of the key activities undertaken in response to the 2005 Cyberinfrastructure Research Taskforce report by the Office of the Vice President for Information Technology (OVPIT), University Information Technology Services (UITS), and its partners in developing and providing IU's advanced research cyberinfrastructure, particularly the School of Informatics and Computing (SOIC), Digital Library Program (DLP), and Pervasive Technology Institute (PTI). (Note that PTI is itself a collaborative effort of the School of Informatics and Computing, Maurer School of Law, Office of the Vice President for Information Technology, and University Information Technology Services.) CRT Recommendation #1: Indiana University should continue investments in core IT infrastructure that is a foundation for IU's advanced cyberinfrastructure. The university should expand the successful principles of equipment life cycle budgeting in line with the ITSP to all levels (schools, departments, etc.) to ensure the long-term sustainability of the core IT infrastructure required by scholars. The expansion of life cycle budgeting approaches for all core infrastructure remains a goal, and an increasingly difficult one in tough budget times. A recent review of the life-cycle funding (LCF) program has also assessed how new models of virtualization can provide more sustainable options for some services that had formerly relied on local equipment refreshes. University Information Technology Services (UITS) continues to manage core infrastructure, such as public computing labs and classrooms, with required LCF for any expansion. The new IU Bloomington Data Center also represents a critical element of IU's core information technology infrastructure. This hardened facility plays a key role in protecting valuable}, bibtype = {techreport}, author = {Stewart, Craig A} }
@inproceedings{ title = {Virtual simulation for lighting and design education}, type = {inproceedings}, year = {2009}, keywords = {Architectural design; Lighting; Students; Arts and humanities; Global illumination; Virtual reality}, pages = {275-276}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-67649756265&doi=10.1109%2FVR.2009.4811052&partnerID=40&md5=ec688ef5bd3231d8fbb3208fd0f3f082}, city = {Lafayette, LA}, id = {6980e126-82ee-31d4-9bc3-a9b3d3caa468}, created = {2018-02-27T18:07:27.321Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:27.321Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Boyles2009275}, source_type = {conference}, notes = {cited By 1; Conference of VR 2009 - IEEE Virtual Reality 2009 ; Conference Date: 14 March 2009 Through 18 March 2009; Conference Code:76406}, private_publication = {false}, abstract = {The study of lighting in architectural and interior design education is diverse and difficult. It has been shown that static computer-generated imagery can adequately represent real-world environments for subjective lighting analysis as long as the software accurately reproduces certain light distributions. This paper describes a prototype environment that explores an alternative education tool for studying interior lighting environments through the use of global illumination simulations in a virtual environment. Modern virtual reality technology affords us the luxury of not only achieving a high quality visual experience but also allowing the student to navigate through a space and interactively adjust lighting parameters. We describe our experience creating such an environment as well as the subjective interpretation of student users. © 2009 IEEE.}, bibtype = {inproceedings}, author = {Boyles, M and Rogers, J and Goreham, K and Frank, M A and Cowan, A J}, doi = {10.1109/VR.2009.4811052}, booktitle = {Proceedings - IEEE Virtual Reality} }
@techreport{ title = {Application benchmark results for Big Red, an IBM e1350 BladeCenter Cluster}, type = {techreport}, year = {2009}, id = {6c139c74-8c2b-3421-a8d5-1e75fdd12fb5}, created = {2018-02-27T18:07:28.580Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:16.133Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2009n}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Link, Matthew and McCaulay, D Scott and Rodgers, Greg and Turner, George and Hancock, David and Wang, Peng and Saied, Faisal and Pierce, Marlon and Aiken, Ross} }
@inproceedings{ title = {Investigation of periodic nuclear decay data with spectral analysis techniques}, type = {inproceedings}, year = {2009}, volume = {1182}, id = {982a74f9-c2c8-336a-bb6c-39a80b10fccc}, created = {2018-02-27T18:07:29.441Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.441Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {We provide the results from a spectral analysis of nuclear decay experiments displaying unexplained periodic fluctuations. The analyzed data were from 56Mn decay reported by the Children's Nutrition Research Center in Houston, 32Si decay reported by an experiment performed at the Brookhaven National Laboratory, and 226Ra decay reported by an experiment performed at the Physikalisch-Technische Bundesanstalt in Germany. All three data sets possess the same primary frequency mode consisting of an annual period. Additionally, a spectral comparison of the local ambient temperature, atmospheric pressure, relative humidity, Earth-Sun distance, and the plasma speed and latitude of the heliospheric current sheet (HCS) was performed. Following analysis of these six possible causal factors, their reciprocals, and their linear combinations, a possible link between nuclear decay rate fluctuations and the linear combination of the HCS latitude and 1/R motivates searching for a possible mechanism with such properties.}, bibtype = {inproceedings}, author = {Javorsek II, D. and Sturrock, P. and Buncher, J. and Fischbach, E. and Gruenwald, T. and Hoft, A. and Horan, T. and Jenkins, J. and Kerford, J. and Lee, R. and Mattes, J. and Morris, D. and Mudry, R. and Newport, J. and Petrelli, M. and Silver, M. and Stewart, C. and Terry, B. and Willenberg, H.}, doi = {10.1063/1.3293804}, booktitle = {AIP Conference Proceedings} }
@inproceedings{ title = {Virtual reality technology and the teaching of architectural lighting}, type = {inproceedings}, year = {2009}, keywords = {Architectural design; Curricula; E-learning; Education; Architectural lighting; Effective teaching; Lighting}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029124213&partnerID=40&md5=61d5d49b7b69fffa461136e8fb1c3f55}, publisher = {American Society for Engineering Education}, city = {Austin, TX}, id = {b002a315-7fd6-3d4a-81f5-45585f8ed05b}, created = {2018-02-27T18:07:30.129Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.129Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Frank2009}, source_type = {conference}, notes = {cited By 0; Conference of 2009 ASEE Annual Conference and Exposition ; Conference Date: 14 June 2009 Through 17 June 2009; Conference Code:77079}, private_publication = {false}, abstract = {The study of lighting in architectural and interior design education is diverse. It ranges from energy efficient lighting and daylighting to studies that assess the effect of illuminance upon finish materials and color interaction. This often leads to attempts to squeeze lighting into an already crowded curriculum and is compounded when efforts are made to develop complex study models of interior lighting environments. In short, there is often little time to explore these topics in adequate detail. This paper explores an alternative to the study of interior lighting environments through use of a Virtual Reality Theater. It discusses the development of one of these highly realistic virtual environments and how it is being used to introduce students to understand and interpret varying lighting scenarios of an interior environment and, as well, how it is generating a series of international research endeavors focused upon subjective impressions of interior environments. This study is grounded in the seminal work in this field initiated by such scholars as Flynn. It poses the question of whether or not software-generated images can accurately simulate lighting effects of the physical environment so that subjective impressions are legitimately measured. This research is used as a backdrop to this particular paper that explores the use of the Theater as an educational tool and how it offers up solutions to reducing the time to create complex study models. The use of this technology to alleviate a crowded curriculum, to explore it as an effective teaching tool, and to assess its value and limitations, remains the crux of what will be discussed herein. © American Society for Engineering Education, 2009.}, bibtype = {inproceedings}, author = {Frank, M A and Cowan, D and Boyles, M and Rogers, J and Goreham, K and Suryabrata, J and Kodrat, Y}, booktitle = {ASEE Annual Conference and Exposition, Conference Proceedings} }
@inproceedings{ title = {Where information searches for you: The visible past ubiquitous knowledge environment for digital humanities}, type = {inproceedings}, year = {2009}, keywords = {3D; Information search; Integrated frameworks; Behavioral research; Education; Three dimensional; Ubiquitous computing}, pages = {1043-1047}, volume = {4}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-70849106556&doi=10.1109%2FCSE.2009.132&partnerID=40&md5=621e367f3258ef5bd5f77841c9c273cb}, city = {Vancouver, BC}, id = {f5ecddcf-2447-37ca-84db-010263518b12}, created = {2018-02-27T18:07:30.236Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.236Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Matei20091043}, source_type = {conference}, notes = {cited By 0; Conference of 2009 IEEE International Conference on Social Computing, SocialCom 2009 ; Conference Date: 29 August 2009 Through 31 August 2009; Conference Code:78652}, private_publication = {false}, abstract = {Visible Past proposes a new class of interdisciplinary learning, documenting, knowledge production, and discovery experiences that are anchored in space and time indicators. The project is supported by a ubiquitous computing platform with wiki, implicit social networking, and location aware capabilities. The environment can be used as an integrated framework for enhancing learning and research in social sciences and humanities. Its main benefit would be involving the student, the researcher, and/or the museum visitor in mobile interactive experiences which rely on social networking around common topics or spaces. © 2009 IEEE.}, bibtype = {inproceedings}, author = {Matei, S A and Wernert, E and Faas, T}, doi = {10.1109/CSE.2009.132}, booktitle = {Proceedings - 12th IEEE International Conference on Computational Science and Engineering, CSE 2009} }
@techreport{ title = {Network Workbench Tool}, type = {techreport}, year = {2009}, id = {b652da9f-b168-3078-9baa-f916abe4990f}, created = {2018-02-27T18:07:30.473Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.407Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Vespignani2009}, source_type = {JOUR}, private_publication = {false}, bibtype = {techreport}, author = {Vespignani, Alessandro and Wasserman, Stanley and Wernert, Eric and Huang, Weixia Bonnie and Herr, Bruce and Zhang, Heng and Balcan, Duygu and Hook, Bryan and Markines, Ben and Fortunato, Santo} }
@inproceedings{ title = {GeneIndex: An Open Source Parallel Program for Enumerating and Locating Words in a Genome}, type = {inproceedings}, year = {2009}, pages = {98-102}, websites = {http://ieeexplore.ieee.org/document/5260731/}, publisher = {IEEE}, id = {0cad3db6-c406-317e-b38a-c5a2677b5b2a}, created = {2018-02-27T18:07:30.948Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:06:46.281Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {GeneIndex is an open-source program that locates words of any length k specified by the user in a sequence. GeneIndex is useful for understanding the structure of entire genomes or very large sets of genetic sequences, particularly in finding highly repeated words and words that occur with low frequency. GeneIndex accepts DNA sequences in FASTA format input files, and performs computations and input/output in parallel. GeneIndex has been implemented on Linux, IBM AIX, and NEC SX-8, and is available with test data sets (the entire genomes of Drosophila melanogaster and Homo sapiens). The performance of the program scales well with processor count - that is, as the number of processors increases, the processing time required decreases proportionally. © 2009 IEEE.}, bibtype = {inproceedings}, author = {Li, Huian and Hart, David and Mueller, Matthias and Markwardt, Ulf and Stewart, Craig}, doi = {10.1109/IJCBS.2009.127}, booktitle = {2009 International Joint Conference on Bioinformatics, Systems Biology and Intelligent Computing} }
@techreport{ title = {Developing a Coherent Cyberinfrastructure from Local Campuses to National Facilities: Challenges and Strategies}, type = {techreport}, year = {2009}, publisher = {EDUCAUSE}, id = {1445b50a-d392-3de7-a5a0-2ec9fc308d4a}, created = {2018-02-27T18:07:31.077Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.397Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2009p}, source_type = {JOUR}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, C A and Pepin, J and Odegard, J and Hauser, T and Fratkin, S and Almes, G and Ahalt, S and Agarwala, V and Dreher, P} }
@article{ title = {Implementation, performance, and science results from a 30.7 TFLOPS IBM BladeCenter cluster}, type = {article}, year = {2009}, volume = {22}, websites = {http://doi.wiley.com/10.1002/cpe.1539}, id = {4295f47d-2772-3dc0-85da-912bf95f7581}, created = {2018-02-27T18:07:31.906Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T18:06:46.428Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This paper describes Indiana University's implementation, performance testing, and use of a large high performance computing system. IU's Big Red, a 20.48 TFLOPS IBM e1350 BladeCenter cluster, appeared in the 27th Top500 list as the 23rd fastest supercomputer in the world in June 2006. In spring 2007, this computer was upgraded to 30.72 TFLOPS. The e1350 BladeCenter architecture, including two internal networks accessible to users and user applications and two networks used exclusively for system management, has enabled the system to provide good scalability on many important applications while being well manageable. Implementing a system based on the JS21 Blade and PowerPC 970MP processor within the US TeraGrid presented certain challenges, given that Intel-compatible processors dominate the TeraGrid. However, the particular characteristics of the PowerPC have enabled it to be highly popular among certain application communities, particularly users of molecular dynamics and weather forecasting codes. A critical aspect of Big Red's implementation has been a focus on Science Gateways, which provide graphical interfaces to systems supporting end-to-end scientific workflows. Several Science Gateways have been implemented that access Big Red as a computational resource - some via the TeraGrid, some not affiliated with the TeraGrid. In summary, Big Red has been successfully integrated with the TeraGrid, and is used by many researchers locally at IU via grids and Science Gateways. It has been a success in terms of enabling scientific discoveries at IU and, via the TeraGrid, across the US. Copyright © 2009 John Wiley & Sons, Ltd.}, bibtype = {article}, author = {Stewart, Craig A. and Link, Matthew and McCaulay, D. Scott and Rodgers, Greg and Turner, George and Hancock, David and Wang, Peng and Saied, Faisal and Pierce, Marlon and Aiken, Ross and Mueller, Matthias S. and Jurenz, Matthias and Lieber, Matthias and Tillotson, Jenett and Plale, Beth A.}, doi = {10.1002/cpe.1539}, journal = {Concurrency and Computation: Practice and Experience}, number = {2} }
@inproceedings{ title = {ID2 – A Scalable and Flexible Mixed-Media Information Visualization System for Public Learning Exhibits}, type = {inproceedings}, year = {2009}, pages = {3848-3856}, publisher = {Association for the Advancement of Computing in Education (AACE)}, id = {1af401b0-f53e-3881-9602-45e1c910af78}, created = {2018-02-27T18:07:31.983Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.766Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert2009}, source_type = {CONF}, private_publication = {false}, bibtype = {inproceedings}, author = {Wernert, Eric and Lakshmipathy, Jagannathan and Boyles, Michael and Borner, Katy}, booktitle = {EdMedia: World Conference on Educational Media and Technology} }
@techreport{ title = {Acquisition of a High-Speed, High Capacity Storage System to Support Scientific Computing: The Data Capacitor Final Report}, type = {techreport}, year = {2009}, id = {87193f08-8eb1-371e-8ae1-e632f886f213}, created = {2018-02-27T18:07:32.873Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.717Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2009o}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Simms, Stephen C and Pilachowski, Caty and Bramley, Randall} }
@inproceedings{ title = {Gateway Hosting at Indiana University}, type = {inproceedings}, year = {2009}, websites = {http://hdl.handle.net/2022/13944}, id = {aa27371f-55a2-3ee6-8f62-6e5560ce52af}, created = {2018-02-27T18:07:33.147Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T17:54:59.874Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Lowe2009a}, private_publication = {false}, abstract = {The gateway hosting service at Indiana University provides science gateways and portals with hosting resources to facilitate the use of computation resources and storage within the TeraGrid. This service is designed with high availability in mind and is deployed across the Indianapolis and Bloomington campuses with redundant network, power, and storage. The service uses OpenVZ to give each gateway or portal its own virtual environment while making the most efficient use of the hardware and administrative resources. OpenVZ’s user beancounter quota system and fair-share scheduling for processes and I/O allows fair distribution of resources between virtual machines while allowing full utilization of the hardware. The ability to do live migration allows kernel updates without service interruption. Indiana University’s research network provides multiple low-latency, high-bandwidth connections between campuses, other TeraGrid resource providers, and the Internet at large. The service is in use by a variety of projects such as FlyBase and TeraGrid Information Services and, since the service was put into production in August 2008, there have been 5.37 hours of downtime.}, bibtype = {inproceedings}, author = {Lowe, John Michael and Shields, Corey and Hancock, David Y and Link, Matthew R and Stewart, Craig A and Pierce, Marlon}, booktitle = {Proceedings of the TeraGrid 2009 Conference} }
@inproceedings{ title = {MyOSG}, type = {inproceedings}, year = {2009}, keywords = {Customizable,Data source,Grid computing,Hardware and software,Mobile devices,Portals,World Wide Web}, pages = {1}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-76749108523&doi=10.1145%2F1658260.1658276&partnerID=40&md5=5030e94dba7f754415a1b02912a8b534,http://portal.acm.org/citation.cfm?doid=1658260.1658276}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {dd55c985-1687-34a4-a797-afceb90f8be2}, created = {2018-02-27T18:07:33.953Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-19T16:24:07.655Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Gopu2009}, source_type = {conference}, notes = {cited By 0; Conference of 5th Grid Computing Environments Workshop at Supercomputing 2009, GCE09 ; Conference Date: 20 November 2009 Through 20 November 2009; Conference Code:79224}, private_publication = {false}, abstract = {The Open Science Grid (OSG) is made up of researchers from several scientific domains that contribute hardware and software resources through Virtual Organizations (VOs). These VOs have developed a plethora of tools that have been found useful to members of the OSG. Unfortunately, it is hard for everyone in the OSG community to keep up with all the tools and their accessibility. Similarly, OSG support staff and resource administrators often have a hard time debugging a reported issue because of the distributed nature of the various tools. Additionally, new collaborators have repeatedly complained that most of the tools on the OSG are hard to discover, and even after they discover a tool, the interface is not uniform and requires them to learn a new interface and its data format. MyOSG addresses these concerns. The primary idea is to use an authoritative source of information about OSG entities as a backbone, and organize data from different tools around this backbone to create a web portal. Further, MyOSG provides the ability for users to export / subscribe to a variety of information in formats such as XML, UWA - an industry standard widget format, iCal - for calendar type information, and others. This enables a user to construct Individual Information Centers (IIC) on tools such as iGoogle, Netvibes, Opera Widgets, and on mobile devices such as iPhone, etc. In summary, MyOSG is a highly customizable web portal that allows vastly different categories of users to access information they find important to their role in a format that is convenient to them. Copyright 2009 ACM.}, bibtype = {inproceedings}, author = {Gopu, Arvind and Hayashi, Soichi and Quick, Robert}, doi = {10.1145/1658260.1658276}, booktitle = {Proceedings of the 5th Grid Computing Environments Workshop on - GCE '09} }
@article{ title = {The open science grid status and architecture}, type = {article}, year = {2008}, volume = {119}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-51149122771&doi=10.1088%2F1742-6596%2F119%2F5%2F052028&partnerID=40&md5=3b462676f363ef11906d77c994b52d9e}, id = {b60f1f18-59be-332b-b250-1b46b3f0b7a7}, created = {2018-02-27T18:07:26.018Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.018Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Pordes2008}, source_type = {article}, notes = {cited By 4}, private_publication = {false}, abstract = {The Open Science Grid (OSG) provides a distributed facility where the Consortium members provide guaranteed and opportunistic access to shared computing and storage resources. The OSG project [1] is funded by the National Science Foundation and the Department of Energy Scientific Discovery through Advanced Computing program. The OSG project provides specific activities for the operation and evolution of the common infrastructure. The US ATLAS and US CMS collaborations contribute to and depend on OSG as the US infrastructure contributing to the World Wide LHC Computing Grid on which the LHC experiments distribute and analyze their data. Other stakeholders include the STAR RHIC experiment, the Laser Interferometer Gravitational-Wave Observatory (LIGO), the Dark Energy Survey (DES) and several Fermilab Tevatron experiments - CDF, D0, MiniBooNE, etc. The OSG implementation architecture brings a pragmatic approach to enabling vertically integrated community specific distributed systems over a common horizontal set of shared resources and services. More information can be found at the OSG web site: www.opensciencegrid.org. © 2008 IOP Publishing Ltd.}, bibtype = {article}, author = {Pordes, R and Petravick, D and Kramer, B and Olson, D and Livny, M and Roy, A and Avery, P and Blackburn, K and Wenaus, T and Würthwein, F and Foster, I and Gardner, R and Wilde, M and Blatecky, A and McGee, J and Quick, R}, doi = {10.1088/1742-6596/119/5/052028}, journal = {Journal of Physics: Conference Series}, number = {5} }
@article{ title = {New science on the Open Science Grid}, type = {article}, year = {2008}, volume = {125}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-65549163927&doi=10.1088%2F1742-6596%2F125%2F1%2F012070&partnerID=40&md5=cedded307af8507fdaff128dee4fc282}, publisher = {Institute of Physics Publishing}, id = {8f968d5c-bb07-35ea-907f-03d0cbcebdbe}, created = {2018-02-27T18:07:27.110Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:27.110Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Pordes2008a}, source_type = {article}, notes = {cited By 7}, private_publication = {false}, abstract = {The Open Science Grid (OSG) includes work to enable new science, new scientists, and new modalities in support of computationally based research. There are frequently significant sociological and organizational changes required in transformation from the existing to the new. OSG leverages its deliverables to the large-scale physics experiment member communities to benefit new communities at all scales through activities in education, engagement, and the distributed facility. This paper gives both a brief general description and specific examples of new science enabled on the OSG. More information is available at the OSG web site: www.opensciencegrid.org. © 2008 IOP Publishing Ltd.}, bibtype = {article}, author = {Pordes, R and Altunay, M and Avery, P and Bejan, A and Blackburn, K and Blatecky, A and Gardner, R and Kramer, B and Livny, M and McGee, J and Potekhin, M and Quick, R and Olson, D and Roy, A and Sehgal, C and Wenaus, T and Wilde, M and Würthwein, F}, doi = {10.1088/1742-6596/125/1/012070}, journal = {Journal of Physics: Conference Series} }
@inproceedings{ title = {Cyberinfrastructure resources for U.S. scholarship - The TeraGrid}, type = {inproceedings}, year = {2008}, id = {a2e9f955-033a-35f9-9c1b-2a681d4f6545}, created = {2018-02-27T18:07:29.925Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:29.925Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {The TeraGrid is an advanced cyberinfrastructure funded by the National Science Foundation. It offers computational, data, and storage resources to the U.S. scholarly community. With more than 870 teraflops of computing capability and more than 30 petabytes of online and archival storage, researchers across the nation can expand their research far beyond that allowed by their local computing facilities. The TeraGrid uses portals and Science Gateways to ease access to its facilities, allowing researchers to use the resources they need without having to become computer programmers. Access to the TeraGrid is allocated through a peer-review process, and is not limited to the sciences. Researchers in engineering, humanities, and the arts also use the TeraGrid successfully. Copyright 2008 ACM.}, bibtype = {inproceedings}, author = {Simms, S.C. and Stewart, C. and McCaulay, S.}, doi = {10.1145/1449956.1450057}, booktitle = {Proceedings ACM SIGUCCS User Services Conference} }
@book{ title = {TeraGrid: Analysis of organization, system architecture, and middleware enabling new types of applications}, type = {book}, year = {2008}, source = {Advances in Parallel Computing}, pages = {225-249}, volume = {16}, websites = {http://ebooks.iospress.nl/publication/26292}, id = {2af4c642-269e-392a-977b-42502e0a913b}, created = {2018-02-27T18:07:32.922Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-10-01T18:15:24.888Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {TeraGrid is a national-scale computational science facility supported through a partnership among thirteen institutions, with funding from the US National Science Foundation [1]. Initially created through a Major Research Equipment Facilities Construction (MREFC [2]) award in 2001, the TeraGrid facility began providing production computing, storage, visualization, and data collections services to the national science, engineering, and education community in January 2004. In August 2005 NSF funded a five-year program to operate, enhance, and expand the capacity and capabilities of the TeraGrid facility to meet the growing needs of the science and engineering community through 2010. This paper describes TeraGrid in terms of the structures, architecture, technologies, and services that are used to provide national-scale, open cyberinfrastructure. The focus of the paper is specifically on the technology approach and use of middleware for the purposes of discussing the impact of such approaches on scientific use of computational infrastructure. While there are many individual science success stories, we do not focus on these in this paper. Similarly, there are many software tools and systems deployed in TeraGrid but our coverage is of the basic system middleware and is not meant to be exhaustive of all technology efforts within TeraGrid. We look in particular at growth and events during 2006 as the user population expanded dramatically and reached an initial 'tipping point' with respect to adoption of new 'grid' capabilities and usage modalities. © 2008 The authors and IOS Press.}, bibtype = {book}, author = {Catlett, C. and Allcock, W. E. and Andrews, P. and Aydt, R. and Bair, R. and Balac, N. and Banister, B. and Barker, T. and Bartelt, M. and Beckman, P. and Berman, F. and Bertoline, G. and Blatecky, A. and Boisseau, J. and Bottum, J. and Brunett, S. and Bunn, J. and Butler, M. and Carver, D. and Cobb, J. and Cockerill, T. and Couvares, P.F. and Dahan, M. and Diehl, D. and Dunning, T. and Foster, I. and Gaither, K. and Gannon, D. and Goasguen, S. and Grobe, M. and Hart, D. and Heinzel, M. and Hempel, C. and Huntoon, W. and Insley, J. and Jordan, C. and Judson, I. and Kamrath, A. and Karonis, N. and Kesselman, C. and Kovatch, P. and Lane, L. and Lathrop, S. and Levine, M. and Lifka, D. and Liming, L. and Livny, M. and Loft, R. and Marcusiu, D. and Marsteller, J. and Martin, S. and McCaulay, S. and McGee, J. and McGinnis, L. and McRobbie, M. and Messina, P. and Moore, R. and Moore, R. and Navarro, J.P. and Nichols, J. and Papka, M.E. and Pennington, R. and Pike, G. and Pool, J. and Reddy, R. and Reed, D. and Rimovsky, T. and Roberts, E. and Roskies, R. and Sanielevici, S. and Scott, J.R. and Shankar, A. and Sheddon, M. and Showerman, M. and Simmel, D. and Singer, A. and Skow, D. and Smallen, S. and Smith, W. and Song, C. and Stevens, R. and Stewart, C. and Stock, R.B. and Stone, N. and Towns, J. and Urban, T. and Vildibill, M. and Walker, E. and Welch, V. and Wilkins-Diehr, N. and Williams, R. and Winkler, L. and Zhao, L. and Zimmerman, A.} }
@inproceedings{ title = {Implementation of a distributed architecture for managing collection and dissemination of data for fetal alcohol spectrum disorders research}, type = {inproceedings}, year = {2007}, pages = {33-44}, volume = {4360 LNBI}, websites = {https://link.springer.com/chapter/10.1007/978-3-540-69968-2_4}, id = {956bd1e7-a1ec-31c4-8545-9fea7197f35d}, created = {2018-02-27T18:07:26.697Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T19:46:48.108Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {We implemented a distributed system for management of data for an international collaboration studying Fetal Alcohol Spectrum Disorders (FASD). Subject privacy was protected, researchers without dependable Internet access were accommodated, and researchers' data were shared globally. Data dictionaries codified the nature of the data being integrated, data compliance was assured through multiple consistency checks, and recovery systems provided a secure, robust, persistent repository. The system enabled new types of science to be done, using distributed technologies that are expedient for current needs while taking useful steps towards integrating the system in a future grid-based cyberinfrastructure. The distributed architecture, verification steps, and data dictionaries suggest general strategies for researchers involved in collaborative studies, particularly where data must be de-identified before being shared. The system met both the collaboration's needs and the NIH Roadmap's goal of wide access to databases that are robust and adaptable to researchers' needs. © Springer-Verlag Berlin Heidelberg 2007.}, bibtype = {inproceedings}, author = {Arenson, A. and Bakhireva, L. and Chambers, T. and Deximo, C. and Foroud, T. and Jacobson, J. and Jacobson, S. and Jones, K.L. and Mattson, S. and May, P. and Moore, E. and Ogle, K. and Riley, E. and Robinson, L. and Rogers, J. and Streissguth, A. and Tavares, M. and Urbanski, J. and Yezerets, H. and Stewart, C.A.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@inproceedings{ title = {Empowering distributed workflow with the Data Capacitor: maximizing Lustre performance across the wide area network}, type = {inproceedings}, year = {2007}, pages = {53-58}, publisher = {ACM}, id = {ed5f23b0-3c0b-3473-9281-3c16769615ec}, created = {2018-02-27T18:07:28.471Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.856Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Simms2007k}, source_type = {CONF}, private_publication = {false}, bibtype = {inproceedings}, author = {Simms, Stephen C and Pike, Gregory G and Teige, Scott and Hammond, Bret and Ma, Yu and Simms, Larry L and Westneat, C and Balog, Douglas A}, booktitle = {Proceedings of the 2007 workshop on Service-oriented computing performance: aspects, issues, and approaches} }
@inproceedings{ title = {I/O induced scalability limits of bioinformatics applications}, type = {inproceedings}, year = {2007}, keywords = {Analysis processes; Bioinformatics applications;,Applications; Bioinformatics; Computational comple,Turnaround time}, pages = {609-613}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-47649107840&doi=10.1109%2FBIBE.2007.4375623&partnerID=40&md5=d1ed9dbcdb7f6edcd910df282bebb07e}, city = {Boston, MA}, id = {df0f757d-27e4-3001-bc85-2db140570197}, created = {2018-02-27T18:07:31.531Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.531Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Henschel2007609}, source_type = {conference}, notes = {cited By 1; Conference of 7th IEEE International Conference on Bioinformatics and Bioengineering, BIBE ; Conference Date: 14 January 2007 Through 17 January 2007; Conference Code:72698}, private_publication = {false}, abstract = {The growing size of sequence, protein and other biological databases results in an increased computational complexity of the analysis process. Often parallelization is the only way to keep the turnaround time within reasonable limits. Most scalability studies focus on the parallel algorithm and the resulting communication and synchronization patterns of the implementations. In this paper we examine to what extent I/O bottlenecks limit the scalability on current and future architectures. We study the behavior of two different bioinformatics applications (THREADER, HMMER) and show that these applications are representatives of two different classes with distinct I/O profiles and demands. ©2007 IEEE.}, bibtype = {inproceedings}, author = {Henschel, R and Müller, M S}, doi = {10.1109/BIBE.2007.4375623}, booktitle = {Proceedings of the 7th IEEE International Conference on Bioinformatics and Bioengineering, BIBE} }
@inproceedings{ title = {A multinational deployment of 3D laser scanning to study craniofacial dysmorphology in fetal alcohol spectrum disorders}, type = {inproceedings}, year = {2007}, keywords = {3D surface scanning; Craniofacial dysmorphology; F,Anthropometry; Diagnosis; Scanning; Three dimensi,Medical problems}, volume = {6491}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-34248994994&partnerID=40&md5=8741a5e2d5824d23e1397e28f3343fbc}, city = {San Jose, CA}, id = {4bdf4975-3f31-3f91-a1b9-bbd503246980}, created = {2018-02-27T18:07:32.725Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.725Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Rogers2007}, source_type = {conference}, notes = {cited By 0; Conference of Videometrics IX ; Conference Date: 29 January 2007 Through 30 January 2007; Conference Code:69635}, private_publication = {false}, abstract = {Craniofacial anthropometry (the measurement and analysis of head and face dimensions) has been used to assess and describe abnormal craniofacial variation (dysmorphology) and the facial phenotype in many medical syndromes. Traditionally, anthropometry measurements have been collected by the direct application of calipers and tape measures to the subject's head and face, and can suffer from inaccuracies due to restless subjects, erroneous landmark identification, clinician variability, and other forms of human error. Three-dimensional imaging technologies promise a more effective alternative that separates the acquisition and measurement phases to reduce these variabilities while also enabling novel measurements and longitudinal analysis of subjects. Indiana University (IU) is part of an international consortium of researchers studying fetal alcohol spectrum disorders (FASD). Fetal alcohol exposure results in predictable craniofacial dysmorphologies, and anthropometry has been proven to be an effective diagnosis tool for the condition. IU is leading a project to study the use of 3D surface scanning to acquire anthropometry data in order to more accurately diagnose FASD, especially in its milder forms. This paper describes our experiences in selecting, verifying, supporting, and coordinating a set of 3D scanning systems for use in collecting facial scans and anthropometric data from around the world. © 2007 SPIE-IS&T.}, bibtype = {inproceedings}, author = {Rogers, J and Wernert, E and Moore, E and Ward, R and Wetherill, L F and Foroud, T}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering} }
@article{ title = {High throughput image analysis on PetaFLOPS systems}, type = {article}, year = {2007}, keywords = {Computational complexity; Computer software; Infor,Computing systems; HPC systems; PetaFLOPS systems,Image analysis}, pages = {323-329}, volume = {4375 LNCS}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-38049150862&partnerID=40&md5=30940d182d77d46e70877db570bb217c}, city = {Dresden}, id = {98ff2a62-5a47-3d2f-b7eb-bb7558e7ba55}, created = {2018-02-27T18:07:32.839Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.839Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Henschel2007323}, source_type = {article}, notes = {cited By 0; Conference of 12th Euro-Par Conference on Petascale Computational Biology and Bioinformatics Workshop, CoreGRID 2006, UNICORE Summit 2006 ; Conference Date: 29 August 2006 Through 1 September 2006; Conference Code:71138}, private_publication = {false}, abstract = {Today's state-of-the-art high-throughput screening facilities can produce tens of thousands of images of cells per day. Analyzing images from high-throughput screening experiments is very time-consuming and computationally demanding. Researchers are currently limited not by the availability of experimental data, but by the computing resources for the image analysis. The Max Planck Institute of Molecular Cell Biology and Genetics in Dresden, Germany (MPI-CBG) and the Center for Information Services and High Performance Computing at the Technische Universität Dresden (ZIH) are working together to integrate high performance computing systems into the workflow of biologists. The MPI-CBG has developed software that biologists use for their image analysis work. The software can utilize local workstations and remote HPC systems for image analysis. Currently the software is used successfully on small clusters and PC-Farms. Most parts of the image analysis workflow of screening experiments can be performed in parallel and are ideal for distribution on large systems. With a few modifications and a new approach to data management, the software should be able to scale to PetaFLOPS systems. © Springer-Verlag Berlin Heidelberg 2007.}, bibtype = {article}, author = {Henschel, R and Müller, M and Kalaidzidis, Y}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)} }
@article{ title = {The open science grid}, type = {article}, year = {2007}, volume = {78}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-36049001139&doi=10.1088%2F1742-6596%2F78%2F1%2F012057&partnerID=40&md5=184ecff2d49962fc818455442358c616}, id = {67d93563-3757-3087-870c-27f6e49603c0}, created = {2018-02-27T18:07:33.415Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.415Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Pordes2007}, source_type = {article}, notes = {cited By 149}, private_publication = {false}, abstract = {The Open Science Grid (OSG) provides a distributed facility where the Consortium members provide guaranteed and opportunistic access to shared computing and storage resources. OSG provides support for and evolution of the infrastructure through activities that cover operations, security, software, troubleshooting, addition of new capabilities, and support for existing and engagement with new communities. The OSG SciDAC-2 project provides specific activities to manage and evolve the distributed infrastructure and support its use. The innovative aspects of the project are the maintenance and performance of a collaborative (shared & common) petascale national facility over tens of autonomous computing sites, for many hundreds of users, transferring terabytes of data a day, executing tens of thousands of jobs a day, and providing robust and usable resources for scientific groups of all types and sizes. More information can be found at the OSG web site: www.opensciencegrid.org. © 2007 IOP Publishing Ltd.}, bibtype = {article}, author = {Pordes, R and Petravick, D and Kramer, B and Olson, D and Livny, M and Roy, A and Avery, P and Blackburn, K and Wenaus, T and Würthwein, F and Foster, I and Gardner, R and Wilde, M and Blatecky, A and McGee, J and Quick, R}, doi = {10.1088/1742-6596/78/1/012057}, journal = {Journal of Physics: Conference Series}, number = {1} }
@techreport{ title = {Wide area filesystem performance using Lustre on the TeraGrid}, type = {techreport}, year = {2007}, id = {2cb249a6-a389-3193-95f3-efc813bfa8bd}, created = {2018-02-27T18:07:33.432Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.797Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Simms2007m}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Simms, Stephen C and Pike, Gregory G and Balog, Douglas} }
@techreport{ title = {Indiana University’s SC|07 Bandwidth Challenge award-winning project: Using the Data Capacitor for Remote Data Collection, Analysis, and Visualization}, type = {techreport}, year = {2007}, keywords = {bandwidth challenge,dc,lustre,supercomputing,wan}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/14615}, id = {3a1e1e79-338f-3b5a-b7ee-3bedf1997317}, created = {2020-09-10T23:23:22.854Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T23:23:22.854Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {In 2006, Indiana University led a team that received an honorable mention in the SC06 bandwidth challenge. The following year, IU expanded its team to include representatives of Technische Universität Dresden (TUD) in Germany and the Rochester Institute of Technology (RIT) in New York. The title of the 2007 project was “Using the Data Capacitor for Remote Data Collection, Analysis, and Visualization.” We believe that distributed workflows represent an important category of scientific application workflows that make possible new and more rapid discoveries using grids and distributed workflow tools. We believe that short-term storage systems have a particularly important role to play in distributed workflows. The IU Data Capacitor is a 535 TB distributed object store file system constructed for short- to mid-term storage of large research data sets.}, bibtype = {techreport}, author = {Simms, Stephen C. and Davy, Matthew and Hammond, C. Bret and Link, Matthew R. and Stewart, Craig A. and Teige, Scott and Baik, Mu-Hyun and Mantri, Yogita and Lord, Richard and McMullen, D.F. (Rick) and Huffman, John C. and Huffman, Kia and Juckeland, Guido and Kluge, Michael and Henschel, Robert and Brunst, Holger and Knuepfer, Andreas and Mueller, Matthias and Mukund, P.R. and Elble, Andrew and Pasupuleti, Ajay and Bohn, Richard and Das, Sripriya and Stefano, James and Pike, Gregory G. and Balog, Douglas A.} }
@techreport{ title = {Report of the Indiana University Research Data Management Taskforce}, type = {techreport}, year = {2007}, keywords = {DLP,Informatics,OAIS,UITS,Working Paper,archive,cyberinfrastructure,data,deluge,digital libraries,preservation,repository,research}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/3294}, id = {d995bc26-b7ca-3374-98d5-19ddd9e278f9}, created = {2020-09-10T23:39:02.575Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-10T23:39:02.575Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The “data deluge” in the sciences—the ability to create massive streams of digital data—has been discussed at great length in the academic and lay press. The ability with which scientists can now produce data has transformed scientific practice so that creating data is now less of a challenge in many disciplines than making use of, properly analyzing, and properly storing such data. Two aspects of the data deluge are not as widely appreciated. One is that the data deluge is not contained simply to the sciences. Humanities scholars and artists are generating data at prodigious rates as well through massive scanning projects, digitization of still photographs, video, and music, and the creation of new musical and visual art forms that are inherently digital. A second factor that is not well appreciated is that data collected now is potentially valuable forever. The genomic DNA sequences of a particular organism are what they are. They are known precisely. Or, more properly, the sequences of the contigs that are assembled to create the sequence are known precisely, while there may be dispute about the proper assembly. Such data will be of value indefinitely – and for example to the extent that we wonder if environmental changes are changing the population genetics of various organisms, data on the frequency of particular genetic variations in populations will be of value indefinitely. Similarly, video and audio of an American folk musician, a speaker of an endangered language or a ballet performance will be of value indefinitely although argument might well go on regarding the interpretation and annotation of that video and audio. Such images and associated audio can never be recreated, and are thus of use indefinitely.}, bibtype = {techreport}, author = {Indiana University Research Data Management Taskforce} }
@inproceedings{ title = {Progress towards petascale applications in biology: Status in 2006}, type = {inproceedings}, year = {2006}, pages = {289-303}, websites = {http://hdl.handle.net/2022/1829}, publisher = {Springer}, id = {ea354a16-49ba-3abb-a301-c6d0e73b486c}, created = {2018-02-27T18:07:25.456Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T19:46:48.111Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2006m}, source_type = {CONF}, private_publication = {false}, abstract = {Petascale computing is currently a common topic of discussion in the high performance computing community. Biological applications, particularly protein folding, are often given as examples of the need for petascale computing. There are at present biological applications that scale to execution rates of approximately 55 teraflops on a special-purpose supercomputer and 2.2 teraflops on a general-purpose supercomputer. In comparison, Qbox, a molecular dynamics code used to model metals, has an achieved performance of 207.3 teraflops. It may be useful to increase the extent to which operation rates and total calculations are reported in discussion of biological applications, and use total operations (integer and floating point combined) rather than (or in addition to) floating point operations as the unit of measure. Increased reporting of such metrics will enable better tracking of progress as the research community strives for the insights that will be enabled by petascale computing.}, bibtype = {inproceedings}, author = {Stewart, Craig A and Müller, Matthias S and Lingwall, Malinda}, booktitle = {Euro-Par 2006 Workshops} }
@article{ title = {Template-based isocontouring}, type = {article}, year = {2006}, pages = {187-204}, volume = {6}, publisher = {World Scientific}, id = {fc036046-27e4-360f-8943-4ba68cb3be5a}, created = {2018-02-27T18:07:25.751Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.327Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Lakshmipathy2006}, source_type = {JOUR}, private_publication = {false}, bibtype = {article}, author = {Lakshmipathy, Jagannathan and Nowinski, Wieslaw L and Wernert, Eric A}, journal = {International Journal of Image and Graphics}, number = {02} }
@inproceedings{ title = {Research data storage available to researchers throughout the US via the TeraGrid}, type = {inproceedings}, year = {2006}, pages = {231-234}, websites = {http://portal.acm.org/citation.cfm?doid=1181216.1181268,http://hdl.handle.net/2022/14747}, publisher = {ACM}, city = {Edmonton, Alberta, Canada}, id = {8efd0f2e-f556-329f-aef2-319e862bc7c6}, created = {2018-02-27T18:07:27.893Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:27.893Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {McCaulay, Scott D and Link, Matthew R}, doi = {10.1145/1181216.1181268}, booktitle = {The 34th annual ACM SIGUCCS fall conference} }
@inproceedings{ title = {Navigation techniques for large-scale astronomical exploration}, type = {inproceedings}, year = {2006}, keywords = {Astronomy,Computer simulation; Mathematical models; Navigati,Exponential zooming; Interaction techniques; Real}, volume = {6060}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33645668978&doi=10.1117%2F12.648287&partnerID=40&md5=9dad7c2cf0da0628d5a7639f4f98252c}, city = {San Jose, CA}, id = {a032f767-416d-3384-b721-aa898e13f646}, created = {2018-02-27T18:07:31.283Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.283Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Fu2006}, source_type = {conference}, notes = {cited By 1; Conference of Visualization and Data Analysis 2006 ; Conference Date: 16 January 2006 Through 17 January 2006; Conference Code:66996}, private_publication = {false}, abstract = {Navigating effectively in virtual environments at human scales is a difficult problem. However, it is even more difficult to navigate in large-scale virtual environments such as those simulating the physical Universe; the huge spatial range of astronomical simulations and the dominance of empty space make it hard for users to acquire reliable spatial knowledge of astronomical contexts. This paper introduces a careful combination of navigation and visualization techniques to resolve the unique problems of large-scale real-time exploration in terms of travel and wayfinding. For large-scale travel, spatial scaling techniques and constrained navigation manifold methods are adapted to the large spatial scales of the virtual Universe. We facilitate large-scale wayfinding and context awareness using visual cues such as power-of-10 reference cubes, continuous exponential zooming into points of interest, and a scalable world-in-miniature (WIM) map. These methods enable more effective exploration and assist with accurate context-model building, thus leading to improved understanding of virtual worlds in the context of large-scale astronomy. © 2006 SPIE-IS&T.}, bibtype = {inproceedings}, author = {Fu, C.-W. and Hanson, A J and Wernert, E A}, doi = {10.1117/12.648287}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering} }
@techreport{ title = {SCI: ETF Early Operations-Indiana University}, type = {techreport}, year = {2006}, id = {1096f466-33a1-3984-82f4-03cab5f63dda}, created = {2018-02-27T18:07:31.316Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.445Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2006n}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Voss, Brian D and McRobbie, Michael A and Shankar, Anurag and Simms, Stephen and McCaulay, D Scott} }
@inproceedings{ title = {Powerful New Research Computing System Available Via the TeraGrid}, type = {inproceedings}, year = {2006}, keywords = {mcc2006pow-n75}, websites = {http://hdl.handle.net/2022/13994}, city = {Tampa, FL}, id = {8f6bbeb7-e24e-35c5-b981-85e5a6f5c85c}, created = {2018-02-27T18:07:32.271Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:32.271Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {McCaulay, Scott D and Link, Matthew R and Turner, George and Hancock, David Y and Morris, M and Stewart, Craig A}, booktitle = {IEEE/ACM 2006 Supercomputing Conference} }
@inbook{ type = {inbook}, year = {2006}, keywords = {BLAS,Geographically distributed cluster,LINPACK benchmark,Performance model,Performance tuning}, pages = {511-522}, websites = {http://doi.wiley.com/10.1002/0471732710.ch26}, month = {1}, publisher = {John Wiley & Sons, Inc.}, day = {23}, city = {Hoboken, NJ, USA}, id = {e3f3343c-eb28-35ef-b134-fa3d64012f92}, created = {2020-09-09T16:27:24.046Z}, accessed = {2020-09-09}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T17:17:52.380Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Indiana University's AVIDD (Analysis and Visualization of Instrument-Driven Data) facility is the only geographically distributed cluster on the Top 500 list. It ranked 50th on the June 2003 list, achieving over 1 TFlops of LINPACK performance. In this chapter, AVIDD's hardware and software setup is introduced, and our experience of performance tuning and benchmarking under the guidance of the existing LINPACK performance model is reported. Finally, the advantages of this distributed cluster-building approach are discussed based on the performance measurements.}, bibtype = {inbook}, author = {Wang, Peng and Turner, George and Simms, Steven and Hart, Dave and Papakhian, Mary and Stewart, Craig}, doi = {10.1002/0471732710.ch26}, chapter = {One Teraflop Achieved with a Geographically Distributed Linux Cluster}, title = {High-Performance Computing} }
@techreport{ title = {Indiana University Life Sciences Strategic Plan (Version 1.01)}, type = {techreport}, year = {2006}, keywords = {Biology,Cancer,Chemistry,Collaboration,Economic Development,IRSC,Informatics,Neurosciences,Planning Documents,Research}, websites = {http://institutionalmemory.iu.edu/aim/handle/10333/391}, month = {1}, publisher = {Indiana University}, day = {24}, id = {7a03bfe6-7f78-36a5-bf4d-507b913ef653}, created = {2020-09-11T00:15:26.442Z}, accessed = {2020-09-10}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-11T00:15:26.442Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This document was created by a drafting committee consisting of the following (listed in alphabetical order): D. Craig Brater, M.D., Vice President for Life Sciences, and Dean, Indiana University School of Medicine; Michael A. McRobbie, Ph.D., Provost and Vice President for Academic Affairs (Interim); Ora H. Pescovitz, M.D., Executive Associate Dean, IU School of Medicine, President and CEO of Riley Hospital for Children; Craig A. Stewart, Ph.D., Associate Vice President for Research and Academic Computing and Chief Operating Officer, Pervasive Technology Labs; Kumble R. Subbaswamy, Ph.D., Dean, College of Arts and Sciences.}, bibtype = {techreport}, author = {Indiana University Life Sciences Strategic Plan Drafting Committee} }
@inproceedings{ title = {PViN}, type = {inproceedings}, year = {2005}, keywords = {Computer software,Context free grammars,Database systems,Hereditary diseases,Informa,Information visualization,Pe}, pages = {115}, volume = {1}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33644506106&doi=10.1145%2F1066677.1066709&partnerID=40&md5=d583a3582db70e1f2f2ed3e7e36738fa,http://portal.acm.org/citation.cfm?doid=1066677.1066709}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {6be64e6c-a8dd-35d6-9d69-561f90cca94b}, created = {2018-02-27T18:07:27.493Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:31:23.590Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert2005115}, source_type = {conference}, notes = {cited By 4; Conference of 20th Annual ACM Symposium on Applied Computing ; Conference Date: 13 March 2005 Through 17 March 2005; Conference Code:66730}, private_publication = {false}, abstract = {We describe the design and implementation of PViN (Pedigree Visualization and Navigation), a scalable and flexible software system that enables the visualization, analysis, and printing of hierarchical relations typically stored in relational databases. Although the concept of visualizing and printing pedigree databases is not new, we have developed a novel implementation based on modern approaches for several important reasons: (1) Our university's center of hereditary diseases has accumulated very large amounts of hereditary information from various populations for ongoing research projects, and has difficulty managing and effectively printing the associated pedigree trees with legacy FORTRAN software; (2) The size of some of these databases (over 40,000 entries covering seven generations) is too large for existing commercial pedigree software to handle; and (3) Our researchers and support staff need more effective ways to perform visual analysis tasks, such as the comparison of multiple pedigrees and the cross-referencing of individuals that appear in multiple families (through re-marriage). The PViN system addresses these fundamental problems while also providing a number of additional features and functions, including: context-free drawing routines that enable rendering onto screen and printer contexts interchangeably; a generic framework that allows the system to interface with multiple databases and database servers; a multiple view user interface that provides side-by-side comparisons and "focus+context" rendering; and advanced node searching and cross-referencing capabilities. Copyright 2005 ACM.}, bibtype = {inproceedings}, author = {Wernert, Eric A and Lakshmipathy, Jagannathan}, doi = {10.1145/1066677.1066709}, booktitle = {Proceedings of the 2005 ACM symposium on Applied computing - SAC '05} }
@inproceedings{ title = {PubsOnline: Open source bibliography database}, type = {inproceedings}, year = {2005}, pages = {247-249}, id = {befd663c-6e5a-3226-abfc-d00cd7f9f7fa}, created = {2018-02-27T18:07:29.795Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T19:46:48.285Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Myron2005a}, private_publication = {false}, abstract = {Universities and colleges, departments within universities and colleges, and individual researchers often desire the ability to provide online listings, via the Web, of citations to publications and other forms of information dissemination. Cataloging citations to publications or other forms of information dissemination by a particular organization facilitates access to the information, its use, and citation in subsequent publications. Listing, searching, and indexing of citations is further improved when citations can be searched on by additional key information, such as by grant, university resource, or research lab. This paper describes PubsOnline, an open source tool for management and presentation of databases of citations via the Web. Citations with bibliographic information are kept in the database and associated with attributes that are grouped by category and usable as search keys. Citations may optionally be linked to files containing an entire article. PubsOnline was developed with PHP and MySQL, and may be downloaded from http://pubsonline.indiana.edu/. Copyright 2005 ACM.}, bibtype = {inproceedings}, author = {Myron, S.A. and Knepper, R. and Link, Matthew R and Stewart, C.}, doi = {10.1145/1099435.1099492}, booktitle = {Proceedings of the 33rd annual ACM SIGUCCS conference on User services (SIGUCCS '05)} }
@inproceedings{ title = {The john-e-box: fostering innovation, inclusion, and collaboration through accessible advanced visualization}, type = {inproceedings}, year = {2005}, pages = {64}, publisher = {Association for Computing Machinery (ACM)}, id = {7d073b26-0650-31fe-9626-9a2289a85079}, created = {2019-09-12T19:27:24.149Z}, accessed = {2019-09-12}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:27:24.219Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Recent advances in commodity graphics and projection hardware have motivated many notable research projects and community discussions about the potential of these technologies to make advanced visualization more broadly accessible. However, the actual realization of this promise on a significant scale is challenging, requiring strong institutional commitment, expert technical support, and a broader visualization context. This paper describes an ongoing effort at Indiana University (IU) to develop a commodity-based, large-format, 3D stereo display system and to deploy a collection of such systems to a range of classrooms, laboratories, galleries, and learning environments throughout the IU system and the State of Indiana. To date, these systems have been used in over 30 projects by investigators in 15 departments across four different IU campuses. In addition, this technology has been used to reach well over 3,000 individuals through a series of coordinated outreach efforts. This initiative is also notable for fostering new interpersonal collaborations and inter-departmental cooperation, for enabling non-traditional applications in education and artistic expression, and for providing an interface to other advanced information technology efforts.}, bibtype = {inproceedings}, author = {Wernert, Eric and Boyles, Mike and Huffman, John N. and Rogers, Jeff and Huffman, John C. and Stewart, Craig}, doi = {10.1145/1095242.1095269}, booktitle = {Proceedings of the 2005 conference on Diversity in computing (TAPIA '05)} }
@inproceedings{ title = {A novel approach to extract triangle strips for iso-surfaces in volumes}, type = {inproceedings}, year = {2004}, keywords = {Algorithms,Computation theory,Computer graphics,Iso-surface,Isovalue,Marching Cubes,Optimization,Topo,Triangle s}, pages = {239-245}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-10044294982&partnerID=40&md5=27ca90eef0e4f8e4d2a64ccd99c0938d,http://portal.acm.org/citation.cfm?doid=1044588.1044639}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {6662b30c-8768-36b5-9c5d-2f1e8eda03df}, created = {2018-02-27T18:07:30.013Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:31:23.602Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Lakshmipathy2004239}, source_type = {conference}, notes = {cited By 0; Conference of Proceedings VRCAI 2004 - ACM SIGGRAPH International Conference on Virtual Reality Continuum and its Applications in Industry ; Conference Date: 16 June 2004 Through 18 June 2004; Conference Code:63879}, private_publication = {false}, abstract = {The Marching Cubes (MC) algorithm is a popular approach to extract iso-surfaces from volumetric data. This approach extracts triangles from the volume data for a specific iso-value using a table lookup approach. The lookup entry in the MC is a name-value pair, where the name is a number that uniquely identifies a cube topology and the value is the set of triangles for that topology. The MC applies a divide-and-conquer strategy by sub-dividing the volume into cubes with voxels at each corner of the cube and processes these cubes in a specific order. Thus, for a user specified iso-value, the MC looks up triangles for each cube and thereby generates the whole iso-surface. Most modern graphics hardware renders triangles faster if they are rendered collectively as triangle strips as opposed to individual triangles. Therefore, in this paper we have modified the MC lookup table approach such that the name is the cube topology and the value is a sub-surface piece(s) and its face-index representation. At the time of extraction we tessellate the sub-surface pieces by considering the pieces in the neighboring cubes using the face-index representation and then triangulate these tessellated subsurface pieces into triangle strips. Our approach is superior to the existing approaches. Its features include: (1) simplicity, (2) procedural triangulation which avoids painful pre-computation, and (3) face-index representation of surface pieces that enables an efficient connection mechanism.}, bibtype = {inproceedings}, author = {Lakshmipathy, Jagannathan and Nowinski, Wieslaw L and Wernert, Eric A}, doi = {10.1145/1044588.1044639}, booktitle = {Proceedings of the 2004 ACM SIGGRAPH international conference on Virtual Reality continuum and its applications in industry - VRCAI '04} }
@book{ title = {Parallel computing in biomedical research and the search for peta-scale biomedical applications}, type = {book}, year = {2004}, source = {Advances in Parallel Computing}, volume = {13}, issue = {C}, id = {905e4cb4-1872-3b55-b12c-08f5b2f9e835}, created = {2018-02-27T18:07:33.983Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.104Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2004o}, private_publication = {false}, bibtype = {book}, author = {Stewart, C.A. and Hart, D. and Sheppard, R.W. and Li, H. and Cruise, R. and Moskvin, V. and Papiez, L.}, doi = {10.1016/S0927-5452(04)80088-1} }
@article{ title = {Bioinformatics: transforming biomedical research and medical care}, type = {article}, year = {2004}, pages = {30}, volume = {47}, websites = {http://portal.acm.org/citation.cfm?doid=1029496.1029522}, month = {11}, day = {1}, id = {0f751a62-d8b8-35e9-ade9-cc22edc72235}, created = {2019-08-29T19:46:29.512Z}, accessed = {2019-08-29}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-08-29T19:46:29.662Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, bibtype = {article}, author = {Stewart, Craig A.}, doi = {10.1145/1029496.1029522}, journal = {Communications of the ACM}, number = {11} }
@inproceedings{ title = {The Grid2003 production grid: principles and practice}, type = {inproceedings}, year = {2004}, pages = {236-245}, websites = {http://ieeexplore.ieee.org/document/1323544/}, publisher = {IEEE}, id = {56f34fdf-66f8-3fe2-9342-eb8f9b014582}, created = {2019-09-19T16:29:01.344Z}, accessed = {2019-09-19}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-19T16:33:56.331Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The Grid2003 Project has deployed a multi-virtual organization, application-driven grid laboratory ("Grid3") that has sustained for several months the production-level services required by physics experiments of the Large Hadron Collider at CERN (ATLAS and CMS), the Sloan Digital Sky Survey project, the gravitational wave search experiment LIGO, the BTeV experiment at Fermilab, as well as applications in molecular structure analysis and genome analysis, and computer science research projects in such areas as job and data scheduling. The deployed infrastructure has been operating since November 2003 with 27 sites, a peak of 2800 processors, workloads from 10 different applications exceeding 1300 simultaneous jobs, and data transfers among sites of greater than 2 TB/day. We describe the principles that have guided the development of this unique infrastructure and the practical experiences that have resulted from its creation and use. We discuss application requirements for grid services deployment and configuration, monitoring infrastructure, application performance, metrics, and operational experiences. We also summarize lessons learned.}, bibtype = {inproceedings}, author = {Foster, I. and Gieraltowski, J. and Gose, S. and Maltsev, N. and May, E. and Rodriguez, A. and Sulakhe, D. and Vaniachine, A. and Green, M. and Miller, R. and Letts, J. and Martin, T. and Shank, J. and Youssef, S. and Bury, D. and Dumitrescu, C. and Engh, D. and Gardner, R. and Adams, D. and Baker, R. and Deng, W. and Smith, J. and Yu, D. and Mambelli, M. and Smirnov, Y. and Voeckler, J. and Wilde, M. and Zhao, Y. and Zhao, X. and Legrand, I. and Singh, S. and Steenberg, C. and Xia, Y. and Avery, P. and Cavanaugh, R. and Kim, B. and Prescott, C. and Rodriguez, J. and Zahn, A. and Afaq, A. and Berman, E. and Annis, J. and Bauerdick, L.A.T. and Ernst, M. and Fisk, I. and Giacchetti, L. and Graham, G. and Heavey, A. and Kaiser, J. and McKee, S. and Kuropatkin, N. and Pordes, R. and Sekhri, V. and Weigand, J. and Wu, Y. and Jordan, C. and Prewett, J. and Thomas, T. and Baker, K. and Sorrillo, L. and Severini, H. and Huth, J. and Clifford, B. and Deelman, E. and Flon, L. and Kesselman, C. and Mehta, G. and Olomu, N. and Vahi, K. and Allen, M. and Grundhoefer, L. and Hicks, J. and Luehring, F. and Peck, S. and Quick, R. and Simms, S. and De, K. and McGuigan, P. and Sosebee, M. and Fekete, G. and VandenBerg, J. and Bradley, D. and Couvares, P. and De Smet, A. and Kireyev, C. and Paulson, E. and Roy, A. and Cho, K. and Kwon, K. and Son, D. and Park, H. and Koranda, S. and Moe, B. and Canon, S. and Jackson, K. and Konerding, D.E. and Lee, J. and Olson, D. and Sakrejda, I. and Tierney, B. and Brown, B. and Sheldon, P.}, doi = {10.1109/HPDC.2004.1323544}, booktitle = {Proceedings. 13th IEEE International Symposium on High performance Distributed Computing (HPDC 2004)} }
@inproceedings{ title = {A global grid for analysis of arthropod evolution}, type = {inproceedings}, year = {2004}, pages = {328-337}, id = {c5741518-a3ea-35eb-b425-13dc07eef9aa}, created = {2020-09-09T16:21:03.720Z}, accessed = {2020-09-09}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T16:21:03.830Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Maximum likelihood analysis is a powerful technique for inferring evolutionary histories from genetic sequence data. During the fall of 2003, an international team of computer scientists, biologists, and computer centers created a global grid to analyze the evolution of hexapods (arthropods with six legs). We created a global grid of computers using systems located in eight countries, spread across six continents (every continent but Antarctica). This work was done as part of the SC03 HPC Challenge, and this project was given an HPC Challenge award for the "Most Distributed Application." More importantly, the creation of this computing grid enabled investigation of important questions regarding the evolution of arthropods - research that would not have otherwise been undertaken. Grid computing will thus lead directly to new scientific insights. © 2004 IEEE.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Keller, Rainer and Repasky, Richard and Hess, Matthias and Hart, David and Müller, Matthias and Sheppard, Ray and Wössner, Uwe and Aumüller, Martin and Li, Huian and Berry, Donald K. and Colbourne, John}, doi = {10.1109/GRID.2004.1}, booktitle = {Proceedings - IEEE/ACM International Workshop on Grid Computing} }
@techreport{ title = {University Information Technology Services' Advanced IT Facilities: The least every researcher needs to know}, type = {techreport}, year = {2003}, id = {d6f1d90a-d011-359c-a30f-89ae353377cb}, created = {2018-02-27T18:07:25.197Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.533Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Cruise2003a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Cruise, Robert and Hart, David and Papakhian, Mary and Repasky, Richard and Samuel, John and Shankar, Anurag and Stewart, Craig A and Wernert, Eric} }
@article{ title = {Interactive Poster: Tree3D–A System for Temporal and Comparative Analysis of Phylogenetic Trees}, type = {article}, year = {2003}, pages = {114}, id = {4bdbd479-0cde-3efe-818b-7528484bf157}, created = {2018-02-27T18:07:27.551Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.946Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert2003}, source_type = {JOUR}, private_publication = {false}, bibtype = {article}, author = {Wernert, Eric A and Berry, Donald K and Huffman, John N and Stewart, Craig A}, journal = {POSTER COMPENDIUM} }
@techreport{ title = {2003 Report on Indiana University Accomplishments supported by Shared University Research Grants from IBM, Inc.}, type = {techreport}, year = {2003}, id = {637c0159-d30b-34e6-be18-338e5f7d2ff0}, created = {2018-02-27T18:07:33.247Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.938Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2003o}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Papakhian, Mary and Hart, David and Shankar, Anurag and Arenson, Andrew and McMullen, D F and Palakal, Mathew and Dalkilic, Mehmet and Ortoleva, Peter} }
@article{ title = {3DIVE: An immersive environment for interactive volume data exploration}, type = {article}, year = {2003}, keywords = {Animation,Computer simulation,Computer software,Data reduction,Interactive volume data exploration,Texture mapping}, pages = {41-47}, volume = {18}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-0037261297&partnerID=40&md5=64cbb409a6af985ef3ed7fbb4bd48c1a}, id = {1678221f-1e4a-39cd-b9dd-2a8ca606d10f}, created = {2018-02-27T18:07:33.320Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:33.320Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Boyles200341}, source_type = {article}, notes = {cited By 2}, private_publication = {false}, abstract = {This paper describes an immersive system, called 3DIVE, for interactive volume data visualization and exploration inside the CAVE virtual environment. Combining interactive volume rendering and virtual reality provides a natural immersive environment for volumetric data visualization. More advanced data exploration operations, such as object level data manipulation, simulation and analysis, are supported in 3DIVE by several new techniques. In particular, volume primitives and texture regions are used for the rendering, manipulation, and collision detection of volumetric objects; and the region-based rendering pipeline is integrated with 3D image filters to provide an image-based mechanism for interactive transfer function design. The system has been recently released as public domain software for CAVE/ImmersaDesk users, and is currently being actively used by various scientific and biomedical visualization projects.}, bibtype = {article}, author = {Boyles, M and Fang, S F}, journal = {Journal of Computer Science and Technology}, number = {1} }
@inproceedings{ title = {Advanced information technology support for life sciences research}, type = {inproceedings}, year = {2003}, pages = {7-9}, websites = {http://portal.acm.org/citation.cfm?doid=947469.947472}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {69862249-12f8-323f-b640-a294c71eb469}, created = {2019-09-12T19:31:14.282Z}, accessed = {2019-09-12}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:33:59.757Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {The revolution in life sciences research brought about by the sequencing of the human genome creates new challenges for scientists and new opportunities for computing support organizations. This may involve significant shifts in computing support strategies, particularly as regards interacting with life sciences researchers who maintain a medical practice. This paper describes Indiana University's experience in a large-scale initiative in supporting life sciences research, as well as several strategies and suggestions relevant to colleges and universities of any size. Computing organizations and support professionals have many opportunities to facilitate and accelerate life sciences research.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Hart, David and Shankar, Anurag and Wernert, Eric and Repasky, Richard and Papakhian, Mary and Arenson, Andrew D. and Bernbom, Gerry}, doi = {10.1145/947469.947472}, booktitle = {Proceedings of the 31st annual ACM SIGUCCS conference on User services - SIGUCCS '03} }
@article{ title = {Case study: Constructing the solar journey}, type = {article}, year = {2002}, id = {c6675484-377b-36ca-993d-72356fa248c9}, created = {2018-02-27T18:07:27.094Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.102Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hanson2002}, source_type = {JOUR}, private_publication = {false}, bibtype = {article}, author = {Hanson, A and Fu, C and Wernert, E and Frisch, P}, journal = {Preprint} }
@inproceedings{ title = {Gauging IT support strategies: User needs then and now}, type = {inproceedings}, year = {2002}, id = {7c561d39-8556-3f3d-bf92-70b28747b83e}, created = {2018-02-27T18:07:27.934Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:14.737Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Samuel2002b}, private_publication = {false}, abstract = {Rapid changes in the IT field have placed a burden on educational institutions, which must decide how best to provide computing support for instruction and research. Institutions must determine the ideal balance of IT support with other educational needs in the academic community. Access to information technology is no longer a privilege held by a select few groups: instead, it has become a necessity in the academic environment. Since the late 1980's, computing support strategies at Indiana University have changed drastically to accommodate the changes in users' needs. In order to keep up with these changes, the University solicits input directly from users, allowing it to apply its scant and valuable computing support resources to where they are most needed. The annual IT survey conducted by the information technology organization has proven an invaluable tool in helping the University maintain one of the best academic computing support environments in the nation. Giving the end users an opportunity to provide input on what changes are needed in the University's IT environment has played a key role in the success of the support mechanisms. In addition, responses to the survey give an accurate picture of current needs and allow better projection of future needs of users. This allows the University to better serve its academic computing community without over-allocating resources that would be more useful elsewhere in the academic environment.}, bibtype = {inproceedings}, author = {Samuel, J.V. and Peebles, C.S. and Noguchi, T. and Stewart, C.A.}, booktitle = {Proceedings ACM SIGUCCS User Services Conference} }
@inproceedings{ title = {Getting More for Less: A Software Distribution Model}, type = {inproceedings}, year = {2002}, id = {337c03a7-86c6-3f61-8868-f75c5ff51823}, created = {2018-02-27T18:07:29.039Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.691Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Samuel2002c}, source_type = {CONF}, private_publication = {false}, bibtype = {inproceedings}, author = {Samuel, John V and Wilhite, Kevin J and Stewart, Craig A}, booktitle = {Educause Conference in Atlanta, Georgia} }
@inproceedings{ title = {Transforming support: From helpdesk to information center}, type = {inproceedings}, year = {2002}, pages = {272-274}, id = {051d30ae-f79f-3a09-88b1-7520abf79323}, created = {2018-02-27T18:07:31.361Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:31.361Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, source_type = {inproceedings}, private_publication = {false}, bibtype = {inproceedings}, author = {Link, Matthew R}, booktitle = {Proceedings ACM SIGUCCS User Services Conference} }
@techreport{ title = {INGEN's advanced IT facilities: The least you need to know}, type = {techreport}, year = {2002}, id = {1d5f940a-7446-36a2-b933-432da93cb8ec}, created = {2018-02-27T18:07:33.868Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:13.168Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Cruise2002a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Cruise, Robert and Hart, David and Papakhian, Mary and Repasky, Richard and Samuel, John and Shankar, Anurag and Stewart, Craig A and Wernert, Eric} }
@inproceedings{ title = {Parallel implementation and performance of fastDNAml}, type = {inproceedings}, year = {2001}, pages = {20-20}, websites = {http://portal.acm.org/citation.cfm?doid=582034.582054}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {9f256222-3ea4-344a-b90b-d3362a7a49fa}, created = {2018-02-27T18:07:30.011Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T20:15:52.817Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2001f}, source_type = {CONF}, private_publication = {false}, abstract = {This paper describes the parallel implementation of fastDNAml, a program for the maximum likelihood inference of phylogenetic trees from DNA sequence data. Mathematical means of inferring phylogenetic trees have been made possible by the wealth of DNA data now available. Maximum likelihood analysis of phylogenetic trees is extremely computationally intensive. Availability of computer resources is a key factor limiting use of such analyses. fastDNAml is implemented in serial, PVM, and MPI versions, and may be modified to use other message passing libraries in the future. We have developed a viewer for comparing phylogenies. We tested the scaling behavior of fastDNAml on an IBM RS/6000 SP up to 64 processors. The parallel version of fastDNAml is one of very few computational phylogenetics codes that scale well. fastDNAml is available for download as source code or compiled for Linux or AIX.}, bibtype = {inproceedings}, author = {Stewart, Craig A and Hart, David and Berry, Donald K and Olsen, Gary J and Wernert, Eric A and Fischer, William}, doi = {10.1145/582034.582054}, booktitle = {Proceedings of the 2001 ACM/IEEE conference on Supercomputing (CDROM) - Supercomputing '01} }
@inproceedings{ title = {High performance computing - Delivering valuable and valued services at colleges and universities}, type = {inproceedings}, year = {2001}, pages = {266}, websites = {http://portal.acm.org/citation.cfm?doid=500956.501026}, publisher = {ACM Press}, city = {New York, New York, USA}, id = {6d1ad281-ca5f-38f5-8ec9-d8686944c304}, created = {2019-09-12T19:55:17.765Z}, accessed = {2019-09-12}, file_attached = {true}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2019-09-12T19:55:17.834Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, abstract = {Supercomputers were once regarded as being of very limited use - of interest to a very few national centers and used by a small fraction of researchers at any given university. As scientific research becomes more and more dependent upon management and analysis of massive amounts of data, advances in human knowledge will become increasingly dependent upon use of high performance computers and parallel programming techniques. Indiana University has undergone a transformation over the past four years, during which the capacity, use, and number of users of High Performance Computing (HPC) systems has dramatically increased. HPC systems are widely viewed as valuable to the scholarly community of Indiana University - even by those researchers who do not use parallel programming techniques. Economies of scale and vendor partnerships have enabled Indiana University to amass significant HPC systems. Carefully implemented strategies in delivery of consulting support have expanded the use of parallel programming techniques. Such techniques are of critical value to advancement of human knowledge in many disciplines, and it is now possible for any institution of higher education to provide some sort of parallel computing resource for education and research.}, bibtype = {inproceedings}, author = {Stewart, Craig A. and Peebles, Christopher S. and Papakhian, Mary and Samuel, John and Hart, David and Simms, Stephen}, doi = {10.1145/500956.501026}, booktitle = {Proceedings of the 29th annual ACM SIGUCCS conference on User services - SIGUCCS '01} }
@inproceedings{ title = {Measuring quality, cost, and value of IT services}, type = {inproceedings}, year = {2001}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/426}, publisher = {American Society for Quality}, id = {d517bf70-6223-33b5-8590-aef84970e2a0}, created = {2020-09-09T16:22:17.477Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T16:22:17.477Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Peebles2001}, private_publication = {false}, abstract = {For the last decade, University Information Technology Services (UITS) at Indiana University has measured the satisfaction of its customers - students, faculty, and staff- with the IT services its members produced for the university community. It has used the results of these surveys to improve the range and quality of services it offers. For the last five years Activity Based Costing measures have been applied to all IT services produced by UITS. Through major organizational realignment, profound cultural changes, and the rapid evolution in hardware, software, and network technologies, UITS has pursued quality improvement, process improvement, and implementation of the Balanced Scorecard family of measures. We discuss the journey thus far with special reference to the ways in which support services are critical to the realization of full value of IT services by our customers.}, bibtype = {inproceedings}, author = {Peebles, C.S. and Voss, B.D. and Stewart, C.A. and Workman, S.B.}, booktitle = {Proceedings of the 55th Annual Quality Congress} }
@techreport{ title = {Indiana University Shared University Research Grants - Report on Accomplishments}, type = {techreport}, year = {2000}, id = {d8317783-b277-3eff-ae76-228c1be23a55}, created = {2018-02-27T18:07:25.600Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:16.208Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart2000g}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Stewart, Craig A and Bramley, Randall and Bernbom, Gerry and Dunn, Jon W and Meglicki, Zdzislaw and McMullen, D F and Hart, David and Papakhian, Mary} }
@techreport{ title = {Research and Academic Computing Implementation Plan}, type = {techreport}, year = {2000}, id = {37c8d658-e776-3fb5-b2a2-0280a59b1f2c}, created = {2018-02-27T18:07:26.605Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:11.932Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Peebles2000a}, source_type = {RPRT}, private_publication = {false}, bibtype = {techreport}, author = {Peebles, Christopher S and Stewart, Craig A and Bernbom, Gerry and McMullen, Donald F and Shankar, Anurag and Samuel, John and Daniels, John and Papakhian, Mary and Hart, David and Walsh, John} }
@inproceedings{ title = {Tethering and reattachment in collaborative virtual environments}, type = {inproceedings}, year = {2000}, keywords = {Collaborative virtual environment,Constraint management,Computer simulation,Computer supported cooperative work,Virtual reality}, pages = {292}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-0033871942&partnerID=40&md5=75f7ac5a20cca2024ee7f73753b8adfc}, publisher = {IEEE, Los Alamitos, CA, United States}, city = {New Brunswick, NJ, USA}, id = {fc6372c4-46eb-325a-994b-973010b40f49}, created = {2018-02-27T18:07:26.620Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:26.620Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert2000292}, source_type = {conference}, notes = {cited By 2; Conference of IEEE Virtual Reality 2000 ; Conference Date: 18 March 2000 Through 22 March 2000; Conference Code:56747}, private_publication = {false}, abstract = {We explore a family of specific dynamical methods that support the contrasting goals of presence and independence in collaborative virtual environments. We pose for ourselves the basic tasks of `tethering' - keeping a collaborator close to a group or leader, and of `reattachment' - returning to a collaborative virtual activity after a period of independent exploration. We first present a taxonomy of methods and parameters associated with tethering and reattachment, and then describe a formative evaluation study.}, bibtype = {inproceedings}, author = {Wernert, Eric A and Hanson, Andrew J}, booktitle = {Proceedings - Virtual Reality Annual International Symposium} }
@inproceedings{ title = {Very Large Scale Visualization Methods for Astrophysical Data.}, type = {inproceedings}, year = {2000}, pages = {115-124}, publisher = {Springer}, id = {341a7132-fe73-3c97-8daa-89b99cf3a8b0}, created = {2018-02-27T18:07:29.296Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.218Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Hanson2000}, source_type = {CONF}, private_publication = {false}, abstract = {We address the problem of interacting with scenes that contain a very large range of scales. Computer graphics environments normally deal with only a limited range of orders of magnitude before numerical error and other anomalies begin to be apparent, and the effects vary widely from environment to environment. Applications such as astrophysics, where a single scene could in principle contain visible objects from the subatomic scale to the intergalactic scale, provide a good proving ground for the multiple scale problem. In this context, we examine methods for interacting continuously with simultaneously active astronomical data sets ranging over 40 or more orders of magnitude. Our approach relies on utilizing a single scale of order 1.0 for the definition of all data sets. Where a single object, like a planet or a galaxy, may require moving in neighborhoods of vastly different scales, we employ multiple scale representations for the single object; normally, these are spa...}, bibtype = {inproceedings}, author = {Hanson, Andrew J and Fu, Chi-Wing and Wernert, Eric A}, doi = {10.1007/978-3-7091-6783-0_12}, booktitle = {VisSym} }
@inproceedings{ title = {Computational Biology and High Performance Computing 2000}, type = {inproceedings}, year = {2000}, city = {Dallas, Texas}, id = {089a06a3-e3d4-3672-8838-e5fb10892ee8}, created = {2018-02-27T18:07:30.282Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:12.838Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Simon2000a}, source_type = {JOUR}, private_publication = {false}, bibtype = {inproceedings}, author = {Simon, Horst D and Zorn, Manfred D and Spengler, Sylvia J and Shoichet, Brian K and Stewart, Craig A and Dubchak, Inna L and Arkin, Adam P}, booktitle = {Computational Biology and High Performance Computing 2000} }
@inproceedings{ title = {A constrained navigation framework for individual and collaborative exploration of 3D environments}, type = {inproceedings}, year = {1999}, publisher = {Indiana University}, id = {55537df9-b4de-3e75-bacf-9cb6b121b22f}, created = {2018-02-27T18:07:25.560Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:11.698Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert1999}, source_type = {BOOK}, private_publication = {false}, abstract = {We approach the problem of exploring a virtual space by exploiting positional and camera-model constraints on navigation to provide extra assistance that focuses the user's explorational wanderings on the task objectives. Our specific design incorporates not only task-based constraints on the viewer's location, gaze, and viewing parameters, but also a personal "guide" that serves two important functions: keeping the user oriented in the navigation space, and "pointing" to interesting subject areas as they are approached. The guide's cues may be ignored by continuing in motion, but if the user stops, the gaze shifts automatically toward whatever the guide was interested in. This design has the serendipitous feature that it automatically incorporates a nested collaborative paradigm simply by allowing any given viewer to be seen as the "guide" of one or more viewers following behind; the leading automated guide (we tend to select a guide dog for this avatar) can remind the leading live human guide of interesting sites to point out, while each real human collaborator down the chain has some choices about whether to follow the local leader's hints. We have chosen VRML as our initial development medium primarily because of its portability, and we have implemented a variety of natural modes for leading and collaborating, including ways for collaborators to attach to and detach from a particular leader.}, bibtype = {inproceedings}, author = {Wernert, Eric Andrew}, doi = {10.1109/VISUAL.1999.809893}, booktitle = {Visualization '99} }
@inproceedings{ title = {Evolutionary biology and computational grids}, type = {inproceedings}, year = {1999}, id = {ed0e734b-1301-3e37-a6be-87fe10c2ec90}, created = {2018-02-27T18:07:25.728Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-03-25T22:58:15.919Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Stewart1999c}, source_type = {RPRT}, private_publication = {false}, abstract = {The global high performance computing community has seen two overarching changes in the past five years. One of these changes was the consolidation toward SMP clusters as the predominant HPC system architecture. The other change was the emergence of computing grids as an important architecture in high performance computing. Several major national and international projects are now underway to develop grid technologies. Computational grids will increase the resources available to the most advanced computational scientists and encourage the use of advanced techniques by researchers who have not traditionally employed such technologies. In the latter camp are bioinformaticists in general and evolutionary biologists in particular, although this situation is changing rapidly.}, bibtype = {inproceedings}, author = {Stewart, Craig A and Tan, Tin Wee and Buckhorn, Markus and Hart, David and Berry, Donald K and Zhang, Louxin and Wernert, Eric and Sakharkar, Meena and Fischer, Will and McMullen, Donald F}, booktitle = {CASCON Workshop on Computational Biology} }
@inproceedings{ title = {Framework for assisted exploration with collaboration}, type = {inproceedings}, year = {1999}, keywords = {Assisted collaborative exploration,Computer graphics,Computer simulation; Graphical user interfaces; Hi}, pages = {241-248}, websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-0033314714&partnerID=40&md5=d5187be310f17d708eee9a8b6962024a}, publisher = {IEEE, Los Alamitos, CA, United States}, city = {San Francisco, CA, USA}, id = {ee692ea2-2d46-34d4-bda1-e0580c1171a5}, created = {2018-02-27T18:07:30.970Z}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2018-02-27T18:07:30.970Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, citation_key = {Wernert1999241}, source_type = {conference}, notes = {cited By 28; Conference of Proceedings of the IEEE Visualization '99 ; Conference Date: 24 October 1999 Through 29 October 1999; Conference Code:56308}, private_publication = {false}, abstract = {We approach the problem of exploring a virtual space by exploiting positional and camera-model constraints on navigation to provide extra assistance that focuses the user's explorational wanderings on the task objectives. Our specific design incorporates not only task-based constraints on the viewer's location, gaze, and viewing parameters, but also a personal `guide' that serves two important functions: keeping the user oriented in the navigation space, and `pointing' to interesting subject areas as they are approached. The guide's cues may be ignored by continuing in motion, but if the user stops, the gaze shifts automatically toward whatever the guide was interested in. This design has the serendipitous feature that it automatically incorporates a nested collaborative paradigm simply by allowing any given viewer to be seen as the `guide' of one or more viewers following behind; the leading automated guide (we tend to select a guide dog for this avatar) can remind the leading live human guide of interesting sites to point out, while each real human collaborator down the chain has some choices about whether to follow the local leader's hints. We have chosen VRML as our initial development medium primarily because of its portability, and we have implemented a variety of natural modes for leading and collaborating, including ways for collaborators to attach to and detach from a particular leader.}, bibtype = {inproceedings}, author = {Wernert, Eric A and Hanson, Andrew J}, booktitle = {Proceedings of the IEEE Visualization Conference} }
@techreport{ title = {Datasets Published by the IU Pervasive Technology Institute}, type = {techreport}, year = {1999}, keywords = {Technical Report}, websites = {http://creativecommons.org/licenses/by/4.0/}, month = {8}, day = {26}, id = {dccb1be9-6481-3dd2-980e-df11254bfffe}, created = {2020-09-15T22:43:58.991Z}, accessed = {2020-09-11}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-15T22:43:58.991Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {true}, hidden = {false}, private_publication = {false}, abstract = {This report considers only data sets and binary digital products stored in IU Scholarworks (scholarworks.iu.edu). Software stored in other repositories (such as sourceforge.net or github.com) is not included in this listing. There are a total of 177 data sets listed in this report (see Section 2). There are eight additional binary images published by the IU Pervasive Technology Institute via Scholarworks.iu.edu between 1999 and 2019 (see Section 3). All of these latter eight are binaries of Virtual Machine images used on the Jetstream cloud system (Jetstream-cloud.org).}, bibtype = {techreport}, author = {Stewart, Craig A and Plale, Beth and Fischer, Jeremy} }
@misc{ title = {Technical Report: XSEDE Return on Investment (Proxy) Data and Analysis Methods, July 2014 to August 2019}, type = {misc}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/25704}, id = {a4006e22-92ff-3bf8-a7af-83eddcaf74d3}, created = {2020-09-09T20:14:40.029Z}, accessed = {2020-09-09}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T20:14:40.127Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, bibtype = {misc}, author = {} }
@misc{ title = {Indiana University Pervasive Technology Institute preproposal and proposal management, documentation, and templates}, type = {misc}, websites = {https://scholarworks.iu.edu/dspace/handle/2022/25580}, id = {b44d64bc-5ef7-3cc5-a262-e94ce4e44f93}, created = {2020-09-09T20:39:36.823Z}, accessed = {2020-09-09}, file_attached = {false}, profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d}, group_id = {27e0553c-8ec0-31bd-b42c-825b8a5a9ae8}, last_modified = {2020-09-09T20:39:36.888Z}, read = {false}, starred = {false}, authored = {false}, confirmed = {false}, hidden = {false}, private_publication = {false}, bibtype = {misc}, author = {} }