Implementation, performance, and science results from a 30.7 TFLOPS IBM BladeCenter cluster. Stewart, C. A., Link, M., McCaulay, D. S., Rodgers, G., Turner, G., Hancock, D., Wang, P., Saied, F., Pierce, M., Aiken, R., Mueller, M. S., Jurenz, M., Lieber, M., Tillotson, J., & Plale, B. A. Concurrency and Computation: Practice and Experience, 22(2), 2009. doi: 10.1002/cpe.1539
Abstract: This paper describes Indiana University's implementation, performance testing, and use of a large high performance computing system. IU's Big Red, a 20.48 TFLOPS IBM e1350 BladeCenter cluster, appeared in the 27th Top500 list as the 23rd fastest supercomputer in the world in June 2006. In spring 2007, this computer was upgraded to 30.72 TFLOPS. The e1350 BladeCenter architecture, including two internal networks accessible to users and user applications and two networks used exclusively for system management, has enabled the system to provide good scalability on many important applications while remaining easy to manage. Implementing a system based on the JS21 Blade and PowerPC 970MP processor within the US TeraGrid presented certain challenges, given that Intel-compatible processors dominate the TeraGrid. However, the particular characteristics of the PowerPC have made it highly popular among certain application communities, particularly users of molecular dynamics and weather forecasting codes. A critical aspect of Big Red's implementation has been a focus on Science Gateways, which provide graphical interfaces to systems supporting end-to-end scientific workflows. Several Science Gateways have been implemented that access Big Red as a computational resource, some via the TeraGrid and some not affiliated with the TeraGrid. In summary, Big Red has been successfully integrated with the TeraGrid and is used by many researchers locally at IU via grids and Science Gateways. It has been a success in terms of enabling scientific discoveries at IU and, via the TeraGrid, across the US. Copyright © 2009 John Wiley & Sons, Ltd.
@article{Stewart2010a,
title = {Implementation, performance, and science results from a 30.7 TFLOPS IBM BladeCenter cluster},
year = {2009},
volume = {22},
url = {http://doi.wiley.com/10.1002/cpe.1539},
author = {Stewart, Craig A. and Link, Matthew and McCaulay, D. Scott and Rodgers, Greg and Turner, George and Hancock, David and Wang, Peng and Saied, Faisal and Pierce, Marlon and Aiken, Ross and Mueller, Matthias S. and Jurenz, Matthias and Lieber, Matthias and Tillotson, Jenett and Plale, Beth A.},
doi = {10.1002/cpe.1539},
journal = {Concurrency and Computation: Practice and Experience},
number = {2}
}
{"_id":"nTesL6WbDCpG9n8fe","bibbaseid":"stewart-link-mccaulay-rodgers-turner-hancock-wang-saied-etal-implementationperformanceandscienceresultsfroma307tflopsibmbladecentercluster-2009","authorIDs":["jQQtqgyqXNWFo8FY3"],"author_short":["Stewart, C., A.","Link, M.","McCaulay, D., S.","Rodgers, G.","Turner, G.","Hancock, D.","Wang, P.","Saied, F.","Pierce, M.","Aiken, R.","Mueller, M., S.","Jurenz, M.","Lieber, M.","Tillotson, J.","Plale, B., A."],"bibdata":{"title":"Implementation, performance, and science results from a 30.7 TFLOPS IBM BladeCenter cluster","type":"article","year":"2009","volume":"22","websites":"http://doi.wiley.com/10.1002/cpe.1539","id":"4cf562dc-63d0-38f9-b30a-2f1f0ff4b58b","created":"2019-10-01T18:06:10.780Z","file_attached":false,"profile_id":"42d295c0-0737-38d6-8b43-508cab6ea85d","last_modified":"2020-09-09T18:06:45.834Z","read":false,"starred":false,"authored":"true","confirmed":"true","hidden":false,"citation_key":"Stewart2010a","folder_uuids":"9559c0d7-009c-4a6d-b131-a3e3d83d98b8,ec6ad3c6-db7d-494d-863c-ef38d23f1f7e,22c3b665-9e84-4884-8172-710aa9082eaf","private_publication":false,"abstract":"This paper describes Indiana University's implementation, performance testing, and use of a large high performance computing system. IU'S Big Red, a 20.48 TFLOPS IBM e1350 BladeCenter cluster, appeared in the 27th Top500 list as the 23rd fastest supercomputer in the world in June 2006. In spring 2007, this computer was upgraded to 30.72 TFLOPS. The e1350 BladeCenter architecture, including two internal networks accessible to users and user applications and two networks used exclusively for system management, has enabled the system to provide good scalability on many important applications while being well manageable. Implementing a system based on the JS21 Blade and PowerPC 970MP processor within the US TeraGrid presented certain challenges, given that Intel-compatible processors dominate the TeraGrid. However, the particular characteristics of the PowerPC have enabled it to be highly popular among certain application communities, particularly users of molecular dynamics and weather forecasting codes. A critical aspect of Big Red's implementation has been a focus on Science Gateways, which provide graphical interfaces to systems supporting end-to-end scientific workflows. Several Science Gateways have been implemented that access Big Red as a computational resource-some via the TeraGrid, some not affiliated with the TeraGrid. In summary, Big Red has been successfully integrated with the TeraGrid, and is used by many researchers locally at IU via grids and Science Gateways. It has been a success in terms of enabling scientific discoveries at IU and, via the TeraGrid, across the US. Copyright © 2009 John Wiley & Sons, Ltd.","bibtype":"article","author":"Stewart, Craig A. and Link, Matthew and McCaulay, D. Scott and Rodgers, Greg and Turner, George and Hancock, David and Wang, Peng and Saied, Faisal and Pierce, Marlon and Aiken, Ross and Mueller, Matthias S. 
and Jurenz, Matthias and Lieber, Matthias and Tillotson, Jenett and Plale, Beth A.","doi":"10.1002/cpe.1539","journal":"Concurrency and Computation: Practice and Experience","number":"2","bibtex":"@article{\n title = {Implementation, performance, and science results from a 30.7 TFLOPS IBM BladeCenter cluster},\n type = {article},\n year = {2009},\n volume = {22},\n websites = {http://doi.wiley.com/10.1002/cpe.1539},\n id = {4cf562dc-63d0-38f9-b30a-2f1f0ff4b58b},\n created = {2019-10-01T18:06:10.780Z},\n file_attached = {false},\n profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d},\n last_modified = {2020-09-09T18:06:45.834Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Stewart2010a},\n folder_uuids = {9559c0d7-009c-4a6d-b131-a3e3d83d98b8,ec6ad3c6-db7d-494d-863c-ef38d23f1f7e,22c3b665-9e84-4884-8172-710aa9082eaf},\n private_publication = {false},\n abstract = {This paper describes Indiana University's implementation, performance testing, and use of a large high performance computing system. IU'S Big Red, a 20.48 TFLOPS IBM e1350 BladeCenter cluster, appeared in the 27th Top500 list as the 23rd fastest supercomputer in the world in June 2006. In spring 2007, this computer was upgraded to 30.72 TFLOPS. The e1350 BladeCenter architecture, including two internal networks accessible to users and user applications and two networks used exclusively for system management, has enabled the system to provide good scalability on many important applications while being well manageable. Implementing a system based on the JS21 Blade and PowerPC 970MP processor within the US TeraGrid presented certain challenges, given that Intel-compatible processors dominate the TeraGrid. However, the particular characteristics of the PowerPC have enabled it to be highly popular among certain application communities, particularly users of molecular dynamics and weather forecasting codes. A critical aspect of Big Red's implementation has been a focus on Science Gateways, which provide graphical interfaces to systems supporting end-to-end scientific workflows. Several Science Gateways have been implemented that access Big Red as a computational resource-some via the TeraGrid, some not affiliated with the TeraGrid. In summary, Big Red has been successfully integrated with the TeraGrid, and is used by many researchers locally at IU via grids and Science Gateways. It has been a success in terms of enabling scientific discoveries at IU and, via the TeraGrid, across the US. Copyright © 2009 John Wiley & Sons, Ltd.},\n bibtype = {article},\n author = {Stewart, Craig A. and Link, Matthew and McCaulay, D. Scott and Rodgers, Greg and Turner, George and Hancock, David and Wang, Peng and Saied, Faisal and Pierce, Marlon and Aiken, Ross and Mueller, Matthias S. 
and Jurenz, Matthias and Lieber, Matthias and Tillotson, Jenett and Plale, Beth A.},\n doi = {10.1002/cpe.1539},\n journal = {Concurrency and Computation: Practice and Experience},\n number = {2}\n}","author_short":["Stewart, C., A.","Link, M.","McCaulay, D., S.","Rodgers, G.","Turner, G.","Hancock, D.","Wang, P.","Saied, F.","Pierce, M.","Aiken, R.","Mueller, M., S.","Jurenz, M.","Lieber, M.","Tillotson, J.","Plale, B., A."],"urls":{"Website":"http://doi.wiley.com/10.1002/cpe.1539"},"biburl":"https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d","bibbaseid":"stewart-link-mccaulay-rodgers-turner-hancock-wang-saied-etal-implementationperformanceandscienceresultsfroma307tflopsibmbladecentercluster-2009","role":"author","metadata":{"authorlinks":{"pierce, m":"https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d/group/0e433c5b-85c4-32aa-851c-c145aac9f80f"}},"downloads":0},"bibtype":"article","creationDate":"2020-09-10T04:16:32.632Z","downloads":0,"keywords":[],"search_terms":["implementation","performance","science","results","tflops","ibm","bladecenter","cluster","stewart","link","mccaulay","rodgers","turner","hancock","wang","saied","pierce","aiken","mueller","jurenz","lieber","tillotson","plale"],"title":"Implementation, performance, and science results from a 30.7 TFLOPS IBM BladeCenter cluster","year":2009,"biburl":"https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d","dataSources":["zgahneP4uAjKbudrQ","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"]}