Sparse collective operations for MPI. Hoefler, T. & Träff, J. L. In IPDPS 2009 - Proceedings of the 2009 IEEE International Parallel and Distributed Processing Symposium, 2009.
Sparse collective operations for MPI [link]Website  doi  abstract   bibtex   
We discuss issues in designing sparse (nearest neighbor) collective operations for communication and reduction operations in small neighborhoods for the Message Passing Interface (MPI). We propose three such operations, namely a sparse gather operation, a sparse all-to-all, and a sparse reduction operation in both regular and irregular (vector) variants. By two simple experiments we show a) that a collective handle for message scheduling and communication optimization is necessary for any such interface, b) that the possibly different amount of communication between neighbors need to be taken into account by the optimization, and c) illustrate the improvements that are possible by schedules that possess global information compared to implementations that can rely on only local information. We discuss different forms the interface and optimization handles could take. The paper is inspired by current discussion in the MPI Forum. © 2009 IEEE.
@inproceedings{Hoefler2009,
 title = {Sparse collective operations for {MPI}},
 type = {inproceedings},
 year = {2009},
 month = may,
 keywords = {Collective operations,Communication optimization,Distributed parameter networks,Optimization,Message passing},
 websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-70450031957&doi=10.1109%2FIPDPS.2009.5160935&partnerID=40&md5=bb4264cfccab9293120f0eb2f79bcab6},
 city = {Rome},
 id = {dc5c3c08-5ed8-3afe-8097-7ed36f776504},
 created = {2018-01-09T20:30:38.481Z},
 file_attached = {false},
 profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d},
 last_modified = {2018-03-12T19:03:18.410Z},
 read = {false},
 starred = {false},
 authored = {true},
 confirmed = {true},
 citation_key = {Hoefler2009},
 source_type = {conference},
 notes = {cited By 26; Conference of 23rd IEEE International Parallel and Distributed Processing Symposium, IPDPS 2009 ; Conference Date: 23 May 2009 Through 29 May 2009; Conference Code:78504},
 folder_uuids = {2aba6c14-9027-4f47-8627-0902e1e2342b},
 private_publication = {false},
 abstract = {We discuss issues in designing sparse (nearest neighbor) collective operations for communication and reduction operations in small neighborhoods for the Message Passing Interface (MPI). We propose three such operations, namely a sparse gather operation, a sparse all-to-all, and a sparse reduction operation in both regular and irregular (vector) variants. By two simple experiments we show a) that a collective handle for message scheduling and communication optimization is necessary for any such interface, b) that the possibly different amount of communication between neighbors need to be taken into account by the optimization, and c) illustrate the improvements that are possible by schedules that possess global information compared to implementations that can rely on only local information. We discuss different forms the interface and optimization handles could take. The paper is inspired by current discussion in the MPI Forum. © 2009 IEEE.},
 bibtype = {inproceedings},
 author = {Hoefler, Torsten and Träff, Jesper Larsson},
 doi = {10.1109/IPDPS.2009.5160935},
 booktitle = {{IPDPS} 2009 - Proceedings of the 2009 {IEEE} International Parallel and Distributed Processing Symposium}
}

Downloads: 0