Performance of Windows multicore systems on threading and MPI. Qiu, J. & Bae, S.-H. Concurrency and Computation: Practice and Experience, 24(1):14-28, Wiley Online Library, 2012.
doi  abstract   bibtex   
We present performance results on a Windows cluster with up to 768 cores using Message Passing Interface (MPI) and two variants of threading - Concurrency and Coordination Runtime (CCR) and Task Parallel Library (TPL). CCR presents a message-based interface, while TPL allows for loops to be automatically parallelized. MPI is used between the cluster nodes (up to 32) and either threading or MPI for parallelism on the 24 cores of each node. We look at the performance of two significant bioinformatics applications; gene clustering and dimension reduction. We find that the two threading runtimes offer similar performance with MPI outperforming both at low levels of parallelism but threading much better when the grain size (problem size per process/thread) is small. We develop simple models for the performance of the clustering code. Copyright © 2011 John Wiley & Sons, Ltd.
@article{Qiu2012b,
 title = {Performance of {Windows} Multicore Systems on Threading and {MPI}},
 type = {article},
 year = {2012},
 pages = {14--28},
 volume = {24},
 publisher = {Wiley},
 id = {017d7b00-aa01-3f12-a50d-7a8b24a042b0},
 created = {2017-12-18T21:44:04.793Z},
 file_attached = {false},
 profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d},
 last_modified = {2020-05-11T14:43:45.162Z},
 read = {false},
 starred = {false},
 authored = {true},
 confirmed = {true},
 hidden = {false},
 citation_key = {Qiu2012b},
 source_type = {JOUR},
 folder_uuids = {36d8ccf4-7085-47fa-8ab9-897283d082c5},
 private_publication = {false},
 abstract = {We present performance results on a Windows cluster with up to 768 cores using Message Passing Interface (MPI) and two variants of threading - Concurrency and Coordination Runtime (CCR) and Task Parallel Library (TPL). CCR presents a message-based interface, while TPL allows for loops to be automatically parallelized. MPI is used between the cluster nodes (up to 32) and either threading or MPI for parallelism on the 24 cores of each node. We look at the performance of two significant bioinformatics applications; gene clustering and dimension reduction. We find that the two threading runtimes offer similar performance with MPI outperforming both at low levels of parallelism but threading much better when the grain size (problem size per process/thread) is small. We develop simple models for the performance of the clustering code. Copyright © 2011 John Wiley \& Sons, Ltd.},
 bibtype = {article},
 author = {Qiu, Judy and Bae, Seung-Hee},
 doi = {10.1002/cpe.1762},
 journal = {Concurrency and Computation: Practice and Experience},
 number = {1}
}

Downloads: 0