Declarative parallel programming for GPUs. Holk, E., Byrd, W., Mahajan, N., Willcock, J., Chauhan, A., & Lumsdaine, A. Advances in Parallel Computing, 22:297-304, IOS Press BV, 2012.
The recent rise in the popularity of Graphics Processing Units (GPUs) has been fueled by software frameworks, such as NVIDIA's Compute Unified Device Architecture (CUDA) and Khronos Group's OpenCL, that make GPUs available for general-purpose computing. However, CUDA and OpenCL are still low-level approaches that require users to handle details of data layout and movement across levels of the memory hierarchy. We propose a declarative approach to coordinating computation and data movement between CPU and GPU, through a domain-specific language that we call Harlan. Not only does a declarative language obviate the need for the programmer to write low-level, error-prone boilerplate code; by raising the level of abstraction at which GPU computation is specified, it also allows the compiler to optimize data movement and to overlap CPU and GPU computation. By focusing on the 'what', rather than the 'how', of data layout, data movement, and computation scheduling, the language eliminates many sources of programming errors related to correctness and performance. © 2012 The authors and IOS Press. All rights reserved.
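To make concrete the kind of low-level boilerplate the abstract refers to, the following is a minimal CUDA sketch (illustrative only; it is not taken from the paper and is not Harlan code) of an element-wise vector addition. Allocation, host-to-device copies, the kernel launch configuration, and the device-to-host copy of the result must all be written by hand; in the declarative model the paper proposes, these data-movement details are left to the compiler.

// Illustrative CUDA sketch (not from the paper, not Harlan): element-wise
// vector addition with the explicit data layout and movement that the
// abstract says declarative languages such as Harlan make implicit.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

__global__ void add(const float *a, const float *b, float *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main() {
    const int n = 1 << 20;
    const size_t bytes = n * sizeof(float);

    // Host-side allocation and initialization.
    float *ha = (float *)malloc(bytes);
    float *hb = (float *)malloc(bytes);
    float *hc = (float *)malloc(bytes);
    for (int i = 0; i < n; ++i) { ha[i] = 1.0f; hb[i] = 2.0f; }

    // Explicit device allocation and host-to-device copies: the
    // boilerplate "how" of data movement handled by the programmer.
    float *da, *db, *dc;
    cudaMalloc((void **)&da, bytes);
    cudaMalloc((void **)&db, bytes);
    cudaMalloc((void **)&dc, bytes);
    cudaMemcpy(da, ha, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, bytes, cudaMemcpyHostToDevice);

    // Manually chosen launch configuration.
    add<<<(n + 255) / 256, 256>>>(da, db, dc, n);

    // Explicit device-to-host copy of the result.
    cudaMemcpy(hc, dc, bytes, cudaMemcpyDeviceToHost);
    printf("c[0] = %f\n", hc[0]);

    cudaFree(da); cudaFree(db); cudaFree(dc);
    free(ha); free(hb); free(hc);
    return 0;
}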
@article{Holk2012297,
 title = {Declarative parallel programming for GPUs},
 type = {article},
 year = {2012},
 pages = {297-304},
 volume = {22},
 websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906571206&doi=10.3233%2F978-1-61499-041-3-297&partnerID=40&md5=0be2243f6de06cabbb665055c6c91e57},
 publisher = {IOS Press BV},
 notes = {cited By 2},
 abstract = {The recent rise in the popularity of Graphics Processing Units (GPUs) has been fueled by software frameworks, such as NVIDIA's Compute Unified Device Architecture (CUDA) and Khronos Group's OpenCL that make GPUs available for general purpose computing. However, CUDA and OpenCL are still low-level approaches that require users to handle details about data layout and movement across levels of memory hierarchy. We propose a declarative approach to coordinating computation and data movement between CPU and GPU, through a domain-specific language that we called Harlan. Not only does a declarative language obviate the need for the programmer to write low-level error-prone boilerplate code, by raising the abstraction of specifying GPU computation it also allows the compiler to optimize data movement and overlap between CPU and GPU computation. By focusing on the 'what', and not the 'how', of data layout, data movement, and computation scheduling, the language eliminates the sources of many programming errors related to correctness and performance. © 2012 The authors and IOS Press. All rights reserved.},
 bibtype = {article},
 author = {Holk, E and Byrd, W and Mahajan, N and Willcock, J and Chauhan, A and Lumsdaine, A},
 doi = {10.3233/978-1-61499-041-3-297},
 journal = {Advances in Parallel Computing}
}
