Asymptotic Computing – Undoing the Damage. Sterling, T., Brodowicz, M., Kogler, D., & Anderson, M. New Frontiers in High Performance Computing and Big Data, 30:55–73, IOS Press, 2017.
While the very far future well beyond exaflops computing may encompass such paradigm shifts as quantum computing or neuromorphic computing, a critical window of change exists within the domain of semiconductor digital logic technology but beyond conventional practices of architecture, system software, and programming. As key parameters such as Dennard scaling, nano-scale component densities, clock rates, pin I/O, and voltage approach asymptotic operational regimes, one major area of untapped opportunity is computer architecture, which has been severely limited by conventional practices of organization and control semantics. Innovation in mainstream HPC computer architecture has been inhibited by the original von Neumann architecture of seven decades ago. Although notably diverse in the forms of parallelism they exploit, the six major epochs of computer architecture through to the present are all von Neumann derivatives. At their core is the use of single-instruction issue and the prioritization of floating-point unit (FPU) utilization. However, in the modern age, FPUs consume only a small part of die real estate, while the plethora of mechanisms needed to achieve maximum floating-point efficiency takes up the majority of the chip. The von Neumann bottleneck, the separation of memory and processor, is also retained. A revolution in computer architecture design is possible by undoing the damage of the von Neumann heritage and emphasizing the key challenges of data movement latency and bandwidth, which, along with operation/instruction issue control, are the true precious resources. This paper discusses key tradeoffs that should drive computer architecture in what might be called the “Neo-Digital Age”.
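The abstract's central claim, that FPUs are cheap while data movement is the scarce resource, can be made concrete with a simple roofline estimate. The sketch below is not from the paper; the peak compute and bandwidth figures are illustrative assumptions for a hypothetical machine.

```python
# Back-of-envelope roofline estimate: for low-arithmetic-intensity kernels,
# memory bandwidth, not FPU count, bounds achievable performance.
# Both peak figures below are illustrative assumptions, not from the paper.

PEAK_FLOPS = 3.0e12   # assumed peak FPU throughput: 3 TFLOP/s
PEAK_BW    = 2.0e11   # assumed peak memory bandwidth: 200 GB/s

def attainable_gflops(arithmetic_intensity: float) -> float:
    """Roofline model: the attainable rate is the lesser of the compute peak
    and bandwidth times arithmetic intensity (FLOPs per byte moved)."""
    return min(PEAK_FLOPS, PEAK_BW * arithmetic_intensity) / 1e9

# STREAM-triad-like kernel a[i] = b[i] + s*c[i]: 2 FLOPs per 24 bytes moved
# (read b and c, write a, 8 bytes each; write-allocate traffic ignored).
triad_ai = 2 / 24
print(f"triad: {attainable_gflops(triad_ai):.0f} GFLOP/s attainable "
      f"of a {PEAK_FLOPS / 1e9:.0f} GFLOP/s peak")
```

Under these assumed numbers the triad kernel reaches roughly 17 GFLOP/s of a 3,000 GFLOP/s peak, i.e. under 1% FPU utilization: exactly the regime in which maximizing FPU count, rather than data movement capability, misallocates die real estate.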
@article{STERLING2017,
 title = {Asymptotic Computing – Undoing the Damage},
 year = {2017},
 pages = {55--73},
 volume = {30},
 publisher = {IOS Press},
 author = {Sterling, Thomas and Brodowicz, Maciej and Kogler, Danny and Anderson, Matthew},
 doi = {10.3233/978-1-61499-816-7-55},
 journal = {New Frontiers in High Performance Computing and Big Data}
}
