106(1-2):105–113.

doi abstract bibtex

doi abstract bibtex

N-body algorithms for long-range unscreened interactions like gravity belong to a class of highly irregular problems whose optimal solution is a challenging task for present-day massively parallel computers. In this paper we describe a strategy for optimal memory and work distribution which we have applied to our parallel implementation of the Barnes & Hut (1986) recursive tree scheme on a Cray T3D using the CRAFT programming environment. We have performed a series of tests to find an optimal data distribution in the T3D memory, and to identify a strategy for the Dynamic Load Balance in order to obtain good performances when running large simulations (more than 10 million particles). The results of tests show that the step duration depends on two main factors: the data locality and the T3D network contention. Increasing data locality we are able to minimize the step duration if the closest bodies (direct interaction) tend to be located in the same PE local memory (contiguous block subdivision, high granularity), whereas the tree properties have a fine grain distribution. In a very large simulation, due to network contention, an unbalanced load arises. To remedy this we have devised an automatic work redistribution mechanism which provided a good Dynamic Load Balance at the price of an insignificant overhead.

@article{beccianiParallelTreeCode1997,
  % Zotero/Better-BibTeX export (biblatex dialect: journaltitle + date), reflowed to
  % one field per line. Fixes: periods added to author initials; ISSN hyphenated;
  % subtitle colon restored in title; "distributuion" typo in abstract corrected;
  % line-wrapped file path rejoined onto a single line.
  title        = {A Parallel Tree Code for Large {{N}}-Body Simulation: Dynamic Load Balance and Data Distribution on a {{CRAY T3D}} System},
  author       = {Becciani, U. and Ansaloni, R. and Antonuccio-Delogu, V. and Erbacci, G. and Gambera, M. and Pagliaro, A.},
  journaltitle = {Computer Physics Communications},
  volume       = {106},
  number       = {1-2},
  pages        = {105--113},
  date         = {1997},
  issn         = {0010-4655},
  doi          = {10.1016/S0010-4655(97)00102-1},
  abstract     = {N-body algorithms for long-range unscreened interactions like gravity belong to a class of highly irregular problems whose optimal solution is a challenging task for present-day massively parallel computers. In this paper we describe a strategy for optimal memory and work distribution which we have applied to our parallel implementation of the Barnes \& Hut (1986) recursive tree scheme on a Cray T3D using the CRAFT programming environment. We have performed a series of tests to find an optimal data distribution in the T3D memory, and to identify a strategy for the Dynamic Load Balance in order to obtain good performances when running large simulations (more than 10 million particles). The results of tests show that the step duration depends on two main factors: the data locality and the T3D network contention. Increasing data locality we are able to minimize the step duration if the closest bodies (direct interaction) tend to be located in the same PE local memory (contiguous block subdivision, high granularity), whereas the tree properties have a fine grain distribution. In a very large simulation, due to network contention, an unbalanced load arises. To remedy this we have devised an automatic work redistribution mechanism which provided a good Dynamic Load Balance at the price of an insignificant overhead.},
  file         = {/home/dimitri/Nextcloud/Zotero/storage/Q4T46U2L/Becciani et al. - 1997 - A parallel tree code for large N-body simulation dynamic load balance and data distribution on a CRAY T3D syste.pdf},
}

Downloads: 0