Regenerating and quantifying quality of benchmarking data using static and dynamic provenance. Ghoshal, D., Chauhan, A., & Plale, B. Lecture Notes in Computer Science, Volume 8628, 2015.
Application benchmarks are critical to establishing the performance of a new system or library. But benchmarking a system can be tricky and reproducing a benchmark result even trickier. Provenance can help. Referencing benchmarks and their results on similar platforms for collective comparison and evaluation requires capturing provenance related to the process of benchmark execution, programs involved and results generated. In this paper we define a formal model of benchmark applications and required provenance, describe an implementation of the model that employs compile time (static) and runtime provenance capture, and quantify data quality in the context of benchmarks. Our results show that through a mix of compile time and runtime provenance capture, we can enable higher quality benchmark regeneration.
@book{Ghoshal2015,
 title = {Regenerating and quantifying quality of benchmarking data using static and dynamic provenance},
 author = {Ghoshal, D. and Chauhan, A. and Plale, B.},
 series = {Lecture Notes in Computer Science},
 volume = {8628},
 year = {2015},
 publisher = {Springer International Publishing},
 doi = {10.1007/978-3-319-16462-5_5},
 abstract = {Application benchmarks are critical to establishing the performance of a new system or library. But benchmarking a system can be tricky and reproducing a benchmark result even trickier. Provenance can help. Referencing benchmarks and their results on similar platforms for collective comparison and evaluation requires capturing provenance related to the process of benchmark execution, programs involved and results generated. In this paper we define a formal model of benchmark applications and required provenance, describe an implementation of the model that employs compile time (static) and runtime provenance capture, and quantify data quality in the context of benchmarks. Our results show that through a mix of compile time and runtime provenance capture, we can enable higher quality benchmark regeneration.}
}
