Data Pipeline in MapReduce. Zeng, J. & Plale, B. In Proceedings - IEEE 9th International Conference on e-Science, e-Science 2013, 2013. doi: 10.1109/eScience.2013.21

Abstract: MapReduce is an effective programming model for large-scale text and data analysis. Traditional MapReduce implementations such as Hadoop require that the entire input dataset be loaded into the cluster before any analysis can take place. This can introduce sizable latency when the dataset is large and when it is not possible to load the data once and process it many times, a situation that arises for log files, health records, and protected texts, for instance. We propose a data pipeline approach that hides data upload latency in MapReduce analysis. Our implementation, based on Hadoop MapReduce, is completely transparent to the user. It introduces a distributed concurrency queue to coordinate data block allocation and synchronization so as to overlap data upload with execution. The paper addresses two scheduling challenges: scheduling a fixed number of map tasks, and scheduling a dynamic number of map tasks, which allows better handling of input datasets of unknown size. We also employ a delay scheduler to achieve data locality in the pipeline. Evaluation of the solution on several applications over real-world datasets shows that our approach yields performance gains. Copyright © 2013 by The Institute of Electrical and Electronics Engineers, Inc.
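The core mechanism described in the abstract, an uploader feeding data blocks into a shared queue while map tasks drain it concurrently, can be illustrated with a minimal sketch. The code below is not the paper's implementation: it substitutes a single-JVM java.util.concurrent.BlockingQueue for the distributed concurrency queue and plain worker threads for Hadoop map tasks, and all class, method, and block names are hypothetical. The end-of-upload sentinel loosely mirrors the case where the total number of input blocks is unknown until the upload finishes.

// Illustrative sketch only; NOT the authors' implementation. It models the idea of
// overlapping data upload with map execution by handing blocks through a concurrent
// queue, using a single-JVM BlockingQueue and plain threads instead of a distributed
// queue and Hadoop map tasks. All names here are hypothetical.
import java.util.List;
import java.util.concurrent.*;

public class PipelineSketch {
    // Sentinel "block" telling consumers that the upload has finished.
    private static final String END_OF_UPLOAD = "__END__";

    public static void main(String[] args) throws Exception {
        BlockingQueue<String> blockQueue = new LinkedBlockingQueue<>();
        int numMappers = 3;
        ExecutorService mappers = Executors.newFixedThreadPool(numMappers);

        // "Mappers" start before the upload completes and consume blocks as they arrive.
        for (int i = 0; i < numMappers; i++) {
            mappers.submit(() -> {
                try {
                    while (true) {
                        String block = blockQueue.take();   // waits until a data block is available
                        if (END_OF_UPLOAD.equals(block)) {
                            blockQueue.put(END_OF_UPLOAD);  // re-insert so sibling mappers also stop
                            return null;
                        }
                        process(block);                     // stand-in for running a map task
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return null;
                }
            });
        }

        // "Uploader": pushes block identifiers into the queue as each block lands,
        // so execution overlaps with upload instead of waiting for the full dataset.
        List<String> incomingBlocks = List.of("block-0001", "block-0002", "block-0003", "block-0004");
        for (String block : incomingBlocks) {
            Thread.sleep(200);             // simulate per-block upload latency
            blockQueue.put(block);
            System.out.println("uploaded " + block);
        }
        blockQueue.put(END_OF_UPLOAD);     // total block count was unknown up front

        mappers.shutdown();
        mappers.awaitTermination(1, TimeUnit.MINUTES);
    }

    private static void process(String block) {
        System.out.println(Thread.currentThread().getName() + " processing " + block);
    }
}

The blocking take() is what lets execution overlap with upload in this sketch: workers begin as soon as the first block arrives rather than after the whole dataset is loaded, which is the latency the paper's pipeline aims to hide.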
@inproceedings{Zeng2013,
  title = {Data Pipeline in MapReduce},
  author = {Zeng, J. and Plale, B.},
  booktitle = {Proceedings - IEEE 9th International Conference on e-Science, e-Science 2013},
  year = {2013},
  doi = {10.1109/eScience.2013.21},
  abstract = {MapReduce is an effective programming model for large-scale text and data analysis. Traditional MapReduce implementations such as Hadoop require that the entire input dataset be loaded into the cluster before any analysis can take place. This can introduce sizable latency when the dataset is large and when it is not possible to load the data once and process it many times, a situation that arises for log files, health records, and protected texts, for instance. We propose a data pipeline approach that hides data upload latency in MapReduce analysis. Our implementation, based on Hadoop MapReduce, is completely transparent to the user. It introduces a distributed concurrency queue to coordinate data block allocation and synchronization so as to overlap data upload with execution. The paper addresses two scheduling challenges: scheduling a fixed number of map tasks, and scheduling a dynamic number of map tasks, which allows better handling of input datasets of unknown size. We also employ a delay scheduler to achieve data locality in the pipeline. Evaluation of the solution on several applications over real-world datasets shows that our approach yields performance gains. Copyright © 2013 by The Institute of Electrical and Electronics Engineers, Inc.}
}
{"_id":"6osd7ArjiuMJCzA9E","bibbaseid":"zeng-plale-datapipelineinmapreduce-2013","downloads":0,"creationDate":"2018-03-12T19:10:27.474Z","title":"Data pipeline in mapreduce","author_short":["Zeng, J.","Plale, B."],"year":2013,"bibtype":"inproceedings","biburl":"https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d","bibdata":{"title":"Data pipeline in mapreduce","type":"inproceedings","year":"2013","id":"84dbc778-b0a8-3b44-861d-fe5f10fef51a","created":"2019-10-01T17:20:44.847Z","file_attached":false,"profile_id":"42d295c0-0737-38d6-8b43-508cab6ea85d","last_modified":"2019-10-01T17:23:08.633Z","read":false,"starred":false,"authored":"true","confirmed":"true","hidden":false,"citation_key":"Zeng2013","folder_uuids":"73f994b4-a3be-4035-a6dd-3802077ce863","private_publication":false,"abstract":"MapReduce is an effective programming model for large scale text and data analysis. Traditional MapReduce implementation, e.g., Hadoop, has the restriction that before any analysis can take place, the entire input dataset must be loaded into the cluster. This can introduce sizable latency when the data set is large, and when it is not possible to load the data once, and process many times - A situation that exists for log files, health records and protected texts for instance. We propose a data pipeline approach to hide data upload latency in MapReduce analysis. Our implementation, which is based on Hadoop MapReduce, is completely transparent to user. It introduces a distributed concurrency queue to coordinate data block allocation and synchronization so as to overlap data upload and execution. The paper overcomes two challenges: A fixed number of maps scheduling and dynamic number of maps scheduling allows for better handling of input data sets of unknown size. We also employ delay scheduler to achieve data locality for data pipeline. The evaluation of the solution on different applications on real world data sets shows that our approach shows performance gains. Copyright © 2013 by The Institute of Electrical and Electronics Engineers, Inc.","bibtype":"inproceedings","author":"Zeng, J. and Plale, B.","doi":"10.1109/eScience.2013.21","booktitle":"Proceedings - IEEE 9th International Conference on e-Science, e-Science 2013","bibtex":"@inproceedings{\n title = {Data pipeline in mapreduce},\n type = {inproceedings},\n year = {2013},\n id = {84dbc778-b0a8-3b44-861d-fe5f10fef51a},\n created = {2019-10-01T17:20:44.847Z},\n file_attached = {false},\n profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d},\n last_modified = {2019-10-01T17:23:08.633Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Zeng2013},\n folder_uuids = {73f994b4-a3be-4035-a6dd-3802077ce863},\n private_publication = {false},\n abstract = {MapReduce is an effective programming model for large scale text and data analysis. Traditional MapReduce implementation, e.g., Hadoop, has the restriction that before any analysis can take place, the entire input dataset must be loaded into the cluster. This can introduce sizable latency when the data set is large, and when it is not possible to load the data once, and process many times - A situation that exists for log files, health records and protected texts for instance. We propose a data pipeline approach to hide data upload latency in MapReduce analysis. Our implementation, which is based on Hadoop MapReduce, is completely transparent to user. 
It introduces a distributed concurrency queue to coordinate data block allocation and synchronization so as to overlap data upload and execution. The paper overcomes two challenges: A fixed number of maps scheduling and dynamic number of maps scheduling allows for better handling of input data sets of unknown size. We also employ delay scheduler to achieve data locality for data pipeline. The evaluation of the solution on different applications on real world data sets shows that our approach shows performance gains. Copyright © 2013 by The Institute of Electrical and Electronics Engineers, Inc.},\n bibtype = {inproceedings},\n author = {Zeng, J. and Plale, B.},\n doi = {10.1109/eScience.2013.21},\n booktitle = {Proceedings - IEEE 9th International Conference on e-Science, e-Science 2013}\n}","author_short":["Zeng, J.","Plale, B."],"biburl":"https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d","bibbaseid":"zeng-plale-datapipelineinmapreduce-2013","role":"author","urls":{},"metadata":{"authorlinks":{}},"downloads":0},"search_terms":["data","pipeline","mapreduce","zeng","plale"],"keywords":[],"authorIDs":[],"dataSources":["zgahneP4uAjKbudrQ","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"]}