Anatomy of machine learning algorithm implementations in MPI, Spark, and Flink. Kamburugamuve, S., Wickramasinghe, P., Ekanayake, S., & Fox, G. C. International Journal of High Performance Computing Applications, 32(1):61-73, SAGE Publications Inc., 2018. doi: 10.1177/1094342017712976. Abstract: With the ever-increasing need to analyze large amounts of data to get useful insights, it is essential to develop complex parallel machine learning algorithms that can scale with the data and with the number of parallel processes. These algorithms need to run on large data sets and be executed in minimal time so that useful information can be extracted in a time-constrained environment. The Message Passing Interface (MPI) is a widely used model for developing such algorithms in the high-performance computing paradigm, while Apache Spark and Apache Flink are emerging as big data platforms for large-scale parallel machine learning. Even though these big data frameworks are designed differently, they both follow the data flow model for execution and user APIs. The data flow model offers fundamentally different capabilities than the MPI execution model, but the same type of parallelism can be used in applications developed in both models. This article presents three distinct machine learning algorithms implemented in MPI, Spark, and Flink, compares their performance, and identifies strengths and weaknesses of each platform. © The Author(s) 2017.
@article{
title = {Anatomy of machine learning algorithm implementations in MPI, Spark, and Flink},
type = {article},
year = {2018},
keywords = {Artificial intelligence, Big data, Data flow analysis, Data flow modeling, Flink, High performance computing, Learning algorithms},
pages = {61-73},
volume = {32},
websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039854240&doi=10.1177%2F1094342017712976&partnerID=40&md5=0a1048e69609d95f438e0b2f01466624},
publisher = {SAGE Publications Inc.},
id = {fee0ce1f-2b00-3b30-aa0a-c17641db8593},
created = {2019-10-01T17:21:02.665Z},
file_attached = {false},
profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d},
last_modified = {2019-10-01T17:21:02.665Z},
read = {false},
starred = {false},
authored = {true},
confirmed = {true},
hidden = {false},
citation_key = {Kamburugamuve201861},
source_type = {article},
notes = {cited By 1},
private_publication = {false},
abstract = {With the ever-increasing need to analyze large amounts of data to get useful insights, it is essential to develop complex parallel machine learning algorithms that can scale with the data and with the number of parallel processes. These algorithms need to run on large data sets and be executed in minimal time so that useful information can be extracted in a time-constrained environment. The Message Passing Interface (MPI) is a widely used model for developing such algorithms in the high-performance computing paradigm, while Apache Spark and Apache Flink are emerging as big data platforms for large-scale parallel machine learning. Even though these big data frameworks are designed differently, they both follow the data flow model for execution and user APIs. The data flow model offers fundamentally different capabilities than the MPI execution model, but the same type of parallelism can be used in applications developed in both models. This article presents three distinct machine learning algorithms implemented in MPI, Spark, and Flink, compares their performance, and identifies strengths and weaknesses of each platform. © The Author(s) 2017.},
bibtype = {article},
author = {Kamburugamuve, S and Wickramasinghe, P and Ekanayake, S and Fox, Geoffrey Charles},
doi = {10.1177/1094342017712976},
journal = {International Journal of High Performance Computing Applications},
number = {1}
}
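
For context, the contrast the abstract draws between the MPI execution model and the data flow model of Spark and Flink can be illustrated with the iterative allreduce pattern common to data-parallel machine learning algorithms. The sketch below is not taken from the paper; it is a minimal, hypothetical K-means-style iteration written with mpi4py, in which each rank computes partial sums over its own data partition and a single allreduce gives every process identical updated centers. In Spark or Flink the same parallelism would instead be expressed as map and reduce transformations over a partitioned dataset, with the framework scheduling the communication.

# Illustrative sketch only -- not code from the paper. A hypothetical K-means-style
# iteration showing the MPI allreduce pattern typical of data-parallel ML algorithms.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Each rank holds one partition of the data (synthetic points here).
rng = np.random.default_rng(seed=rank)
local_points = rng.random((1000, 2))
centers = np.array([[0.25, 0.25], [0.75, 0.75]])  # same initial centers on every rank

for _ in range(10):
    # Local step: assign each point to its nearest center, accumulate partial sums.
    dists = np.linalg.norm(local_points[:, None, :] - centers[None, :, :], axis=2)
    assign = dists.argmin(axis=1)
    sums = np.zeros_like(centers)
    counts = np.zeros(len(centers))
    for k in range(len(centers)):
        members = local_points[assign == k]
        sums[k] = members.sum(axis=0)
        counts[k] = len(members)

    # Global step: one allreduce per buffer combines the partial results, so every
    # rank finishes the iteration with identical updated centers.
    comm.Allreduce(MPI.IN_PLACE, sums, op=MPI.SUM)
    comm.Allreduce(MPI.IN_PLACE, counts, op=MPI.SUM)
    centers = sums / np.maximum(counts, 1.0)[:, None]

if rank == 0:
    print(centers)

The sketch would be launched with an MPI runner, for example mpirun -np 4 python kmeans_allreduce.py (file name hypothetical); the explicit collectives are what the dataflow frameworks replace with framework-managed shuffles and reductions.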
{"_id":"kMSxQSWN44FDM4kmk","bibbaseid":"kamburugamuve-wickramasinghe-ekanayake-fox-anatomyofmachinelearningalgorithmimplementationsinmpisparkandflink-2018","authorIDs":[],"author_short":["Kamburugamuve, S.","Wickramasinghe, P.","Ekanayake, S.","Fox, G., C."],"bibdata":{"title":"Anatomy of machine learning algorithm implementations in MPI, Spark, and Flink","type":"article","year":"2018","keywords":"Artificial intelligence; Big data; Data flow analy,Data flow modeling; Flink; High performance compu,Learning algorithms","pages":"61-73","volume":"32","websites":"https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039854240&doi=10.1177%2F1094342017712976&partnerID=40&md5=0a1048e69609d95f438e0b2f01466624","publisher":"SAGE Publications Inc.","id":"fee0ce1f-2b00-3b30-aa0a-c17641db8593","created":"2019-10-01T17:21:02.665Z","file_attached":false,"profile_id":"42d295c0-0737-38d6-8b43-508cab6ea85d","last_modified":"2019-10-01T17:21:02.665Z","read":false,"starred":false,"authored":"true","confirmed":"true","hidden":false,"citation_key":"Kamburugamuve201861","source_type":"article","notes":"cited By 1","private_publication":false,"abstract":"With the ever-increasing need to analyze large amounts of data to get useful insights, it is essential to develop complex parallel machine learning algorithms that can scale with data and number of parallel processes. These algorithms need to run on large data sets as well as they need to be executed with minimal time in order to extract useful information in a time-constrained environment. Message passing interface (MPI) is a widely used model for developing such algorithms in high-performance computing paradigm, while Apache Spark and Apache Flink are emerging as big data platforms for large-scale parallel machine learning. Even though these big data frameworks are designed differently, they follow the data flow model for execution and user APIs. Data flow model offers fundamentally different capabilities than the MPI execution model, but the same type of parallelism can be used in applications developed in both models. This article presents three distinct machine learning algorithms implemented in MPI, Spark, and Flink and compares their performance and identifies strengths and weaknesses in each platform. 
© 2017, © The Author(s) 2017.","bibtype":"article","author":"Kamburugamuve, S and Wickramasinghe, P and Ekanayake, S and Fox, Geoffrey Charles","doi":"10.1177/1094342017712976","journal":"International Journal of High Performance Computing Applications","number":"1","bibtex":"@article{\n title = {Anatomy of machine learning algorithm implementations in MPI, Spark, and Flink},\n type = {article},\n year = {2018},\n keywords = {Artificial intelligence; Big data; Data flow analy,Data flow modeling; Flink; High performance compu,Learning algorithms},\n pages = {61-73},\n volume = {32},\n websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039854240&doi=10.1177%2F1094342017712976&partnerID=40&md5=0a1048e69609d95f438e0b2f01466624},\n publisher = {SAGE Publications Inc.},\n id = {fee0ce1f-2b00-3b30-aa0a-c17641db8593},\n created = {2019-10-01T17:21:02.665Z},\n file_attached = {false},\n profile_id = {42d295c0-0737-38d6-8b43-508cab6ea85d},\n last_modified = {2019-10-01T17:21:02.665Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Kamburugamuve201861},\n source_type = {article},\n notes = {cited By 1},\n private_publication = {false},\n abstract = {With the ever-increasing need to analyze large amounts of data to get useful insights, it is essential to develop complex parallel machine learning algorithms that can scale with data and number of parallel processes. These algorithms need to run on large data sets as well as they need to be executed with minimal time in order to extract useful information in a time-constrained environment. Message passing interface (MPI) is a widely used model for developing such algorithms in high-performance computing paradigm, while Apache Spark and Apache Flink are emerging as big data platforms for large-scale parallel machine learning. Even though these big data frameworks are designed differently, they follow the data flow model for execution and user APIs. Data flow model offers fundamentally different capabilities than the MPI execution model, but the same type of parallelism can be used in applications developed in both models. This article presents three distinct machine learning algorithms implemented in MPI, Spark, and Flink and compares their performance and identifies strengths and weaknesses in each platform. 
© 2017, © The Author(s) 2017.},\n bibtype = {article},\n author = {Kamburugamuve, S and Wickramasinghe, P and Ekanayake, S and Fox, Geoffrey Charles},\n doi = {10.1177/1094342017712976},\n journal = {International Journal of High Performance Computing Applications},\n number = {1}\n}","author_short":["Kamburugamuve, S.","Wickramasinghe, P.","Ekanayake, S.","Fox, G., C."],"urls":{"Website":"https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039854240&doi=10.1177%2F1094342017712976&partnerID=40&md5=0a1048e69609d95f438e0b2f01466624"},"biburl":"https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d","bibbaseid":"kamburugamuve-wickramasinghe-ekanayake-fox-anatomyofmachinelearningalgorithmimplementationsinmpisparkandflink-2018","role":"author","keyword":["Artificial intelligence; Big data; Data flow analy","Data flow modeling; Flink; High performance compu","Learning algorithms"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","creationDate":"2019-09-12T13:19:08.416Z","downloads":0,"keywords":["artificial intelligence; big data; data flow analy","data flow modeling; flink; high performance compu","learning algorithms"],"search_terms":["anatomy","machine","learning","algorithm","implementations","mpi","spark","flink","kamburugamuve","wickramasinghe","ekanayake","fox"],"title":"Anatomy of machine learning algorithm implementations in MPI, Spark, and Flink","year":2018,"biburl":"https://bibbase.org/service/mendeley/42d295c0-0737-38d6-8b43-508cab6ea85d","dataSources":["zgahneP4uAjKbudrQ","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"]}