Performance Estimation of Synthesis Flows cross Technologies using LSTMs and Transfer Learning. Yu, C. & Zhou, W. arXiv:1811.06017 [cs, stat], November 2018.
Abstract: Due to the increasing complexity of Integrated Circuits (ICs) and System-on-Chip (SoC), developing high-quality synthesis flows within a short market time becomes more challenging. We propose a general approach that precisely estimates the Quality-of-Result (QoR), such as delay and area, of unseen synthesis flows for specific designs. The main idea is training a Recurrent Neural Network (RNN) regressor, where the flows are inputs and QoRs are ground truth. The RNN regressor is constructed with Long Short-Term Memory (LSTM) and fully-connected layers. This approach is demonstrated with 1.2 million data points collected using 14nm, 7nm regular-voltage (RVT), and 7nm low-voltage (LVT) FinFET technologies with twelve IC designs. The accuracy of predicting the QoRs (delay and area) within one technology is ≥98.0% over ~240,000 test points. To enable accurate predictions across different technologies and different IC designs, we propose a transfer-learning approach that utilizes the model pre-trained with 14nm datasets. Our transfer learning approach obtains estimation accuracy ≥96.3% over ~960,000 test points, using only 100 data points for training.
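For illustration, a minimal sketch of the kind of LSTM + fully-connected QoR regressor the abstract describes. It assumes a PyTorch implementation with flows encoded as sequences of transformation IDs; the class name FlowQoRRegressor, the layer sizes, and the flow encoding are assumptions, not the authors' reported configuration.

# Hypothetical sketch of the LSTM + fully-connected QoR regressor described in the
# abstract. Layer sizes, flow encoding, and class name are assumptions.
import torch
import torch.nn as nn

class FlowQoRRegressor(nn.Module):
    def __init__(self, num_transforms=24, embed_dim=32, hidden_dim=128):
        super().__init__()
        # Each synthesis flow is assumed to be a sequence of transformation IDs.
        self.embed = nn.Embedding(num_transforms, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        # Fully-connected head regressing the two QoR targets: delay and area.
        self.fc = nn.Sequential(
            nn.Linear(hidden_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 2),
        )

    def forward(self, flows):              # flows: (batch, flow_length) int tensor
        x = self.embed(flows)              # (batch, flow_length, embed_dim)
        _, (h_n, _) = self.lstm(x)         # h_n: (1, batch, hidden_dim)
        return self.fc(h_n.squeeze(0))     # (batch, 2) -> predicted [delay, area]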
@article{yu_performance_2018,
title = {Performance {Estimation} of {Synthesis} {Flows} cross {Technologies} using {LSTMs} and {Transfer} {Learning}},
url = {http://arxiv.org/abs/1811.06017},
abstract = {Due to the increasing complexity of Integrated Circuits (ICs) and System-on-Chip (SoC), developing high-quality synthesis flows within a short market time becomes more challenging. We propose a general approach that precisely estimates the Quality-of-Result (QoR), such as delay and area, of unseen synthesis flows for specific designs. The main idea is training a Recurrent Neural Network (RNN) regressor, where the flows are inputs and QoRs are ground truth. The RNN regressor is constructed with Long Short-Term Memory (LSTM) and fully-connected layers. This approach is demonstrated with 1.2 million data points collected using 14nm, 7nm regular-voltage (RVT), and 7nm low-voltage (LVT) FinFET technologies with twelve IC designs. The accuracy of predicting the QoRs (delay and area) within one technology is $\geq$98.0\% over $\sim$240,000 test points. To enable accurate predictions across different technologies and different IC designs, we propose a transfer-learning approach that utilizes the model pre-trained with 14nm datasets. Our transfer learning approach obtains estimation accuracy $\geq$96.3\% over $\sim$960,000 test points, using only 100 data points for training.},
urldate = {2019-03-16},
journal = {arXiv:1811.06017 [cs, stat]},
author = {Yu, Cunxi and Zhou, Wang},
month = nov,
year = {2018},
note = {arXiv: 1811.06017},
keywords = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Machine Learning, Statistics - Machine Learning},
}
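A hedged sketch of the transfer-learning step the abstract describes: reuse the regressor pre-trained on the 14nm dataset and adapt it to a new technology (e.g. 7nm RVT/LVT) with only ~100 labelled flows. It builds on the hypothetical FlowQoRRegressor class above; the checkpoint filename and the choice to freeze the LSTM encoder while retraining only the fully-connected head are assumptions, not details given by the paper.

# Hypothetical fine-tuning step for the transfer-learning approach.
import torch

model = FlowQoRRegressor()
model.load_state_dict(torch.load("qor_regressor_14nm.pt"))  # hypothetical 14nm checkpoint

# Assumption: freeze the sequence encoder; retrain only the fully-connected head.
for p in model.lstm.parameters():
    p.requires_grad = False

optimizer = torch.optim.Adam(
    [p for p in model.parameters() if p.requires_grad], lr=1e-3)
loss_fn = torch.nn.MSELoss()

def finetune_step(flows_100, qor_100):
    """One gradient step on the ~100-point target-technology training set."""
    optimizer.zero_grad()
    loss = loss_fn(model(flows_100), qor_100)
    loss.backward()
    optimizer.step()
    return loss.item()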
{"_id":"7yDoLSWgep46GaWFm","bibbaseid":"yu-zhou-performanceestimationofsynthesisflowscrosstechnologiesusinglstmsandtransferlearning-2018","authorIDs":["5ce6fb5226c0fcda0100030c"],"author_short":["Yu, C.","Zhou, W."],"bibdata":{"bibtype":"article","type":"article","title":"Performance Estimation of Synthesis Flows cross Technologies using LSTMs and Transfer Learning","url":"http://arxiv.org/abs/1811.06017","abstract":"Due to the increasing complexity of Integrated Circuits (ICs) and System-on-Chip (SoC), developing high-quality synthesis flows within a short market time becomes more challenging. We propose a general approach that precisely estimates the Quality-of-Result (QoR), such as delay and area, of unseen synthesis flows for specific designs. The main idea is training a Recurrent Neural Network (RNN) regressor, where the flows are inputs and QoRs are ground truth. The RNN regressor is constructed with Long Short-Term Memory (LSTM) and fully-connected layers. This approach is demonstrated with 1.2 million data points collected using 14nm, 7nm regular-voltage (RVT), and 7nm low-voltage (LVT) FinFET technologies with twelve IC designs. The accuracy of predicting the QoRs (delay and area) within one technology is ${\\}boldsymbol\\{{\\}geq\\}$\\textbf\\98.0\\\\% over ${\\}sim$240,000 test points. To enable accurate predictions cross different technologies and different IC designs, we propose a transfer-learning approach that utilizes the model pre-trained with 14nm datasets. Our transfer learning approach obtains estimation accuracy ${\\}geq$96.3\\% over ${\\}sim$960,000 test points, using only 100 data points for training.","urldate":"2019-03-16","journal":"arXiv:1811.06017 [cs, stat]","author":[{"propositions":[],"lastnames":["Yu"],"firstnames":["Cunxi"],"suffixes":[]},{"propositions":[],"lastnames":["Zhou"],"firstnames":["Wang"],"suffixes":[]}],"month":"November","year":"2018","note":"arXiv: 1811.06017","keywords":"Computer Science - Computer Vision and Pattern Recognition, Computer Science - Machine Learning, Statistics - Machine Learning","bibtex":"@article{yu_performance_2018,\n\ttitle = {Performance {Estimation} of {Synthesis} {Flows} cross {Technologies} using {LSTMs} and {Transfer} {Learning}},\n\turl = {http://arxiv.org/abs/1811.06017},\n\tabstract = {Due to the increasing complexity of Integrated Circuits (ICs) and System-on-Chip (SoC), developing high-quality synthesis flows within a short market time becomes more challenging. We propose a general approach that precisely estimates the Quality-of-Result (QoR), such as delay and area, of unseen synthesis flows for specific designs. The main idea is training a Recurrent Neural Network (RNN) regressor, where the flows are inputs and QoRs are ground truth. The RNN regressor is constructed with Long Short-Term Memory (LSTM) and fully-connected layers. This approach is demonstrated with 1.2 million data points collected using 14nm, 7nm regular-voltage (RVT), and 7nm low-voltage (LVT) FinFET technologies with twelve IC designs. The accuracy of predicting the QoRs (delay and area) within one technology is \\${\\textbackslash}boldsymbol\\{{\\textbackslash}geq\\}\\${\\textbackslash}textbf\\{98.0\\}{\\textbackslash}\\% over \\${\\textbackslash}sim\\$240,000 test points. To enable accurate predictions cross different technologies and different IC designs, we propose a transfer-learning approach that utilizes the model pre-trained with 14nm datasets. 
Our transfer learning approach obtains estimation accuracy \\${\\textbackslash}geq\\$96.3{\\textbackslash}\\% over \\${\\textbackslash}sim\\$960,000 test points, using only 100 data points for training.},\n\turldate = {2019-03-16},\n\tjournal = {arXiv:1811.06017 [cs, stat]},\n\tauthor = {Yu, Cunxi and Zhou, Wang},\n\tmonth = nov,\n\tyear = {2018},\n\tnote = {arXiv: 1811.06017},\n\tkeywords = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Machine Learning, Statistics - Machine Learning},\n}\n\n","author_short":["Yu, C.","Zhou, W."],"key":"yu_performance_2018","id":"yu_performance_2018","bibbaseid":"yu-zhou-performanceestimationofsynthesisflowscrosstechnologiesusinglstmsandtransferlearning-2018","role":"author","urls":{"Paper":"http://arxiv.org/abs/1811.06017"},"keyword":["Computer Science - Computer Vision and Pattern Recognition","Computer Science - Machine Learning","Statistics - Machine Learning"],"metadata":{"authorlinks":{}},"downloads":0,"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/bxt101","creationDate":"2019-05-23T19:58:10.189Z","downloads":0,"keywords":["computer science - computer vision and pattern recognition","computer science - machine learning","statistics - machine learning"],"search_terms":["performance","estimation","synthesis","flows","cross","technologies","using","lstms","transfer","learning","yu","zhou"],"title":"Performance Estimation of Synthesis Flows cross Technologies using LSTMs and Transfer Learning","year":2018,"dataSources":["L6BLFSB28hKk5Nt67","Wsv2bQ4jPuc7qme8R"]}