Optical Flow Prediction for Blind and Non-Blind Video Error Concealment Using Deep Neural Networks. Sankisa, A., Punjabi, A., & Katsaggelos, A. K. International Journal of Multimedia Data Engineering and Management, 10(3):27–46, IGI Global, July 2019. Paper doi abstract bibtex A novel optical flow prediction model using an adaptable deep neural network architecture for blind and non-blind error concealment of videos degraded by transmission loss is presented. The two-stream network model is trained by separating the horizontal and vertical motion fields which are passed through two similar parallel pipelines that include traditional convolutional (Conv) and convolutional long short-term memory (ConvLSTM) layers. The ConvLSTM layers extract temporally correlated motion information while the Conv layers correlate motion spatially. The optical flows used as input to the two-pipeline prediction network are obtained through a flow generation network that can be easily interchanged, increasing the adaptability of the overall end-to-end architecture. The performance of the proposed model is evaluated using real-world packet loss scenarios. Standard video quality metrics are used to compare frames reconstructed using predicted optical flows with those reconstructed using “ground-truth” flows obtained directly from the generator.
@article{Arun2019,
  author    = {Sankisa, Arun and Punjabi, Arjun and Katsaggelos, Aggelos K.},
  title     = {Optical Flow Prediction for Blind and Non-Blind Video Error Concealment Using Deep Neural Networks},
  journal   = {International Journal of Multimedia Data Engineering and Management},
  volume    = {10},
  number    = {3},
  pages     = {27--46},
  month     = jul,
  year      = {2019},
  publisher = {IGI Global},
  issn      = {1947-8534},
  doi       = {10.4018/IJMDEM.2019070102},
  url       = {https://ieeexplore.ieee.org/document/8451090/},
  keywords  = {CNN, ConvLSTM, Deep Neural Networks, Optical flow, Video Error Concealment},
  abstract  = {A novel optical flow prediction model using an adaptable deep neural network architecture for blind and non-blind error concealment of videos degraded by transmission loss is presented. The two-stream network model is trained by separating the horizontal and vertical motion fields which are passed through two similar parallel pipelines that include traditional convolutional (Conv) and convolutional long short-term memory (ConvLSTM) layers. The ConvLSTM layers extract temporally correlated motion information while the Conv layers correlate motion spatially. The optical flows used as input to the two-pipeline prediction network are obtained through a flow generation network that can be easily interchanged, increasing the adaptability of the overall end-to-end architecture. The performance of the proposed model is evaluated using real-world packet loss scenarios. Standard video quality metrics are used to compare frames reconstructed using predicted optical flows with those reconstructed using ``ground-truth'' flows obtained directly from the generator.},
}
Downloads: 0
{"_id":"8e2TMScFuigf8H83K","bibbaseid":"sankisa-punjabi-katsaggelos-opticalflowpredictionforblindandnonblindvideoerrorconcealmentusingdeepneuralnetworks-2019","author_short":["Sankisa, A.","Punjabi, A.","Katsaggelos, A. K."],"bibdata":{"bibtype":"article","type":"article","abstract":"A novel optical flow prediction model using an adaptable deep neural network architecture for blind and non-blind error concealment of videos degraded by transmission loss is presented. The two-stream network model is trained by separating the horizontal and vertical motion fields which are passed through two similar parallel pipelines that include traditional convolutional (Conv) and convolutional long short-term memory (ConvLSTM) layers. The ConvLSTM layers extract temporally correlated motion information while the Conv layers correlate motion spatially. The optical flows used as input to the two-pipeline prediction network are obtained through a flow generation network that can be easily interchanged, increasing the adaptability of the overall end-to-end architecture. The performance of the proposed model is evaluated using real-world packet loss scenarios. 
Standard video quality metrics are used to compare frames reconstructed using predicted optical flows with those reconstructed using “ground-truth” flows obtained directly from the generator.","author":[{"propositions":[],"lastnames":["Sankisa"],"firstnames":["Arun"],"suffixes":[]},{"propositions":[],"lastnames":["Punjabi"],"firstnames":["Arjun"],"suffixes":[]},{"propositions":[],"lastnames":["Katsaggelos"],"firstnames":["Aggelos","K."],"suffixes":[]}],"doi":"10.4018/IJMDEM.2019070102","isbn":"978-1-4799-7061-2","issn":"1947-8534","journal":"International Journal of Multimedia Data Engineering and Management","keywords":"CNN,ConvLSTM,Deep Neural Networks,Optical flow,Video Error Concealment","month":"jul","number":"3","pages":"27–46","publisher":"IEEE","title":"Optical Flow Prediction for Blind and Non-Blind Video Error Concealment Using Deep Neural Networks","url":"https://ieeexplore.ieee.org/document/8451090/ http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/IJMDEM.2019070102","volume":"10","year":"2019","bibtex":"@article{Arun2019,\nabstract = {A novel optical flow prediction model using an adaptable deep neural network architecture for blind and non-blind error concealment of videos degraded by transmission loss is presented. The two-stream network model is trained by separating the horizontal and vertical motion fields which are passed through two similar parallel pipelines that include traditional convolutional (Conv) and convolutional long short-term memory (ConvLSTM) layers. The ConvLSTM layers extract temporally correlated motion information while the Conv layers correlate motion spatially. The optical flows used as input to the two-pipeline prediction network are obtained through a flow generation network that can be easily interchanged, increasing the adaptability of the overall end-to-end architecture. The performance of the proposed model is evaluated using real-world packet loss scenarios. 
Standard video quality metrics are used to compare frames reconstructed using predicted optical flows with those reconstructed using “ground-truth” flows obtained directly from the generator.},\nauthor = {Sankisa, Arun and Punjabi, Arjun and Katsaggelos, Aggelos K.},\ndoi = {10.4018/IJMDEM.2019070102},\nisbn = {978-1-4799-7061-2},\nissn = {1947-8534},\njournal = {International Journal of Multimedia Data Engineering and Management},\nkeywords = {CNN,ConvLSTM,Deep Neural Networks,Optical flow,Video Error Concealment},\nmonth = {jul},\nnumber = {3},\npages = {27--46},\npublisher = {IEEE},\ntitle = {{Optical Flow Prediction for Blind and Non-Blind Video Error Concealment Using Deep Neural Networks}},\nurl = {https://ieeexplore.ieee.org/document/8451090/ http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/IJMDEM.2019070102},\nvolume = {10},\nyear = {2019}\n}\n","author_short":["Sankisa, A.","Punjabi, A.","Katsaggelos, A. K."],"key":"Arun2019","id":"Arun2019","bibbaseid":"sankisa-punjabi-katsaggelos-opticalflowpredictionforblindandnonblindvideoerrorconcealmentusingdeepneuralnetworks-2019","role":"author","urls":{"Paper":"https://ieeexplore.ieee.org/document/8451090/ http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/IJMDEM.2019070102"},"keyword":["CNN","ConvLSTM","Deep Neural Networks","Optical flow","Video Error 
Concealment"],"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://sites.northwestern.edu/ivpl/files/2023/06/IVPL_Updated_publications-1.bib","dataSources":["KTWAakbPXLGfYseXn","ePKPjG8C6yvpk4mEK","ya2CyA73rpZseyrZ8","E6Bth2QB5BYjBMZE7","nbnEjsN7MJhurAK9x","PNQZj6FjzoxxJk4Yi","7FpDWDGJ4KgpDiGfB","bod9ms4MQJHuJgPpp","QR9t5P2cLdJuzhfzK","D8k2SxfC5dKNRFgro","7Dwzbxq93HWrJEhT6","qhF8zxmGcJfvtdeAg","fvDEHD49E2ZRwE3fb","H7crv8NWhZup4d4by","DHqokWsryttGh7pJE","vRJd4wNg9HpoZSMHD","sYxQ6pxFgA59JRhxi","w2WahSbYrbcCKBDsC","XasdXLL99y5rygCmq","3gkSihZQRfAD2KBo3","t5XMbyZbtPBo4wBGS","bEpHM2CtrwW2qE8FP","teJzFLHexaz5AQW5z"],"keywords":["cnn","convlstm","deep neural networks","optical flow","video error concealment"],"search_terms":["optical","flow","prediction","blind","non","blind","video","error","concealment","using","deep","neural","networks","sankisa","punjabi","katsaggelos"],"title":"Optical Flow Prediction for Blind and Non-Blind Video Error Concealment Using Deep Neural Networks","year":2019}