NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. Mildenhall, B., Srinivasan, P. P., Tancik, M., Barron, J. T., Ramamoorthi, R., & Ng, R. In Vedaldi, A., Bischof, H., Brox, T., & Frahm, J.-M., editors, Computer Vision – ECCV 2020, Lecture Notes in Computer Science, pages 405–421, 2020. Springer International Publishing. doi: 10.1007/978-3-030-58452-8_24

Abstract: We present a method that achieves state-of-the-art results for synthesizing novel views of complex scenes by optimizing an underlying continuous volumetric scene function using a sparse set of input views. Our algorithm represents a scene using a fully-connected (non-convolutional) deep network, whose input is a single continuous 5D coordinate (spatial location (x, y, z) and viewing direction (θ, ϕ)) and whose output is the volume density and view-dependent emitted radiance at that spatial location. We synthesize views by querying 5D coordinates along camera rays and use classic volume rendering techniques to project the output colors and densities into an image. Because volume rendering is naturally differentiable, the only input required to optimize our representation is a set of images with known camera poses. We describe how to effectively optimize neural radiance fields to render photorealistic novel views of scenes with complicated geometry and appearance, and demonstrate results that outperform prior work on neural rendering and view synthesis. View synthesis results are best viewed as videos, so we urge readers to view our supplementary video for convincing comparisons.
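The abstract's pipeline reduces to two pieces: a fully-connected network that maps a 5D coordinate (x, y, z, θ, ϕ) to a volume density σ and a view-dependent RGB color, and the classic volume rendering quadrature C ≈ Σᵢ Tᵢ (1 − exp(−σᵢ δᵢ)) cᵢ that composites samples along each camera ray into a pixel. The sketch below illustrates both steps in PyTorch; it is a minimal reading of the abstract, not the authors' released code, and the names TinyNeRF, positional_encode, and render_ray, along with the layer widths, frequency count, and sample count, are assumptions made for illustration.

# Minimal sketch of the idea described in the abstract: an MLP maps a 5D
# coordinate (position x, y, z + viewing direction theta, phi) to volume
# density and view-dependent RGB, and rays are composited with the classic
# volume rendering quadrature C = sum_i T_i * (1 - exp(-sigma_i * delta_i)) * c_i.
# Hypothetical names (TinyNeRF, positional_encode, render_ray) and all
# hyperparameters are illustrative, not the authors' released architecture.
import torch
import torch.nn as nn

def positional_encode(x, n_freqs=6):
    # Pass each coordinate through sin/cos at exponentially spaced frequencies,
    # helping the MLP represent high-frequency scene detail.
    out = [x]
    for i in range(n_freqs):
        out += [torch.sin(2.0 ** i * x), torch.cos(2.0 ** i * x)]
    return torch.cat(out, dim=-1)

class TinyNeRF(nn.Module):
    def __init__(self, n_freqs=6, hidden=128):
        super().__init__()
        in_dim = 5 * (1 + 2 * n_freqs)   # encoded (x, y, z, theta, phi)
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 4),        # -> (sigma, r, g, b)
        )

    def forward(self, coords5d):
        raw = self.net(positional_encode(coords5d))
        sigma = torch.relu(raw[..., 0])      # density must be non-negative
        rgb = torch.sigmoid(raw[..., 1:])    # colors constrained to [0, 1]
        return sigma, rgb

def render_ray(model, origin, direction, near=2.0, far=6.0, n_samples=64):
    # Sample points along the ray, query the field, and alpha-composite.
    t = torch.linspace(near, far, n_samples)
    pts = origin + t[:, None] * direction                  # (n_samples, 3)
    dirs = direction.expand(n_samples, 3)
    # Spherical viewing direction (theta, phi) from the unit direction vector.
    theta = torch.acos(dirs[:, 2].clamp(-1, 1))
    phi = torch.atan2(dirs[:, 1], dirs[:, 0])
    coords = torch.cat([pts, theta[:, None], phi[:, None]], dim=-1)
    sigma, rgb = model(coords)
    delta = t[1:] - t[:-1]
    delta = torch.cat([delta, delta[-1:]])                 # pad last interval
    alpha = 1.0 - torch.exp(-sigma * delta)                # per-sample opacity
    # Transmittance T_i: probability the ray reaches sample i unoccluded.
    trans = torch.cumprod(torch.cat([torch.ones(1), 1.0 - alpha + 1e-10]), 0)[:-1]
    weights = trans * alpha
    return (weights[:, None] * rgb).sum(dim=0)             # composited pixel RGB

Because every operation above is differentiable, a mean-squared error between rendered and observed pixel colors can be backpropagated directly into the MLP weights; this is the sense in which, as the abstract says, a set of images with known camera poses is the only input needed for optimization.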
@inproceedings{mildenhallNeRFRepresentingScenes2020,
title = {NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis},
year = {2020},
keywords = {3D deep learning,Image-based rendering,Scene representation,View synthesis,Volume rendering},
pages = {405--421},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {We present a method that achieves state-of-the-art results for synthesizing novel views of complex scenes by optimizing an underlying continuous volumetric scene function using a sparse set of input views. Our algorithm represents a scene using a fully-connected (non-convolutional) deep network, whose input is a single continuous 5D coordinate (spatial location (x, y, z) and viewing direction $(\theta, \phi)$) and whose output is the volume density and view-dependent emitted radiance at that spatial location. We synthesize views by querying 5D coordinates along camera rays and use classic volume rendering techniques to project the output colors and densities into an image. Because volume rendering is naturally differentiable, the only input required to optimize our representation is a set of images with known camera poses. We describe how to effectively optimize neural radiance fields to render photorealistic novel views of scenes with complicated geometry and appearance, and demonstrate results that outperform prior work on neural rendering and view synthesis. View synthesis results are best viewed as videos, so we urge readers to view our supplementary video for convincing comparisons.},
author = {Mildenhall, Ben and Srinivasan, Pratul P and Tancik, Matthew and Barron, Jonathan T and Ramamoorthi, Ravi and Ng, Ren},
editor = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael},
doi = {10.1007/978-3-030-58452-8_24},
booktitle = {Computer Vision – ECCV 2020}
}
{"_id":"fCc26TN5C97JkE5uu","bibbaseid":"mildenhall-srinivasan-tancik-barron-ramamoorthi-ng-nerfrepresentingscenesasneuralradiancefieldsforviewsynthesis-2020","author_short":["Mildenhall, B.","Srinivasan, P., P.","Tancik, M.","Barron, J., T.","Ramamoorthi, R.","Ng, R."],"bibdata":{"title":"NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis","type":"inproceedings","year":"2020","keywords":"3D deep learning,Image-based rendering,Scene representation,View synthesis,Volume rendering","pages":"405-421","publisher":"Springer International Publishing","city":"Cham","series":"Lecture Notes in Computer Science","id":"6ac4ea82-cba6-35b0-8e4d-ebd391837521","created":"2022-03-28T09:45:03.773Z","file_attached":"true","profile_id":"235249c2-3ed4-314a-b309-b1ea0330f5d9","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-03-29T08:05:59.073Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"mildenhallNeRFRepresentingScenes2020","source_type":"inproceedings","short_title":"NeRF","private_publication":false,"abstract":"We present a method that achieves state-of-the-art results for synthesizing novel views of complex scenes by optimizing an underlying continuous volumetric scene function using a sparse set of input views. Our algorithm represents a scene using a fully-connected (non-convolutional) deep network, whose input is a single continuous 5D coordinate (spatial location (x, y, z) and viewing direction (θ,ϕ)(θ,ϕ)(\\textbackslashtheta ,\\textbackslashphi )) and whose output is the volume density and view-dependent emitted radiance at that spatial location. We synthesize views by querying 5D coordinates along camera rays and use classic volume rendering techniques to project the output colors and densities into an image. Because volume rendering is naturally differentiable, the only input required to optimize our representation is a set of images with known camera poses. We describe how to effectively optimize neural radiance fields to render photorealistic novel views of scenes with complicated geometry and appearance, and demonstrate results that outperform prior work on neural rendering and view synthesis. 
View synthesis results are best viewed as videos, so we urge readers to view our supplementary video for convincing comparisons.","bibtype":"inproceedings","author":"Mildenhall, Ben and Srinivasan, Pratul P and Tancik, Matthew and Barron, Jonathan T and Ramamoorthi, Ravi and Ng, Ren","editor":"Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael","doi":"10.1007/978-3-030-58452-8_24","booktitle":"Computer Vision – ECCV 2020","bibtex":"@inproceedings{\n title = {NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis},\n type = {inproceedings},\n year = {2020},\n keywords = {3D deep learning,Image-based rendering,Scene representation,View synthesis,Volume rendering},\n pages = {405-421},\n publisher = {Springer International Publishing},\n city = {Cham},\n series = {Lecture Notes in Computer Science},\n id = {6ac4ea82-cba6-35b0-8e4d-ebd391837521},\n created = {2022-03-28T09:45:03.773Z},\n file_attached = {true},\n profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-03-29T08:05:59.073Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {mildenhallNeRFRepresentingScenes2020},\n source_type = {inproceedings},\n short_title = {NeRF},\n private_publication = {false},\n abstract = {We present a method that achieves state-of-the-art results for synthesizing novel views of complex scenes by optimizing an underlying continuous volumetric scene function using a sparse set of input views. Our algorithm represents a scene using a fully-connected (non-convolutional) deep network, whose input is a single continuous 5D coordinate (spatial location (x, y, z) and viewing direction (θ,ϕ)(θ,ϕ)(\\textbackslashtheta ,\\textbackslashphi )) and whose output is the volume density and view-dependent emitted radiance at that spatial location. We synthesize views by querying 5D coordinates along camera rays and use classic volume rendering techniques to project the output colors and densities into an image. Because volume rendering is naturally differentiable, the only input required to optimize our representation is a set of images with known camera poses. We describe how to effectively optimize neural radiance fields to render photorealistic novel views of scenes with complicated geometry and appearance, and demonstrate results that outperform prior work on neural rendering and view synthesis. 
View synthesis results are best viewed as videos, so we urge readers to view our supplementary video for convincing comparisons.},\n bibtype = {inproceedings},\n author = {Mildenhall, Ben and Srinivasan, Pratul P and Tancik, Matthew and Barron, Jonathan T and Ramamoorthi, Ravi and Ng, Ren},\n editor = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael},\n doi = {10.1007/978-3-030-58452-8_24},\n booktitle = {Computer Vision – ECCV 2020}\n}","author_short":["Mildenhall, B.","Srinivasan, P., P.","Tancik, M.","Barron, J., T.","Ramamoorthi, R.","Ng, R."],"editor_short":["Vedaldi, A.","Bischof, H.","Brox, T.","Frahm, J."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/2b511521-2326-8eb7-1022-a874acda1296/Mildenhall_et_al___2020___NeRF_Representing_Scenes_as_Neural_Radiance_Field.pdf.pdf"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"mildenhall-srinivasan-tancik-barron-ramamoorthi-ng-nerfrepresentingscenesasneuralradiancefieldsforviewsynthesis-2020","role":"author","keyword":["3D deep learning","Image-based rendering","Scene representation","View synthesis","Volume rendering"],"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["aXmRAq63YsH7a3ufx","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"],"keywords":["3d deep learning","image-based rendering","scene representation","view synthesis","volume rendering"],"search_terms":["nerf","representing","scenes","neural","radiance","fields","view","synthesis","mildenhall","srinivasan","tancik","barron","ramamoorthi","ng"],"title":"NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis","year":2020}