Screenplay Quality Assessment: Can We Predict Who Gets Nominated?. Chiu, M., Feng, T., Ren, X., & Narayanan, S. In Proceedings of the First Joint Workshop on Narrative Understanding, Storylines, and Events, pages 11–16, Online, July, 2020. Association for Computational Linguistics. Abstract: Deciding which scripts to turn into movies is a costly and time-consuming process for filmmakers. Thus, building a tool to aid script selection, an initial phase in movie production, can be very beneficial. Toward that goal, in this work, we present a method to evaluate the quality of a screenplay based on linguistic cues. We address this in a two-fold approach: (1) we define the task as predicting nominations of scripts at major film awards with the hypothesis that the peer-recognized scripts should have a greater chance to succeed. (2) based on industry opinions and narratology, we extract and integrate domain-specific features into common classification techniques. We face two challenges (1) scripts are much longer than other document datasets (2) nominated scripts are limited and thus difficult to collect. However, with narratology-inspired modeling and domain features, our approach offers clear improvements over strong baselines. Our work provides a new approach for future work in screenplay analysis.
@inproceedings{chiu-etal-2020-screenplay,
 abstract  = {Deciding which scripts to turn into movies is a costly and time-consuming process for filmmakers. Thus, building a tool to aid script selection, an initial phase in movie production, can be very beneficial. Toward that goal, in this work, we present a method to evaluate the quality of a screenplay based on linguistic cues. We address this in a two-fold approach: (1) we define the task as predicting nominations of scripts at major film awards with the hypothesis that the peer-recognized scripts should have a greater chance to succeed. (2) based on industry opinions and narratology, we extract and integrate domain-specific features into common classification techniques. We face two challenges (1) scripts are much longer than other document datasets (2) nominated scripts are limited and thus difficult to collect. However, with narratology-inspired modeling and domain features, our approach offers clear improvements over strong baselines. Our work provides a new approach for future work in screenplay analysis.},
 address   = {Online},
 author    = {Chiu, Ming-Chang and
Feng, Tiantian and
Ren, Xiang and
Narayanan, Shrikanth},
 booktitle = {Proceedings of the First Joint Workshop on Narrative Understanding, Storylines, and Events},
 doi       = {10.18653/v1/2020.nuse-1.2},
 link      = {http://sail.usc.edu/publications/files/Chiu-2020.pdf},
 month     = jul,
 pages     = {11--16},
 publisher = {Association for Computational Linguistics},
 title     = {Screenplay Quality Assessment: Can We Predict Who Gets Nominated?},
 url       = {https://www.aclweb.org/anthology/2020.nuse-1.2},
 year      = {2020}
}
Downloads: 0
{"_id":"oCL4emdBcyNDmX6fB","bibbaseid":"chiu-feng-ren-narayanan-screenplayqualityassessmentcanwepredictwhogetsnominated-2020","author_short":["Chiu, M.","Feng, T.","Ren, X.","Narayanan, S."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Deciding which scripts to turn into movies is a costly and time-consuming process for filmmakers. Thus, building a tool to aid script selection, an initial phase in movie production, can be very beneficial. Toward that goal, in this work, we present a method to evaluate the quality of a screenplay based on linguistic cues. We address this in a two-fold approach: (1) we define the task as predicting nominations of scripts at major film awards with the hypothesis that the peer-recognized scripts should have a greater chance to succeed. (2) based on industry opinions and narratology, we extract and integrate domain-specific features into common classification techniques. We face two challenges (1) scripts are much longer than other document datasets (2) nominated scripts are limited and thus difficult to collect. However, with narratology-inspired modeling and domain features, our approach offers clear improvements over strong baselines. 
Our work provides a new approach for future work in screenplay analysis.","address":"Online","author":[{"propositions":[],"lastnames":["Chiu"],"firstnames":["Ming-Chang"],"suffixes":[]},{"propositions":[],"lastnames":["Feng"],"firstnames":["Tiantian"],"suffixes":[]},{"propositions":[],"lastnames":["Ren"],"firstnames":["Xiang"],"suffixes":[]},{"propositions":[],"lastnames":["Narayanan"],"firstnames":["Shrikanth"],"suffixes":[]}],"booktitle":"Proceedings of the First Joint Workshop on Narrative Understanding, Storylines, and Events","doi":"10.18653/v1/2020.nuse-1.2","link":"http://sail.usc.edu/publications/files/Chiu-2020.pdf","month":"July","pages":"11-16","publisher":"Association for Computational Linguistics","title":"Screenplay Quality Assessment: Can We Predict Who Gets Nominated?","url":"https://www.aclweb.org/anthology/2020.nuse-1.2","year":"2020","bibtex":"@inproceedings{chiu-etal-2020-screenplay,\n abstract = {Deciding which scripts to turn into movies is a costly and time-consuming process for filmmakers. Thus, building a tool to aid script selection, an initial phase in movie production, can be very beneficial. Toward that goal, in this work, we present a method to evaluate the quality of a screenplay based on linguistic cues. We address this in a two-fold approach: (1) we define the task as predicting nominations of scripts at major film awards with the hypothesis that the peer-recognized scripts should have a greater chance to succeed. (2) based on industry opinions and narratology, we extract and integrate domain-specific features into common classification techniques. We face two challenges (1) scripts are much longer than other document datasets (2) nominated scripts are limited and thus difficult to collect. However, with narratology-inspired modeling and domain features, our approach offers clear improvements over strong baselines. 
Our work provides a new approach for future work in screenplay analysis.},\n address = {Online},\n author = {Chiu, Ming-Chang and\nFeng, Tiantian and\nRen, Xiang and\nNarayanan, Shrikanth},\n booktitle = {Proceedings of the First Joint Workshop on Narrative Understanding, Storylines, and Events},\n doi = {10.18653/v1/2020.nuse-1.2},\n link = {http://sail.usc.edu/publications/files/Chiu-2020.pdf},\n month = {July},\n pages = {11-16},\n publisher = {Association for Computational Linguistics},\n title = {Screenplay Quality Assessment: Can We Predict Who Gets Nominated?},\n url = {https://www.aclweb.org/anthology/2020.nuse-1.2},\n year = {2020}\n}\n\n","author_short":["Chiu, M.","Feng, T.","Ren, X.","Narayanan, S."],"bibbaseid":"chiu-feng-ren-narayanan-screenplayqualityassessmentcanwepredictwhogetsnominated-2020","role":"author","urls":{"Paper":"https://www.aclweb.org/anthology/2020.nuse-1.2"},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://bibbase.org/f/nWhKb4SffvhfreEmj/shri-isi-edu.bib","dataSources":["P3nQrSLkFzGGSmKJQ","Reikhy6EiDXFTcuR9"],"keywords":[],"search_terms":["screenplay","quality","assessment","predict","gets","nominated","chiu","feng","ren","narayanan"],"title":"Screenplay Quality Assessment: Can We Predict Who Gets Nominated?","year":2020}