Publication list (generated by bibbase.org from http://www.telemidia.puc-rio.br/~roberto/biblio/bib.bib)
You can embed this list in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/show?bib=http%3A%2F%2Fwww.telemidia.puc-rio.br%2F%7Eroberto%2Fbiblio%2Fbib.bib&jsonp=1&theme=simple"></script>

PHP:

<?php
$contents = file_get_contents("https://bibbase.org/show?bib=http%3A%2F%2Fwww.telemidia.puc-rio.br%2F%7Eroberto%2Fbiblio%2Fbib.bib&jsonp=1&theme=simple");
print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/show?bib=http%3A%2F%2Fwww.telemidia.puc-rio.br%2F%7Eroberto%2Fbiblio%2Fbib.bib&jsonp=1&theme=simple"></iframe>

For more details, see the documentation.
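The same request works from any HTTP client, so the list can also be embedded server-side from stacks other than PHP. Below is a minimal Python sketch mirroring the PHP snippet above; it fetches the rendered fragment and prints it (the function name is just illustrative).

import urllib.request

# The same BibBase URL used in the snippets above.
BIBBASE_URL = ("https://bibbase.org/show?bib=http%3A%2F%2Fwww.telemidia.puc-rio.br"
               "%2F%7Eroberto%2Fbiblio%2Fbib.bib&jsonp=1&theme=simple")

def fetch_publication_list() -> str:
    # Fetch the rendered publication list as an HTML/JS fragment.
    with urllib.request.urlopen(BIBBASE_URL) as response:
        return response.read().decode("utf-8")

if __name__ == "__main__":
    print(fetch_publication_list())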
2021 (2)
Multi-feature 360 Video Quality Estimation. Azevedo, R. G. d. A.; Birkbeck, N.; Janatra, I.; Adsumilli, B.; and Frossard, P. IEEE Open Journal of Circuits and Systems. 2021. Accepted for publication.

@article{2021_04_azevedo,
  author  = {Azevedo, Roberto Gerson de Albuquerque and Birkbeck, Neil and Janatra, Ivan and Adsumilli, Balu and Frossard, Pascal},
  title   = {Multi-feature 360 Video Quality Estimation},
  year    = {2021},
  journal = {IEEE Open Journal of Circuits and Systems},
  note    = {Accepted for publication},
}
Omnidirectional Imaging and Deep Learning. Azevedo, R. G. d. A.; Khasanova, R.; and Frossard, P. In Omnidirectional vision. 2021. To appear.

@incollection{2021_00_azevedo,
  author    = {Azevedo, Roberto Gerson de Albuquerque and Khasanova, Renata and Frossard, Pascal},
  title     = {Omnidirectional Imaging and Deep Learning},
  year      = {2021},
  booktitle = {Omnidirectional vision},
  note      = {To appear},
}
2020 (5)
Towards efficient compression of interactive omnidirectional visual content. de A. Azevedo, R. G. https://mmc.committees.comsoc.org/communications-review/, Oct 2020. IEEE COMSOC MMTC Communications - Review.

@misc{2020_10_azevedo,
  title        = {Towards efficient compression of interactive omnidirectional visual content},
  author       = {Roberto G. de A. Azevedo},
  howpublished = {\url{https://mmc.committees.comsoc.org/communications-review/}},
  year         = {2020},
  month        = {Oct},
  note         = {IEEE COMSOC MMTC Communications - Review},
  publisher    = {IEEE COMSOC},
}
Visual Distortions in 360° Videos. Azevedo, R. G. d. A.; Birkbeck, N.; De Simone, F.; Janatra, I.; Adsumilli, B.; and Frossard, P. IEEE Transactions on Circuits and Systems for Video Technology, 30(8): 2524-2537. Aug 2020.

@article{2020_08_azevedo,
  author   = {Azevedo, Roberto Gerson de Albuquerque and Birkbeck, Neil and De Simone, Francesca and Janatra, Ivan and Adsumilli, Balu and Frossard, Pascal},
  journal  = {IEEE Transactions on Circuits and Systems for Video Technology},
  title    = {Visual Distortions in 360° Videos},
  year     = {2020},
  volume   = {30},
  number   = {8},
  pages    = {2524-2537},
  abstract = {Omnidirectional (or 360°) images and videos are emergent signals being used in many areas, such as robotics and virtual/augmented reality. In particular, for virtual reality applications, they allow an immersive experience in which the user can interactively navigate through a scene with three degrees of freedom, wearing a head-mounted display. Current approaches for capturing, processing, delivering, and displaying 360° content, however, present many open technical challenges and introduce several types of distortions in the visual signal. Some of the distortions are specific to the nature of 360° images and often differ from those encountered in classical visual communication frameworks. This paper provides a first comprehensive review of the most common visual distortions that alter 360° signals going through the different processing elements of the visual communication pipeline. While their impact on viewers' visual perception and the immersive experience at large is still unknown—thus, it is an open research topic—this review serves the purpose of proposing a taxonomy of the visual distortions that can be encountered in 360° signals. Their underlying causes in the end-to-end 360° content distribution pipeline are identified. This taxonomy is essential as a basis for comparing different processing techniques, such as visual enhancement, encoding, and streaming strategies, and allowing the effective design of new algorithms and applications. It is also a useful resource for the design of psycho-visual studies aiming to characterize human perception of 360° content in interactive and immersive applications.},
  keywords = {Visualization; Videos; Distortion; Cameras; Pipelines; Encoding; Signal processing algorithms; Omnidirectional video; 360-degree video; visual distortions; artifacts; compression},
  doi      = {10.1109/TCSVT.2019.2927344},
  issn     = {1558-2205},
  month    = {Aug},
}
An Authoring Model for Interactive 360 videos. Mendes, P.; Guedes, Á.; Moraes, D.; De Albuquerque Azevedo, R. G.; and Colcher, S. In Proceedings of ICME 2020 Workshop: Tools for Creating XR Media Experiences, London, UK, March 2020. Accepted for publication.

@inproceedings{2020_07_mendes,
  author     = {Mendes, Paulo and Guedes, Álan and Moraes, Daniel and De Albuquerque Azevedo, Roberto Gerson and Colcher, Sérgio},
  title      = {An Authoring Model for Interactive 360 videos},
  date       = {2020-03-28},
  year       = {2020},
  month      = {03},
  version    = {1},
  langid     = {english},
  langidopts = {variant=american},
  eprinttype = {arxiv},
  eprint     = {},
  abstract   = {The recent availability of consumer-level head-mounted displays and omnidirectional cameras has been driving an explosion of 360 video content. Transforming the original recorded 360 video content into meaningful interactive applications that support viewers in tasks such as learning, entertainment, and training, however, is not trivial and requires new tools. Based on real-world scenarios for 360 interactive videos, in this paper, we gather a set of requirements and propose an authoring model to support authors in the process of reasoning and creating 360-degree interactive video presentations. As a case study, we implement different interactive 360 applications showing the expressiveness and completeness of the model in the scope of the target scenarios.},
  booktitle  = {Proceedings of ICME 2020 Workshop: Tools for Creating XR Media Experiences},
  address    = {London, UK},
  note       = {Accepted for publication},
}
A Viewport-Driven Multi-Metric Fusion Approach for 360-Degree Video Quality Assessment. de Albuquerque Azevedo, R. G.; Birkbeck, N.; Janatra, I.; Adsumilli, B.; and Frossard, P. In 2020 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6, July 2020.

@inproceedings{2020_07_azevedo,
  author    = {de Albuquerque Azevedo, Roberto Gerson and Birkbeck, Neil and Janatra, Ivan and Adsumilli, Balu and Frossard, Pascal},
  booktitle = {2020 IEEE International Conference on Multimedia and Expo (ICME)},
  title     = {A Viewport-Driven Multi-Metric Fusion Approach for 360-Degree Video Quality Assessment},
  year      = {2020},
  pages     = {1-6},
  abstract  = {We propose a new viewport-based multi-metric fusion (MMF) approach for visual quality assessment of 360-degree (omnidirectional) videos. Our method is based on computing multiple spatio-temporal objective quality metrics (features) on viewports extracted from 360-degree videos, and learning a model that combines these features into a metric, which closely matches subjective quality scores. The main motivations for the proposed method are that: 1) quality metrics computed on viewports better capture the user experience than metrics computed on the projection domain; 2) no individual objective image quality metric always performs best for all types of visual distortions, while a learned combination of them is able to adapt to different conditions and produce better results overall. Experimental results, based on the largest available 360-degree video quality dataset, demonstrate that the proposed metric outperforms state-of-the-art 360-degree and 2D video quality metrics.},
  keywords  = {Measurement; Quality assessment; Visualization; Video recording; Feature extraction; Two dimensional displays; Distortion; visual quality assessment; omnidirectional video; 360-degree video; multi-metric fusion},
  doi       = {10.1109/ICME46284.2020.9102936},
  issn      = {1945-788X},
  month     = {July},
}
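The fusion step described in the abstract above (several objective metrics computed on viewports, combined by a learned model into one score that tracks subjective ratings) can be pictured with a small regression sketch. All numbers and the choice of learner below are illustrative assumptions; the paper's actual features and model may differ.

import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Hypothetical per-video features: objective quality metrics (e.g., PSNR,
# SSIM, a temporal measure) averaged over viewports of each 360-degree video.
X = np.array([
    [32.1, 0.91, 0.85],
    [28.4, 0.83, 0.74],
    [35.7, 0.95, 0.90],
    [25.0, 0.76, 0.65],
])
y = np.array([3.8, 2.9, 4.3, 2.1])  # hypothetical subjective scores (MOS)

# Learn the fusion model mapping viewport metrics to a quality score.
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(X, y)

# Predicted quality for a new video's viewport features.
print(model.predict(np.array([[30.0, 0.88, 0.80]])))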
Subjective and Viewport-based Objective Quality Assessment of 360-degree Videos. Azevedo, R. G. d. A.; Birkbeck, N.; Janatra, I.; Adsumilli, B.; and Frossard, P. In Proceedings of Image Quality and System Performance XVII, Electronic Imaging 2020, pages 6, Burlingame, California, USA, January 2020.

@inproceedings{2020_01_azevedo,
  title     = {Subjective and Viewport-based Objective Quality Assessment of 360-degree Videos},
  author    = {Azevedo, Roberto Gerson de Albuquerque and Birkbeck, Neil and Janatra, Ivan and Adsumilli, Balu and Frossard, Pascal},
  pages     = {6},
  year      = {2020},
  month     = {January},
  abstract  = {Visual distortions in processed 360-degree visual content consumed through head-mounted displays (HMDs) are perceived very differently when compared to traditional 2D content. To better understand how compression-related artifacts affect the overall perceived quality of 360-degree videos, this paper presents a subjective quality assessment study and analyzes the performance of objective metrics to correlate with the gathered subjective scores. In contrast to previous related work, the proposed study focuses on the equiangular cubemap projection and includes specific visual distortions (blur, blockiness, H.264 compression, and cubemap seams) on both monoscopic and stereoscopic sequences. The objective metrics performance analysis is based on metrics computed in both the projection domain and the viewports, which is closer to what the user sees. The results show that, overall, objective metrics computed on viewports are more correlated with the subjective scores in our dataset than the same metrics computed in the projection domain. Moreover, the proposed dataset and objective metrics analysis serve as a benchmark for the development of new perception-optimized quality assessment algorithms for 360-degree videos, which is still a largely open research problem.},
  url       = {http://infoscience.epfl.ch/record/271755},
  booktitle = {Proceedings of Image Quality and System Performance XVII, Electronic Imaging 2020},
  address   = {Burlingame, California, USA},
}
2019 (7)
Bridging the Gap between Semantics and Multimedia Processing. Moreno, M. F.; Lima, G.; Santos, R.; Azevedo, R.; and Endler, M. In 2019 IEEE International Symposium on Multimedia (ISM), pages 315-3153, 2019.

@inproceedings{2019_12_moreno,
  author    = {M. F. {Moreno} and G. {Lima} and R. {Santos} and R. {Azevedo} and M. {Endler}},
  booktitle = {2019 IEEE International Symposium on Multimedia (ISM)},
  title     = {Bridging the Gap between Semantics and Multimedia Processing},
  year      = {2019},
  pages     = {315-3153},
  abstract  = {In this paper, we give an overview of the semantic gap problem in multimedia and discuss how machine learning and symbolic AI can be combined to narrow this gap. We describe the gap in terms of a classical architecture for multimedia processing and discuss a structured approach to bridge it. This approach combines machine learning (for mapping signals to objects) and symbolic AI (for linking objects to meanings). Our main goal is to raise awareness and discuss the challenges involved in this structured approach to multimedia understanding, especially in the view of the latest developments in machine learning and symbolic AI.},
}
Improving the Authoring of Web-based Interactive E-books with FableJS. Silva, A.; de Souza, W.; Moraes, D.; Azevedo, R.; and Neto, C. S. In Anais da VII Escola Regional de Computação do Ceará, Maranhão e Piauí, pages 182–189, Porto Alegre, RS, Brasil, 2019. SBC.

@inproceedings{2019_11_silva,
  author    = {Alfredo Silva and Welton de Souza and Daniel Moraes and Roberto Azevedo and Carlos Soares Neto},
  title     = {Improving the Authoring of Web-based Interactive E-books with FableJS},
  booktitle = {Anais da VII Escola Regional de Computação do Ceará, Maranhão e Piauí},
  location  = {São Luís},
  year      = {2019},
  issn      = {0000-0000},
  pages     = {182--189},
  publisher = {SBC},
  address   = {Porto Alegre, RS, Brasil},
  url       = {https://sol.sbc.org.br/index.php/ercemapi/article/view/8861},
}
Improving the reuse support in learning objects authoring tools: a case study with Cacuriá. Lima, T.; Azevedo, R. G. d. A.; and Soares Neto, C. d. S. In WebMedia 2019, Rio de Janeiro-RJ, October 2019.

@inproceedings{2019_10_lima,
  author    = {Lima, Thacyla and Azevedo, Roberto Gerson de Albuquerque and Soares Neto, Carlos de Salles},
  title     = {Improving the reuse support in learning objects authoring tools: a case study with Cacuriá},
  booktitle = {WebMedia 2019},
  address   = {Rio de Janeiro-RJ},
  days      = {29-1},
  month     = {oct},
  year      = {2019},
  abstract  = {Reuse as a way of improving educational resources and facilitating and accelerating the authoring process has been an important theme since the earliest research efforts on Learning Objects (LOs). This work presents the evolution of the Cacuriá authoring tool based on a previous work on the identification of reuse requirements and it analyzes the impact of the implementation of these good practices through a study with the target tool audience. Cacuriá enables the creation of interactive educational multimedia content for the Web and digital TV. The new version of the tool focuses mainly on the inclusion of three new features that favor reuse: Layouts, Widgets, and Application Templates. Through these new resources, the author can build his LO reusing resources created by other authors and has the possibility to reuse good practices, for example, through previously validated pedagogical practices. The added functionalities were evaluated in an experiment with 11 teachers/tutors. The results of this qualitative sample indicate that the participants of the experiment believe that the new functionalities improved the creation of reusable LOs both in the creative aspect and in the reduction of the total development time.},
  keywords  = {Authoring and annotation of content; Document Engineering and Web Engineering; Interfaces, new interactions, usability, accessibility and user experience; Testing and validation of systems and applications},
}
Subjective Evaluation of 360-degree Sensory Experiences. Guedes, Á. L. V.; Azevedo, R. G. d. A.; Frossard, P.; Barbosa, S. D.; and Colcher, S. In IEEE 21st International Workshop on Multimedia Signal Processing (MMSP'19), pages 6, Kuala Lumpur, Malaysia, September 2019.

@inproceedings{2019_09b_guedes,
  title     = {Subjective Evaluation of 360-degree Sensory Experiences},
  author    = {Guedes, Álan Lívio Vasconcelos and Azevedo, Roberto Gerson de Albuquerque and Frossard, Pascal and Barbosa, Simone D.J. and Colcher, Sérgio},
  year      = {2019},
  month     = {9},
  pages     = {6},
  booktitle = {IEEE 21st International Workshop on Multimedia Signal Processing},
  series    = {MMSP'19},
  address   = {Kuala Lumpur, Malaysia},
  abstract  = {Traditionally, most multimedia content has been developed to stimulate two of the human senses, i.e., sight and hearing. Due to recent technological advancements, however, innovative services have been developed that provide more realistic, immersive, and engaging experiences to the audience. Omnidirectional (i.e., 360-degree) video, for instance, is becoming increasingly popular. It allows the viewer to navigate the full 360-degree view of a scene from a specific point. In particular, when consumed through head-mounted displays, 360-degree videos provide increased immersion and sense of presence. The use of multi-sensory effects ---e.g., wind, vibration, and scent--- has also been explored by recent work, which allows an improved experience by stimulating other users' senses through sensory effects that go beyond the audiovisual content. Understanding how these additional multi-sensory effects affect the users' perceived quality of experience~(QoE) in 360-degree, however, is still an open research problem at large. As a step to better understand the QoE of immersive sensory experiences, this paper presents a test-bed and discusses a user-focused study on a scenario in which the user is immersed in the 360-degree video content and is stimulated through additional sensory effects. Quantitative results indicated that the sensorial effects can considerably increase the sense of presence of 360-degree videos. Qualitative results provided us with a better view of the limitations of current technologies and interesting insights such as the users' sense of surprise.},
}
Modeling Multimodal-Multiuser Interactions in Declarative Multimedia Languages. Guedes, Á. L. V.; Azevedo, R. G. d. A.; Colcher, S.; and Barbosa, S. D. In Proceedings of the 19th ACM Symposium on Document Engineering (DocEng'19), pages 9, Berlin, Germany, September 2019. Accepted for publication.

@inproceedings{2019_09_guedes,
  title     = {Modeling Multimodal-Multiuser Interactions in Declarative Multimedia Languages},
  author    = {Guedes, Álan Lívio Vasconcelos and Azevedo, Roberto Gerson de Albuquerque and Colcher, Sérgio and Barbosa, Simone D.J.},
  year      = {2019},
  month     = {9},
  pages     = {9},
  note      = {Accepted for publication},
  booktitle = {Proceedings of the 19th ACM Symposium on Document Engineering},
  series    = {DocEng'19},
  address   = {Berlin, Germany},
  abstract  = {Recent advances in hardware and software technologies have given rise to a new class of human-computer interfaces that both explores multiple modalities and allows for multiple collaborating users. When compared to the development of traditional single-user WIMP~(windows, icons, menus, pointer)-based applications, however, applications supporting the seamless integration of multimodal-multiuser interactions bring new specification and runtime requirements. In this paper, with the aim of assisting the specification of multimedia applications that integrate multimodal-multiuser interactions, we: (1)~propose the MMAM~(Multimodal-Multiuser Authoring Model); (2)~present three different instantiations of it (in NCL, HTML, and a block-based syntax); and (3)~evaluate the proposed model through a user study. MMAM enables programmers to design and ponder different solutions for applications with multimodal-multiuser requirements. Its instantiations allow integrating the solution in real execution environments, such as the Web and Digital TV, and served as proofs of concept about the feasibility of our model and enabled us to perform the user study. The user study focused on capturing evidence of both the user understanding and the user acceptance of the proposed model. We asked developers to perform tasks using MMAM and then answer a TAM~(Technology Acceptance Model)-based questionnaire focused on both the model and its instantiations. As results, the study indicates that the participants easily understood the model~(most of them performed the required tasks with minor or no errors) and found it both useful and easy to use. 94.47\% of the participants gave positive answers to the block-based representation TAM questions, whereas 75.17\% of them gave positive answers to the instantiations-related questions.},
}
Graph-based detection of seams in 360-degree images. de Simone*, F.; Azevedo*, R. G. d. A.; Sohyeong, K.; and Frossard, P. In 2019 IEEE International Conference on Image Processing (ICIP'19), pages 3776-3780, Taipei, Taiwan, Sept 2019. *Equal contributions.

@inproceedings{2019_09_desimone,
  title     = {Graph-based detection of seams in 360-degree images},
  author    = {de Simone*, Francesca and Azevedo*, Roberto Gerson de Albuquerque and Sohyeong, Kim and Frossard, Pascal},
  month     = {Sept},
  year      = {2019},
  doi       = {10.1109/ICIP.2019.8803578},
  issn      = {2381-8549},
  pages     = {3776-3780},
  note      = {*Equal contributions.},
  booktitle = {2019 IEEE International Conference on Image Processing (ICIP)},
  series    = {ICIP'19},
  address   = {Taipei, Taiwan},
  keywords  = {Omnidirectional image; cube map projection; compression; visual distortion; quality metric},
  abstract  = {In this paper, we propose an algorithm to detect a specific kind of distortion, referred to as seams, which commonly occurs when a 360-degree image is represented in the planar domain, e.g., via the Cube Map (CM) projection, and undergoes lossy compression. The proposed algorithm exploits a graph-based representation to account for the actual sampling density of the 360-degree signal in the native spherical domain. The CM image is considered as a signal lying on a graph defined on the spherical surface. The spectra of the processed and the original signals, computed by applying the Graph Fourier Transform, are compared to detect the seams. To test our method, a dataset of compressed CM 360-degree images, annotated by experts, has been created. The performance of the proposed algorithm is compared to those achieved by baseline metrics, as well as to the same approach based on spectral comparison but ignoring the spherical nature of the signal. The experimental results show that the proposed method has the best performance and can successfully detect up to approximately 90\% of visible seams on our dataset.},
}
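The spectral comparison described in the abstract above reduces, in outline, to: treat the image as a signal on a graph, take its Graph Fourier Transform (projection onto the eigenvectors of the graph Laplacian), and compare the spectra of the original and processed signals. The toy sketch below uses a random graph and an arbitrary threshold; the paper's graph construction on the sphere and its detection rule are more elaborate.

import numpy as np

rng = np.random.default_rng(0)
n = 8

# Hypothetical symmetric adjacency matrix weighting neighboring samples.
W = rng.random((n, n))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)

# Graph Fourier basis: eigenvectors of the combinatorial graph Laplacian.
L = np.diag(W.sum(axis=1)) - W
_, U = np.linalg.eigh(L)

x_ref = rng.random(n)                          # original signal
x_proc = x_ref + 0.1 * rng.standard_normal(n)  # stand-in for coding noise

# Compare the two spectra; a large deviation in the upper (high-frequency)
# part of the spectrum would be flagged as a seam (threshold is arbitrary).
deviation = np.abs(U.T @ x_proc - U.T @ x_ref)
print("seam suspected:", deviation[n // 2:].sum() > 0.2)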
On the First JND and Break in Presence of 360-degree Content: An Exploratory Study. Azevedo, R. G. d. A.; Birkbeck, N.; Janatra, I.; Adsumilli, B.; and Frossard, P. In Proceedings of the 11th ACM Workshop on Immersive Mixed and Virtual Environment Systems (MMVE '19), pages 1–3, New York, NY, USA, 2019. ACM.

@inproceedings{2019_06_azevedo,
  author    = {Azevedo, Roberto Gerson de Albuquerque and Birkbeck, Neil and Janatra, Ivan and Adsumilli, Balu and Frossard, Pascal},
  title     = {On the First JND and Break in Presence of 360-degree Content: An Exploratory Study},
  booktitle = {Proceedings of the 11th ACM Workshop on Immersive Mixed and Virtual Environment Systems},
  series    = {MMVE '19},
  year      = {2019},
  isbn      = {978-1-4503-6299-3},
  location  = {Amherst, Massachusetts},
  pages     = {1--3},
  numpages  = {3},
  url       = {http://doi.acm.org/10.1145/3304113.3326115},
  doi       = {10.1145/3304113.3326115},
  acmid     = {3326115},
  publisher = {ACM},
  address   = {New York, NY, USA},
  keywords  = {360-degree video, JND, presence, visual distortions, visual quality},
  abstract  = {Unlike traditional planar 2D visual content, immersive 360-degree images and videos undergo particular processing steps and are intended to be consumed via head-mounted displays (HMDs). To get a deeper understanding on the perception of 360-degree visual distortions when consumed through HMDs, we perform an exploratory task-based subjective study in which we have asked subjects to define the first noticeable difference and break-in-presence points when incrementally adding specific compression artifacts. The results of our study: give insights on the range of allowed visual distortions for 360-degree content; show that the added visual distortions are more tolerable in mono than in stereoscopic 3D; and identify issues with current 360-degree objective quality metrics.},
}
2018 (1)
Exploring an AR-based User Interface for Authoring Multimedia Presentations. Mendes, P. R. C.; Azevedo, R. G. d. A.; de Oliveira, R. G. S.; and Soares Neto, C. d. S. In DocEng 2018, Halifax, CA, August 2018.

@inproceedings{2018_08_mendes,
  author    = {Mendes, Paulo Renato Conceição and Azevedo, Roberto Gerson de Albuquerque and de Oliveira, Ruy Guilherme Silva and Soares Neto, Carlos de Salles},
  title     = {Exploring an AR-based User Interface for Authoring Multimedia Presentations},
  booktitle = {DocEng 2018},
  address   = {Halifax, CA},
  month     = {aug},
  year      = {2018},
  abstract  = {This paper describes the BumbAR approach for composing multimedia presentations and evaluates it through a qualitative study based on the Technology Acceptance Model (TAM). The BumbAR proposal is based on the event-condition-action model of Nested Context Model (NCM) and explores the use of augmented reality and real-world objects (markers) as an innovative user interface to specify the behavior and relationships between the media objects in a presentation. The qualitative study aimed at measuring the users' attitude towards using BumbAR and an augmented reality environment for authoring multimedia presentations. The results show that the participants found the BumbAR approach both useful and easy-to-use, while most of them (66.67\%) found the system more convenient than traditional desktop-based authoring tools.},
}
2017 (5)
Projetos Atuais e Visão de Futuro do Laboratório TeleMídia/PUC-Rio em Videocolaboração. Colcher, S.; Guedes, Á. L. V.; Azevedo, R. G. d. A.; Lima, G. F.; Santos, R. C. M.; and Busson, A. J. G. In Ciuffo, L.; and Roesler, V., editors, O Futuro da Videocolaboração: perspectivas (Anais do IV CT-Vídeo, WebMedia '17), pages 207–232. Sociedade Brasileira de Computação, New York, NY, USA, 2017.

@incollection{2017_10_colcher,
  author    = {Colcher, Sérgio and Guedes, Álan Lívio Vasconcelos and Azevedo, Roberto Gerson de Albuquerque and Lima, Guilherme F. and Santos, Rodrigo Costa Mesquita and Busson, Antônio José Grandson},
  title     = {Projetos Atuais e Visão de Futuro do Laboratório TeleMídia/PUC-Rio em Videocolaboração},
  editor    = {Ciuffo, Leandro and Roesler, Valter},
  booktitle = {O Futuro da Videocolaboração: perspectivas},
  series    = {Anais do IV CT-Vídeo. WebMedia '17},
  year      = {2017},
  isbn      = {978-85-7669-381-9},
  location  = {Gramado, RS, Brazil},
  pages     = {207--232},
  publisher = {Sociedade Brasileira de Computação},
  address   = {New York, NY, USA},
  language  = {Portuguese},
  abstract  = {Since the early 1990's, the TeleMídia laboratory of the Department of Informatics of PUC-Rio researches and trains people in the areas of multimedia and hypermedia, which include interactive videos and video collaboration as important subareas. This chapter presents the main research lines and projects of the laboratory and discusses the future vision of the authors about interactive videos and video collaboration.},
  url_paper = {http://www.telemidia.puc-rio.br/files/biblio/2017_10_colcher.pdf},
}
Converting NCL documents to Smix and fixing their semantics and interpretation in the process. Lima, G.; Azevedo, R. G. d. A.; Colcher, S.; and Haeusler, E. H. In WebMedia 2017, Gramado, RS, October 2017.

@inproceedings{lima_2017,
  author    = {Lima, Guilherme and Azevedo, Roberto Gerson de Albuquerque and Colcher, Sérgio and Haeusler, Edward Hermann},
  title     = {Converting NCL documents to Smix and fixing their semantics and interpretation in the process},
  booktitle = {WebMedia 2017},
  address   = {Gramado, RS},
  days      = {17-20},
  month     = {oct},
  year      = {2017},
  abstract  = {In this paper, we describe the conversion of NCL to Smix and discuss its main implications. NCL is a declarative language for the specification of interactive multimedia presentations which was adopted by the ITU-T H.761 recommendation for interoperable IPTV services. Smix is an NCL-like, domain-specific language with a similar purpose, but with a much simpler and more precise semantics. By implementing NCL over Smix, we bring to the former the notions of reaction and execution instants, and with them some benefits. From a practical perspective, we fix the semantics of the converted documents, get a leaner NCL player (the Smix interpreter), and simplify further conversions and interpretations. From a systems-design perspective, the structured conversion of NCL to Smix helps us tame the complexity of mapping the user-oriented constructs of NCL into the machine-oriented primitives and operations that realize them as a multimedia application. In the paper, we present both NCL and Smix, discuss related work on document conversion, and detail the proposed conversion process together with its prototype implementation.},
  keywords  = {Architectures, processes and methodologies; Content analysis, coding and processing; Content synchronization and presentation},
  url_paper = {http://www.telemidia.puc-rio.br/files/biblio/2017_10_lima.pdf},
}
The Fábulas Model for Authoring Web-based Children's eBooks. Pinto, H. F.; Soares Neto, C. D. S.; Colcher, S.; and Azevedo, R. G. d. A. In Proceedings of the 2017 ACM Symposium on Document Engineering (DocEng '17), pages 19–28, New York, NY, USA, 2017. ACM.

@inproceedings{pinto_2017,
  author    = {Pinto, Hedvan Fernandes and Soares Neto, Carlos De Salles and Colcher, Sérgio and Azevedo, Roberto Gerson de Albuquerque},
  title     = {The Fábulas Model for Authoring Web-based Children's eBooks},
  booktitle = {Proceedings of the 2017 ACM Symposium on Document Engineering},
  series    = {DocEng '17},
  year      = {2017},
  isbn      = {978-1-4503-4689-4},
  location  = {Valletta, Malta},
  pages     = {19--28},
  numpages  = {10},
  url       = {http://doi.acm.org/10.1145/3103010.3103016},
  doi       = {10.1145/3103010.3103016},
  acmid     = {3103016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  keywords  = {conceptual model, interactive ebook, multimedia authoring},
  abstract  = {Nowadays, tablets and smartphones are commonly used by children for both entertainment and education purposes. In particular, interactive multimedia eBooks running on those devices allow a richer experience when compared to traditional text-only books, being potentially more engaging and entertaining to readers. However, to explore the most exciting features in these environments, authors are currently left alone in the sense that there is no high-level (less technical) support, and these features are usually accessible only through programming or some other technical skill. In this work, we aim at extracting the main features of enhanced children's eBooks and propose a model, named Fábulas (the Portuguese word for fables), that allows authors to create interactive multimedia children's eBooks declaratively. The model was conceived by taking, as a starting point, a systematic analysis of the common concepts, with the focus on identifying and categorizing recurring characteristics and pointing out functional and non-functional requirements that establish a strong orientation towards the set of desirable abstractions of an underlying model. Moreover, the paper presents a case study for the implementation of Fábulas on the Web, and discusses the authoring of a complete interactive story over it.},
}
A Hypervideo Model for Learning Objects. Busson, A. J. G.; Damasceno, A. L. d. B.; Azevedo, R. G. d. A.; Neto, C. d. S. S.; Lima, T. d. S.; and Colcher, S. In Proceedings of the 28th ACM Conference on Hypertext and Social Media (HT '17), pages 245–253, New York, NY, USA, 2017. ACM.

@inproceedings{2017_07_busson,
  author    = {Busson, Antonio José G. and Damasceno, André Luiz de B. and Azevedo, Roberto Gerson de Albuquerque and Neto, Carlos de Salles Soares and Lima, Thacyla de Sousa and Colcher, Sérgio},
  title     = {A Hypervideo Model for Learning Objects},
  booktitle = {Proceedings of the 28th ACM Conference on Hypertext and Social Media},
  series    = {HT '17},
  year      = {2017},
  isbn      = {978-1-4503-4708-2},
  location  = {Prague, Czech Republic},
  pages     = {245--253},
  numpages  = {9},
  url       = {http://doi.acm.org/10.1145/3078714.3078739},
  doi       = {10.1145/3078714.3078739},
  acmid     = {3078739},
  publisher = {ACM},
  address   = {New York, NY, USA},
  keywords  = {hypervideos, learning objects, scenesync},
  abstract  = {Learning Objects (LOs) are entities that can be used, reused, or referred to during the teaching process. They are commonly embedded into documents that establish spatial and temporal relationships on their contents. Hypervideo LOs allow students to individualize their learning experience with non-linear browsing mechanisms and content adaptation. This paper presents a survey of features for a set of documents representing such LOs as well as desirable aspects that should be expressed during the authoring phase. Also, this paper presents a conceptual model that fits such requirements. The model is implemented by SceneSync, a domain-specific language focused on the synchronization and temporal behavior of LOs. As a result of the work, we present a set of LOs specified in SceneSync and a discussion about the identified features, which confirm the expressiveness and applicability of the model.},
}
Extending multimedia languages to support multimodal user interactions. Guedes, Á. L.; Azevedo, R. G. d. A.; and Barbosa, S. D. J. Multimedia Tools and Applications, 76(4): 5691–5720. 2017.

@article{2017_02_guedes,
  author   = {Guedes, Álan Lívio Vasconcelos and Azevedo, Roberto Gerson de Albuquerque and Barbosa, Simone Diniz Junqueira},
  title    = {Extending multimedia languages to support multimodal user interactions},
  journal  = {Multimedia Tools and Applications},
  year     = {2017},
  volume   = {76},
  number   = {4},
  pages    = {5691--5720},
  abstract = {Historically, the Multimedia community research has focused on output modalities, through studies on timing and multimedia processing. The Multimodal Interaction community, on the other hand, has focused on user-generated modalities, through studies on Multimodal User Interfaces (MUI). In this paper, aiming to assist the development of multimedia applications with MUIs, we propose the integration of concepts from those two communities in a unique high-level programming framework. The framework integrates user modalities ---both user-generated (e.g., speech, gestures) and user-consumed (e.g., audiovisual, haptic)--- in declarative programming languages for the specification of interactive multimedia applications. To illustrate our approach, we instantiate the framework in the NCL (Nested Context Language) multimedia language. NCL is the declarative language for developing interactive applications for Brazilian Digital TV and an ITU-T Recommendation for IPTV services. To help evaluate our approach, we discuss a usage scenario and implement it as an NCL application extended with the proposed multimodal features. Also, we compare the expressiveness of the multimodal NCL against existing multimedia and multimodal languages, for both input and output modalities.},
  issn     = {1573-7721},
  doi      = {10.1007/s11042-016-3846-8},
  url      = {http://dx.doi.org/10.1007/s11042-016-3846-8},
}
\n
\n\n\n
\n Historically, the Multimedia research community has focused on output modalities, through studies on timing and multimedia processing. The Multimodal Interaction community, on the other hand, has focused on user-generated modalities, through studies on Multimodal User Interfaces (MUI). In this paper, aiming to assist the development of multimedia applications with MUIs, we propose the integration of concepts from those two communities in a single high-level programming framework. The framework integrates user modalities —both user-generated (e.g., speech, gestures) and user-consumed (e.g., audiovisual, haptic)— in declarative programming languages for the specification of interactive multimedia applications. To illustrate our approach, we instantiate the framework in the NCL (Nested Context Language) multimedia language. NCL is the declarative language for developing interactive applications for Brazilian Digital TV and an ITU-T Recommendation for IPTV services. To help evaluate our approach, we discuss a usage scenario and implement it as an NCL application extended with the proposed multimodal features. Also, we compare the expressiveness of the multimodal NCL against existing multimedia and multimodal languages, for both input and output modalities.\n
\n\n\n
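The core idea above — treating user-generated modalities (speech, gestures) and user-consumed modalities (audio, video, haptics) as events in one causal model — can be illustrated with a small sketch. The C code below is not the paper's NCL syntax nor Ginga's API; it is a hypothetical event bus in which a recognition event and a presentation event share the same dispatch path, so a causal link such as "on speech recognized, start video1" is expressible uniformly.

/* Hypothetical sketch (not the paper's NCL/Ginga API): one event bus for
 * both user-generated (input) and user-consumed (output) modality events. */
#include <stdio.h>
#include <string.h>

typedef enum { EV_RECOGNITION, EV_PRESENTATION } EventClass;

typedef struct {
    EventClass class;        /* input (recognition) or output (presentation) */
    const char *source;      /* e.g., "speech", "gesture", "video1" */
    const char *transition;  /* e.g., "onRecognize", "onBegin", "onEnd" */
} ModalityEvent;

/* A causal link in the style of "when X happens, do Y on Z". */
typedef struct {
    EventClass when_class;
    const char *when_source;
    const char *when_transition;
    const char *action;      /* e.g., "start" */
    const char *target;      /* e.g., "video1" */
} Link;

static void dispatch(const ModalityEvent *ev, const Link *links, int n)
{
    for (int i = 0; i < n; i++) {
        if (links[i].when_class == ev->class &&
            strcmp(links[i].when_source, ev->source) == 0 &&
            strcmp(links[i].when_transition, ev->transition) == 0)
            printf("%s %s\n", links[i].action, links[i].target);
    }
}

int main(void)
{
    /* "When the speech recognizer fires, start video1." */
    Link links[] = {
        { EV_RECOGNITION, "speech", "onRecognize", "start", "video1" },
    };
    ModalityEvent ev = { EV_RECOGNITION, "speech", "onRecognize" };
    dispatch(&ev, links, 1);  /* prints: start video1 */
    return 0;
}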
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Programando Aplicações Multimídia no GStreamer.\n \n \n \n\n\n \n Lima, G. F.; Mesquita, R. C.; and Azevedo, R. G. d. A.\n\n\n \n\n\n\n In Anais do XXII Simpósio Brasileiro de Sistemas Multimídia e Web (Vol. 3): Minicursos. SBC, Teresina, PI, Brazil, November 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{2016_11_lima,\nauthor={Lima, Guilherme Ferreira and Mesquita, Rodrigo Costa and Azevedo,\nRoberto Gerson de Albuquerque},\ntitle={Programando Aplicações Multim{\'i}dia no GStreamer},\nbooktitle={Anais do XXII Simp{\'o}sio Brasileiro de Sistemas Multim{\'i}dia e\nWeb (Vol. 3): Minicursos},\npages={},\npublisher={SBC},\nyear={2016},\nmonth={November},\neditor={},\naddress={Teresina, PI, Brazil},\nlanguage={Portuguese},\nisbn={978-85-7669-333-8},\nabstract={This short course is an introduction to GStreamer, one of the main\nfree/open-source frameworks for multimedia processing. We start presenting\nGStreamer, its architecture and the dataflow programming model, and then\nadopt a hands-on approach. Starting with an example, a simple video player,\nwe introduce the main concepts of GStreamer’s basic C API and implement\nthem over the initial example incrementally, so that at the end of the course\nwe get a complete video player with support for the usual playback operations\n(start, stop, pause, seek, fast-forward, and rewind). We also discuss sample\nfilters—processing elements that manipulate audio and video samples—and\npresent some of the filters natively available in GStreamer. Moreover, we\nshow how one can extend the framework by creating a plugin with a custom\nfilter that manipulates video samples. The prerequisite for the short course\nis a basic knowledge of the C programming language. At the end of the course,\nwe expect that participants acquire a general view of GStreamer, and be able\nto create simple multimedia applications and explore its more advanced\nfeatures.},\n}\n\n
\n
\n\n\n
\n This short course is an introduction to GStreamer, one of the main free/open-source frameworks for multimedia processing. We start presenting GStreamer, its architecture and the dataflow programming model, and then adopt a hands-on approach. Starting with an example, a simple video player, we introduce the main concepts of GStreamer’s basic C API and implement them over the initial example incrementally, so that at the end of the course we get a complete video player with support for the usual playback operations (start, stop, pause, seek, fast-forward, and rewind). We also discuss sample filters—processing elements that manipulate audio and video samples—and present some of the filters natively available in GStreamer. Moreover, we show how one can extend the framework by creating a plugin with a custom filter that manipulates video samples. The prerequisite for the short course is a basic knowledge of the C programming language. At the end of the course, we expect that participants acquire a general view of GStreamer, and be able to create simple multimedia applications and explore its more advanced features.\n
\n\n\n
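As a companion to the course description above, here is a minimal sketch of the kind of player skeleton such a course starts from, assuming GStreamer 1.x; the file URI is a placeholder, and the high-level playbin element is used instead of a hand-assembled pipeline.

/* Minimal GStreamer 1.x player sketch: plays a placeholder URI with the
 * high-level "playbin" element and blocks until end-of-stream or an error. */
#include <gst/gst.h>

int main(int argc, char *argv[])
{
    gst_init(&argc, &argv);

    /* playbin assembles the whole demux/decode/render pipeline internally. */
    GstElement *player = gst_element_factory_make("playbin", "player");
    g_object_set(player, "uri", "file:///tmp/example.mp4", NULL); /* placeholder URI */

    gst_element_set_state(player, GST_STATE_PLAYING);
    /* The course's other playback operations map onto, e.g.:
     * gst_element_set_state(player, GST_STATE_PAUSED);          pause
     * gst_element_seek_simple(player, GST_FORMAT_TIME,
     *                         GST_SEEK_FLAG_FLUSH, 10 * GST_SECOND);  seek */

    /* Block until playback finishes or fails. */
    GstBus *bus = gst_element_get_bus(player);
    GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
        GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

    if (msg != NULL)
        gst_message_unref(msg);
    gst_object_unref(bus);
    gst_element_set_state(player, GST_STATE_NULL);
    gst_object_unref(player);
    return 0;
}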
\n\n\n
\n \n\n \n \n \n \n \n SXMLua: Definindo estilos de documentos XML em Lua.\n \n \n \n\n\n \n Gomes, L.; Mesquita, R. C.; and Azevedo, R. G. d. A.\n\n\n \n\n\n\n In Proceedings of the 13th Workshop on Undergraduate Work on the Brazilian Symposium on Multimedia and the Web, of WebMedia '16, Teresina, PI, Brazil, November 2016. SBC\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016_11_gomes,\nauthor={Gomes, Lucas and Mesquita, Rodrigo Costa and Azevedo, Roberto Gerson\nde Albuquerque},\ntitle={SXMLua: Definindo estilos de documentos XML em Lua},\nbooktitle={Proceedings of the 13th Workshop on Undergraduate Work on the\nBrazilian Symposium on Multimedia and the Web},\nseries={WebMedia '16},\npublisher={SBC},\nyear={2016},\nmonth={November},\neditor={},\naddress={Teresina, PI, Brazil},\nlanguage={Portuguese},\nabstract={This paper proposes SXMLua (Styling XML through Lua), a\npre-processor and a library supporting the definition of stylesheets for XML\ndocuments using Lua. SXMLua was inspired by the CSS language, and, similar to\nit, allows the separation between the presentation and the content\nspecification of XML documents. It also provides (through the Lua language)\nfacilities that are not natively supported by CSS, such as variables and\nfunctions. Besides presenting SXMLua, in this paper, we also discuss its use\nfor specifying stylesheets for interactive digital TV applications using NCL\n(Nested Context Language).},\n}\n\n
\n
\n\n\n
\n This paper proposes SXMLua (Styling XML through Lua), a pre-processor and a library supporting the definition of stylesheets for XML documents using Lua. SXMLua was inspired by the CSS language, and, similar to it, allows the separation between the presentation and the content specification of XML documents. It also provides (through the Lua language) facilities that are not natively supported by CSS, such as variables and functions. Besides presenting SXMLua, in this paper, we also discuss its use for specifying stylesheets for interactive digital TV applications using NCL (Nested Context Language).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Extending NCL to Support Multiuser and Multimodal Interactions.\n \n \n \n \n\n\n \n Guedes, Á. L.; Azevedo, R. G. d. A.; Colcher, S.; and Barbosa, S. D.\n\n\n \n\n\n\n In Proceedings of the 22nd Brazilian Symposium on Multimedia and the Web, of Webmedia '16, pages 39–46, New York, NY, USA, 2016. ACM\n Acceptance ratio: 30%\n\n\n\n
\n\n\n\n \n \n \"ExtendingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 35 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2016_11_guedes,\nauthor={Guedes, \'{A}lan L.V. and Azevedo, Roberto Gerson de Albuquerque and\nColcher, S{\'e}rgio and Barbosa, Simone D.J.},\ntitle={Extending NCL to Support Multiuser and Multimodal Interactions},\nbooktitle={Proceedings of the 22nd Brazilian Symposium on Multimedia and the\nWeb},\nseries={Webmedia '16},\nyear={2016},\nisbn={978-1-4503-4512-5},\nlocation={Teresina, Piauí, Brazil},\npages={39--46},\nnumpages={8},\nurl={http://doi.acm.org/10.1145/2976796.2976869},\ndoi={10.1145/2976796.2976869},\nacmid={2976869},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={NCL, ginga-NCL, multimedia languages, multimodal interactions,\nmultiuser interactions, nested context language},\nabstract={Recent advances in technologies for speech, touch and gesture\nrecognition have given rise to a new class of user interfaces that not only\nexplores multiple modalities but also allows for multiple interacting\nusers. Even so, current declarative multimedia languages (e.g., HTML, SMIL,\nand NCL) support only limited forms of user input (mainly keyboard and mouse)\nfor a single user. In this paper, we aim at studying how the NCL multimedia\nlanguage could take advantage of those new recognition technologies. To do\nso, we revisit the model behind NCL, named NCM (Nested Context Model), and\nextend it with first-class concepts supporting multiuser and multimodal\nfeatures. To evaluate our approach, we instantiate the proposal and discuss\nsome usage scenarios, developed as NCL applications with our extended\nfeatures.},\nnote={Acceptance ratio: 30\\%},\n}\n\n
\n
\n\n\n
\n Recent advances in technologies for speech, touch and gesture recognition have given rise to a new class of user interfaces that not only explores multiple modalities but also allows for multiple interacting users. Even so, current declarative multimedia languages (e.g., HTML, SMIL, and NCL) support only limited forms of user input (mainly keyboard and mouse) for a single user. In this paper, we aim at studying how the NCL multimedia language could take advantage of those new recognition technologies. To do so, we revisit the model behind NCL, named NCM (Nested Context Model), and extend it with first-class concepts supporting multiuser and multimodal features. To evaluate our approach, we instantiate the proposal and discuss some usage scenarios, developed as NCL applications with our extended features.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A graphics composition architecture for multimedia applications based on layered-depth-image.\n \n \n \n\n\n \n Azevedo, R. G. d. A.; and Lima, G. F.\n\n\n \n\n\n\n In 2016 3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON), pages 1-4, Hamburg, Germany, July 2016. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{2016_07_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Lima, Guilherme Ferreira},\nbooktitle={2016 3DTV-Conference: The True Vision - Capture, Transmission and\nDisplay of 3D Video (3DTV-CON)},\ntitle={A graphics composition architecture for multimedia applications based\non layered-depth-image},\nyear={2016},\npages={1-4},\nkeywords={Kernel;Media;Multimedia communication;Real-time systems;Rendering\n(computer graphics);Three-dimensional displays;3D Video;Graphics\nComposition;Layered-depth Image;Multimedia Applications},\ndoi={10.1109/3DTV.2016.7548882},\nmonth={July},\naddress={Hamburg, Germany},\nabstract={We present a graphics architecture for the seamless integration of\n3DV objects into ordinary 2D/3D multimedia applications. The architecture is\nbased on layered-depth-image (LDI) and supports the real-time rendering of\nmultiple views for multi-view 3D displays. We also present a prototype\nimplementation of the proposed architecture in OpenCL, and discuss\nexperimental results. The results indicate that the proposed approach leads\nto better results than other state-of-the-art graphics architectures for\nmultimedia applications.},\n}\n\n%%% 2015 %%%\n
\n
\n\n\n
\n We present a graphics architecture for the seamless integration of 3DV objects into ordinary 2D/3D multimedia applications. The architecture is based on layered-depth-image (LDI) and supports the real-time rendering of multiple views for multi-view 3D displays. We also present a prototype implementation of the proposed architecture in OpenCL, and discuss experimental results. The results indicate that the proposed approach leads to better results than other state-of-the-art graphics architectures for multimedia applications.\n
\n\n\n
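The layered-depth-image idea above can be made concrete with a small sketch. The plain C below is only an illustration of the per-pixel operation (the paper's prototype uses OpenCL, and its actual kernels are not reproduced here): each pixel carries a short list of (color, depth) samples, and a single output view is resolved by keeping the sample nearest to the viewer.

/* Per-pixel sketch of layered-depth-image (LDI) composition. Assumed layer
 * budget and packed-RGBA color are illustrative; alpha blending and
 * multi-view reprojection are omitted. */
#include <stdint.h>
#include <float.h>

#define MAX_LAYERS 4  /* assumed per-pixel layer budget */

typedef struct {
    uint32_t color;   /* packed RGBA */
    float depth;      /* smaller = closer to the viewer */
} LdiSample;

/* Resolve one pixel of the composed view: scan the pixel's layer list and
 * keep the sample closest to the camera, falling back to the background. */
uint32_t ldi_resolve_pixel(const LdiSample samples[], int n_samples,
                           uint32_t background)
{
    uint32_t out = background;
    float nearest = FLT_MAX;
    for (int i = 0; i < n_samples && i < MAX_LAYERS; i++) {
        if (samples[i].depth < nearest) {
            nearest = samples[i].depth;
            out = samples[i].color;
        }
    }
    return out;
}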
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Supporting multimedia applications in stereoscopic and depth-based 3D video systems.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.\n\n\n \n\n\n\n Ph.D. Thesis, Department of Informatics, PUC-Rio, December 2015.\n \n\n\n\n
\n\n\n\n \n \n \"SupportingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 14 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@PhDThesis{2015_12_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque},\ntitle={Supporting multimedia applications in stereoscopic and depth-based 3D\nvideo systems},\ntype={Ph.D. thesis},\nmonth={December},\nyear={2015},\nlocation={Rio de Janeiro, RJ},\nadvisor={Soares, Luiz Fernando Gomes},\nschool={Department of Informatics, PUC-Rio},\nurl={http://www2.dbd.puc-rio.br/pergamum/tesesabertas/1021805_2015_completo.pdf},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Specification of Multimodal Interactions in NCL.\n \n \n \n \n\n\n \n Guedes, A. L. V.; Azevedo, R. G. d. A.; Moreno, M. F.; and Soares, L. F. G.\n\n\n \n\n\n\n In Proceedings of the 21st Brazilian Symposium on Multimedia and the Web, of WebMedia '15, pages 181–187, New York, NY, USA, 2015. ACM\n Acceptance ratio: 34%\n\n\n\n
\n\n\n\n \n \n \"SpecificationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 34 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2015_10_guedes,\nauthor={Guedes, Alan Lívio Vasconcelos and Azevedo, Roberto Gerson de\nAlbuquerque and Moreno, Marcio Ferreira and Soares, Luiz Fernando Gomes},\ntitle={Specification of Multimodal Interactions in {NCL} },\nbooktitle={Proceedings of the 21st Brazilian Symposium on Multimedia and the\nWeb},\nseries={WebMedia '15},\nyear={2015},\nisbn={978-1-4503-3959-9},\nlocation={Manaus, Brazil},\npages={181--187},\nnumpages={7},\nurl={http://doi.acm.org/10.1145/2820426.2820436},\ndoi={10.1145/2820426.2820436},\nacmid={2820436},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={mui, multimodal interactions, multimodal interfaces, ncl, nested\ncontext language},\nabstract={This paper proposes an approach to integrate multimodal\nevents--both user-generated, e.g., audio recognizer, motion sensors; and\nuser-consumed, e.g., speech synthesizer, haptic synthesizer--into programming\nlanguages for the declarative specification of multimedia applications. More\nprecisely, it presents extensions to the NCL (Nested Context Language)\nmultimedia language. NCL is the standard declarative language for the\ndevelopment of interactive applications for Brazilian Digital TV and an ITU-T\nRecommendation for IPTV services. NCL applications extended with the\nmultimodal features are presented as results. Historically, Human-Computer\nInteraction research community has been focusing on user-generated\nmodalities, through studies on the user interaction. On the other hand,\nMultimedia community has been focusing on output modalities, through studies\non timing and multimedia processing. The proposals in this paper is an\nattempt to integrate concepts of both research communities in a unique\nhigh-level programming framework, which aims to assist the authoring of\nmultimedia/multimodal applications.},\nnote={Acceptance ratio: 34\\%},\n}\n\n
\n
\n\n\n
\n This paper proposes an approach to integrate multimodal events–both user-generated, e.g., audio recognizer, motion sensors; and user-consumed, e.g., speech synthesizer, haptic synthesizer–into programming languages for the declarative specification of multimedia applications. More precisely, it presents extensions to the NCL (Nested Context Language) multimedia language. NCL is the standard declarative language for the development of interactive applications for Brazilian Digital TV and an ITU-T Recommendation for IPTV services. NCL applications extended with the multimodal features are presented as results. Historically, the Human-Computer Interaction research community has focused on user-generated modalities, through studies on user interaction. The Multimedia community, on the other hand, has focused on output modalities, through studies on timing and multimedia processing. The proposals in this paper are an attempt to integrate concepts from both research communities in a single high-level programming framework, which aims to assist the authoring of multimedia/multimodal applications.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Approach to Convert NCL Applications into Stereoscopic 3D.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; Lima, G. F.; and Soares, L. F. G.\n\n\n \n\n\n\n In Proceedings of the 2015 Symposium on Document Engineering, of DocEng '15, pages 177–186, New York, NY, USA, 2015. ACM\n Acceptance ratio: 35%\n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 30 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2015_09_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Lima, Guilherme F. and\nSoares, Luiz Fernando Gomes},\ntitle={An Approach to Convert {NCL} Applications into Stereoscopic 3D},\nbooktitle={Proceedings of the 2015 Symposium on Document Engineering},\nseries={DocEng '15},\nyear={2015},\nisbn={978-1-4503-3307-8},\nlocation={Lausanne, Switzerland},\npages={177--186},\nnumpages={10},\nurl={http://doi.acm.org/10.1145/2682571.2797064},\ndoi={10.1145/2682571.2797064},\nacmid={2797064},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={3dtv, document processing, ginga, ncl, stereoscopic multimedia\napplications},\nabstract={This paper presents and discusses the internal operation of NCLSC\n(NCL Stereo Converter): a tool to convert a 2D interactive multimedia\napplication annotated with depth information to a stereoscopic-multimedia\napplication. Stereoscopic-multimedia applications are those that codify both\nthe left-eye and right-eye views, as required by stereoscopic 3D displays.\nNCLSC takes as input an NCL (Nested Context Language) document and outputs an\nNCL stereoscopic application codified in side-by-side or top-bottom format\n(both common input formats for 3DTV sets). NCL is the declarative language\nadopted in most Latin America countries for terrestrial digital TV middleware\nsystems and the ITU-T H.761 Recommendation for IPTV services. However, the\nproposed approach is not restricted to NCL and can be used by other\nlanguages. The depth annotation allows for positioning each 2D graphical\ncomponent in a layered (2.5D or 2D+depth) user interface. It is used by NCLSC\nto compute the screen parallax (offset) between the graphical elements in the\nleft and right views of the resulting stereoscopic application. When the\nresulting application is presented on stereoscopic 3D displays, such screen\nparallax induces retinal disparity, which creates the illusion of floating\nflat-2D graphical elements. NCLSC does not require any additional native\nmiddleware support to run in currently available 3D-enabled TV sets.\nMoreover, NCLSC can adapt, at run-time, the output application to different\ndisplay sizes, viewer distances, and viewer preferences, which are usually\nrequired for a proper balance between artistic effects and user experience.},\nnote={Acceptance ratio: 35\\%},\n}\n\n%%% 2014 %%%\n
\n
\n\n\n
\n This paper presents and discusses the internal operation of NCLSC (NCL Stereo Converter): a tool to convert a 2D interactive multimedia application annotated with depth information to a stereoscopic-multimedia application. Stereoscopic-multimedia applications are those that codify both the left-eye and right-eye views, as required by stereoscopic 3D displays. NCLSC takes as input an NCL (Nested Context Language) document and outputs an NCL stereoscopic application codified in side-by-side or top-bottom format (both common input formats for 3DTV sets). NCL is the declarative language adopted in most Latin American countries for terrestrial digital TV middleware systems and the ITU-T H.761 Recommendation for IPTV services. However, the proposed approach is not restricted to NCL and can be used by other languages. The depth annotation allows for positioning each 2D graphical component in a layered (2.5D or 2D+depth) user interface. It is used by NCLSC to compute the screen parallax (offset) between the graphical elements in the left and right views of the resulting stereoscopic application. When the resulting application is presented on stereoscopic 3D displays, such screen parallax induces retinal disparity, which creates the illusion of floating flat-2D graphical elements. NCLSC does not require any additional native middleware support to run in currently available 3D-enabled TV sets. Moreover, NCLSC can adapt, at run-time, the output application to different display sizes, viewer distances, and viewer preferences, which are usually required for a proper balance between artistic effects and user experience.\n
\n\n\n
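The depth-to-parallax step described above can be sketched briefly. The mapping below is an assumption for illustration (a linear map from normalized depth to a display-dependent parallax budget), not the exact function NCLSC implements, which also accounts for display size, viewer distance, and viewer preferences.

/* Illustrative sketch of the depth-annotation -> screen-parallax step.
 * Assumption: a linear map from normalized depth to a parallax budget. */
typedef struct { int left_x; int right_x; } StereoX;

/* depth: 0 = screen plane, 1 = closest to the viewer (pop-out).
 * max_parallax_px: parallax budget chosen for the target display/viewer. */
StereoX place_element(int x, float depth, float max_parallax_px)
{
    /* Screen parallax p = x_right - x_left; negative p (crossed disparity)
     * makes the element appear in front of the screen. Elements at the
     * screen plane (depth 0) get identical positions in both views. */
    float p = -depth * max_parallax_px;
    StereoX s;
    s.left_x  = x - (int)(p / 2.0f);
    s.right_x = x + (int)(p / 2.0f);
    return s;
}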
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Real-Time Depth-Image-Based Rendering for 3DTV Using OpenCL.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; Ismério, F.; Raposo, A. B.; and Soares, L. F. G.\n\n\n \n\n\n\n In Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; McMahan, R.; Jerald, J.; Zhang, H.; Drucker, S.; Kambhamettu, C.; El Choubassi, M.; Deng, Z.; and Carlson, M., editor(s), Advances in Visual Computing, volume 8887, of Lecture Notes in Computer Science, pages 97-106. Springer International Publishing, July 2014.\n Acceptance ratio: 26.4%\n\n\n\n
\n\n\n\n \n \n \"Real-TimePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 34 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@incollection{2014_12_azevedo,\nyear={2014},\nisbn={978-3-319-14248-7},\nbooktitle={Advances in Visual Computing},\nvolume={8887},\nseries={Lecture Notes in Computer Science},\neditor={Bebis, George and Boyle, Richard and Parvin, Bahram and Koracin,\nDarko and McMahan, Ryan and Jerald, Jason and Zhang, Hui and Drucker,\nSteven M. and Kambhamettu, Chandra and El Choubassi, Maha and Deng, Zhigang\nand Carlson, Mark},\ndoi={10.1007/978-3-319-14249-4_10},\ntitle={Real-Time Depth-Image-Based Rendering for {3DTV} Using {OpenCL}},\nurl={http://dx.doi.org/10.1007/978-3-319-14249-4_10},\npublisher={Springer International Publishing},\nkeywords={DIBR; OpenCL; GPGPU; Stereoscopic; 3DTV},\nauthor={Azevedo, Roberto Gerson de Albuquerque and Ismério, Fernando and\nRaposo, Alberto Barbosa and Soares, Luiz Fernando Gomes},\npages={97-106},\nnote={Acceptance ratio: 26.4\\%},\nmonth={July},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ginga extensions to support depth-based 3D media.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; and Soares, L. F. G.\n\n\n \n\n\n\n In 3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON), 2014, pages 1-4, July 2014. \n \n\n\n\n
\n\n\n\n \n \n \"GingaPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 20 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{2014_07a_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Soares, Luiz Fernando\nGomes},\nbooktitle={3DTV-Conference: The True Vision - Capture, Transmission and\nDisplay of 3D Video (3DTV-CON), 2014},\ntitle={Ginga extensions to support depth-based 3D media},\nyear={2014},\nmonth={July},\npages={1-4},\nkeywords={3DTV;Ginga;ISDB-T;Multimedia languages;NCL;Video-plus-depth},\ndoi={10.1109/3DTV.2014.6874715},\nurl={http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=\\&arnumber=6874715\\&isnumber=6874707},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Composer: meeting non-functional aspects of hypermedia authoring environment.\n \n \n \n\n\n \n Azevedo, R. G. d. A.; Araújo, E. C.; Lima, B.; Soares, L. F. G.; and Moreno, M. F.\n\n\n \n\n\n\n Multimedia Tools and Applications, 1-30. July 2014.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 10 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{2014_07b_azevedo,\nyear={2014},\nissn={1380-7501},\njournal={Multimedia Tools and Applications},\ndoi={10.1007/s11042-012-1216-8},\ntitle={Composer: meeting non-functional aspects of hypermedia authoring\nenvironment},\npublisher={Springer US},\nkeywords={Hypermedia authoring tools; Non-functional requirements;\nMicrokernel-based architecture},\nauthor={Azevedo, Roberto Gerson de Albuquerque and Araújo, Eduardo Cruz and\nLima, Bruno and Soares, Luiz Fernando Gomes and Moreno, Marcelo Ferreira},\npages={1-30},\nmonth={July},\nabstract={This paper discusses the importance of non-functional requirements\nin the design of hypermedia authoring tools, which typically provides\nmultiple graphical abstractions (views). It focuses on creating products and\nservices that operate robustly across a broad range of environments, and that\ntake into account the changeable needs of their users over time, as they\nbecome more familiar with the tool. In order to meet these non-functional\naspects, this paper proposes a microkernel-based architecture for authoring\ntools, where the microkernel is responsible for instantiating the requested\nextensions (plugins), maintaining the core data model that represents the\nhypermedia document under development, and notifying changes in this model to\nplugins interested in them. Based on the proposed architecture, a new version\nof Composer (an NCL authoring tool) is presented, rewritten from scratch.\nResults from experiments show that the discussed non-functional requirements\nare adequately met.},\n}\n\n%%% 2013 %%%\n
\n
\n\n\n
\n This paper discusses the importance of non-functional requirements in the design of hypermedia authoring tools, which typically provide multiple graphical abstractions (views). It focuses on creating products and services that operate robustly across a broad range of environments, and that take into account the changeable needs of their users over time, as they become more familiar with the tool. In order to meet these non-functional aspects, this paper proposes a microkernel-based architecture for authoring tools, where the microkernel is responsible for instantiating the requested extensions (plugins), maintaining the core data model that represents the hypermedia document under development, and notifying changes in this model to plugins interested in them. Based on the proposed architecture, a new version of Composer (an NCL authoring tool) is presented, rewritten from scratch. Results from experiments show that the discussed non-functional requirements are adequately met.\n
\n\n\n
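The microkernel arrangement described above can be sketched in a few lines. The interface below is hypothetical (the names are illustrative, not Composer's actual API); it only shows the contract: the core owns the document model and broadcasts each change to the plugins (views) that registered interest, which is how multiple views stay in sync.

/* Hypothetical microkernel/plugin contract sketch (illustrative names, not
 * Composer's real API): the kernel owns the model and fans out changes. */
typedef struct {
    const char *entity_id;   /* which model entity changed */
    const char *attribute;   /* which attribute changed */
    const char *new_value;
} ModelChange;

typedef void (*ChangeListener)(const ModelChange *chg, void *plugin_state);

#define MAX_PLUGINS 16
static ChangeListener listeners[MAX_PLUGINS];
static void *states[MAX_PLUGINS];
static int n_plugins = 0;

void kernel_register_plugin(ChangeListener cb, void *state)
{
    if (n_plugins < MAX_PLUGINS) {
        listeners[n_plugins] = cb;
        states[n_plugins] = state;
        n_plugins++;
    }
}

/* Called by whichever view edited the model; every other registered view
 * reacts to the notification and updates its own presentation. */
void kernel_notify_change(const ModelChange *chg)
{
    for (int i = 0; i < n_plugins; i++)
        listeners[i](chg, states[i]);
}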
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Revisiting the inter and intra media synchronization model of the NCL player architecture.\n \n \n \n\n\n \n Soares, L. F. G.; Moreno, M. F.; Lima, G. F.; Azevedo, R. G. d. A.; Araújo, E. C.; Rios, R.; and Batista, C. E.\n\n\n \n\n\n\n In Proceedings of Media Synchronization Workshop (MediaSync) 2013, of MediaSync '13, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013_soares,\nauthor={Soares, Luiz Fernando Gomes and Moreno, Marcio Ferreira and Lima,\nGuilherme Ferreira and Azevedo, Roberto Gerson de Albuquerque and Araújo,\nEduardo Cruz and Rios, Ricardo and Batista, Carlos Eduardo},\ntitle={Revisiting the inter and intra media synchronization model of the\n{NCL} player architecture.},\nbooktitle={Proceedings of Media Synchronization Workshop (MediaSync) 2013},\nseries={MediaSync '13},\nyear={2013},\nlocation={Nantes, France},\nnumpages={8},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimedia authoring based on templates and semi-automatic generated wizards.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; Santos, R. C. M.; Araújo, E. C.; Soares, L. F. G.; and Soares Neto, C. d. S.\n\n\n \n\n\n\n In Proceedings of the 2013 ACM symposium on Document engineering, of DocEng '13, pages 205–214, New York, NY, USA, 2013. ACM\n Acceptance ratio: 32%\n\n\n\n
\n\n\n\n \n \n \"MultimediaPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 28 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2013_09_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Santos, Rodrigo Costa\nMesquita and Araújo, Eduardo Cruz and Soares, Luiz Fernando Gomes and Soares\nNeto, Carlos de Salles},\ntitle={Multimedia authoring based on templates and semi-automatic generated\nwizards},\nbooktitle={Proceedings of the 2013 ACM symposium on Document engineering},\nseries={DocEng '13},\nyear={2013},\nisbn={978-1-4503-1789-4},\nlocation={Florence, Italy},\npages={205--214},\nnumpages={10},\nurl={http://doi.acm.org/10.1145/2494266.2494283},\ndoi={10.1145/2494266.2494283},\nacmid={2494283},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={authoring, multimedia, tal, template-based authoring, wizards},\nnote={Acceptance ratio: 32\\%},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n NCL+Depth: Extending NCL for Stereo/Autostereoscopic 3D Displays.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; and Soares, L. F. G.\n\n\n \n\n\n\n In Proceedings of the 19th Brazilian Symposium on Multimedia and the Web, of WebMedia '13, pages 185–192, New York, NY, USA, 2013. ACM\n \n\n\n\n
\n\n\n\n \n \n \"NCL+Depth:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 31 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2013_11_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Soares, Luiz Fernando\nGomes},\ntitle={ {NCL+Depth}: Extending {NCL} for Stereo/Autostereoscopic 3D\nDisplays},\nbooktitle={Proceedings of the 19th Brazilian Symposium on Multimedia and the\nWeb},\nseries={WebMedia '13},\nyear={2013},\nisbn={978-1-4503-2559-2},\nlocation={Salvador, Brazil},\npages={185--192},\nnumpages={8},\nurl={http://doi.acm.org/10.1145/2526188.2526203},\ndoi={10.1145/2526188.2526203},\nacmid={2526203},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={3DTV, multimedia, nested context language, video-plus-depth},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reducing the Complexity of NCL Player Implementations.\n \n \n \n \n\n\n \n Lima, G. A. F.; Soares, L. F. G.; Azevedo, R. G. d. A.; and Moreno, M. F.\n\n\n \n\n\n\n In Proceedings of the 19th Brazilian Symposium on Multimedia and the Web, of WebMedia '13, pages 297–304, New York, NY, USA, 2013. ACM\n \n\n\n\n
\n\n\n\n \n \n \"ReducingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 30 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2013_lima,\nauthor={Lima, Guilherme Augusto Ferreira and Soares, Luiz Fernando Gomes and\nAzevedo, Roberto Gerson de Albuquerque and Moreno, Marcio Ferreira},\ntitle={Reducing the Complexity of {NCL} Player Implementations},\nbooktitle={Proceedings of the 19th Brazilian Symposium on Multimedia and the\nWeb},\nseries={WebMedia '13},\nyear={2013},\nisbn={978-1-4503-2559-2},\nlocation={Salvador, Brazil},\npages={297--304},\nnumpages={8},\nurl={http://doi.acm.org/10.1145/2526188.2526217},\ndoi={10.1145/2526188.2526217},\nacmid={2526217},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={NCL players, NCL raw profile, web-based players},\n}\n\n%%% 2012 %%%\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Embedding 3D objects into NCL multimedia presentations.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; and Soares, L. F. G.\n\n\n \n\n\n\n In Proceedings of the 17th International Conference on 3D Web Technology, of Web3D '12, pages 143–151, New York, NY, USA, 2012. ACM\n \n\n\n\n
\n\n\n\n \n \n \"EmbeddingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 20 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2012_08_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Soares, Luiz Fernando\nGomes},\ntitle={Embedding 3D objects into {NCL} multimedia presentations},\nbooktitle={Proceedings of the 17th International Conference on 3D Web\nTechnology},\nseries={Web3D '12},\nyear={2012},\nisbn={978-1-4503-1432-9},\nlocation={Los Angeles, California},\npages={143--151},\nnumpages={9},\nurl={http://doi.acm.org/10.1145/2338714.2338739},\ndoi={10.1145/2338714.2338739},\nacmid={2338739},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={NCL, X3D, declarative languages, digital TV},\n}\n\n%%% 2011 %%%\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n TeleMídia and 3D Hypermedia Research.\n \n \n \n \n\n\n \n Soares, L. F. G.; Azevedo, R. G. d. A.; and Moreno, M. F.\n\n\n \n\n\n\n SBC Journal on 3D Interactive Systems, 2(2). 2011.\n \n\n\n\n
\n\n\n\n \n \n \"TeleMídiaPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 23 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011_08_soares,\nauthor={Soares, Luiz Fernando Gomes and Azevedo, Roberto Gerson de\nAlbuquerque and Moreno, Marcio Ferreira},\ntitle={TeleMídia and 3D Hypermedia Research},\njournal={SBC Journal on 3D Interactive Systems},\nyear=2011,\nvolume={2},\nnumber={2},\nissn={2236-3297},\nurl={http://www.telemidia.puc-rio.br/files/biblio/2011_12_soares.pdf},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Textual authoring of interactive digital TV applications.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; Neto, C. d. S. S.; Teixeira, M. M.; Santos, R. C. M.; and Gomes, T. A.\n\n\n \n\n\n\n In Proceedings of the 9th international interactive conference on Interactive television, of EuroITV '11, pages 235–244, New York, NY, USA, 2011. ACM\n \n\n\n\n
\n\n\n\n \n \n \"TextualPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 32 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{2011_08_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Neto, Carlos de Salles\nSoares and Teixeira, Mário Meireles and Santos, Rodrigo Costa Mesquita and\nGomes, Thiago Alencar},\ntitle={Textual authoring of interactive digital {TV} applications},\nbooktitle={Proceedings of the 9th international interactive conference on\nInteractive television},\nseries={EuroITV '11},\nyear={2011},\nisbn={978-1-4503-0602-7},\nlocation={Lisbon, Portugal},\npages={235--244},\nnumpages={10},\nurl={http://doi.acm.org/10.1145/2000119.2000169},\ndoi={10.1145/2000119.2000169},\nacmid={2000169},\npublisher={ACM},\naddress={New York, NY, USA},\nkeywords={ginga-ncl, hypermedia authoring, ncl eclipse, nested context\nlanguage, textual authoring},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Comunicação Digital e a Interdisciplinaridade na Produção de Conteúdo Interativo.\n \n \n \n \n\n\n \n Angeluci, A. C. B.; Azevedo, R. G. d. A.; and Soares, L. F. G.\n\n\n \n\n\n\n Revista Comunicação Midiática, 6(1). Jan/Apr 2011.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 18 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011_01_angelucci,\nauthor={Angeluci, Alan César Belo and Azevedo, Roberto Gerson de Albuquerque\nand Soares, Luiz Fernando Gomes},\ntitle={A Comunicação Digital e a Interdisciplinaridade na Produção de\nConteúdo Interativo},\njournal={Revista Comunicação Midiática},\nyear=2011,\nmonth={Jan/Apr},\nvolume=6,\nnumber=1,\nissn={2236-8000},\nurl={http://www.mundodigital.unesp.br/revista/index.php/comunicacaomidiatica/article/view/58/43},\nlanguage={Portuguese},\n}\n\n%%% 2010 %%%\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Correção de Código Semi-Automática em Nested Context Language.\n \n \n \n\n\n \n Santos, R. C. M.; Azevedo, R. G. d. A.; Soares Neto, C. d. S.; and Teixeira, M. A. M.\n\n\n \n\n\n\n In XVI Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia 2010), October 2010. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010_10_santos,\nauthor={Santos, Rodrigo Costa Mesquita and Azevedo, Roberto Gerson de\nAlbuquerque and Soares Neto, Carlos de Salles and Teixeira, Mário Antônio\nMeireles},\ntitle={Correção de Código Semi-Automática em {Nested Context Language}},\nbooktitle={XVI Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia\n2010)},\nyear=2010,\nmonth={October},\nlocation={Belo Horizonte, MG, Brasil},\n_url={http://www.laws.deinf.ufma.br/~roberto/publications/pdf/2010_10_santos.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Estendendo NCL: Objetos NCLua como Exibidores para Novos Tipos de Mídia.\n \n \n \n\n\n \n Sousa Júnior, J. G.; Azevedo, R. G. d. A.; Soares Neto, C. d. S.; and Soares, L. F. G.\n\n\n \n\n\n\n In XVI Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia 2010), October 2010. \n I Workshop de TV Digital\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010_10_sousa,\nauthor={Sousa Júnior, José Geraldo and Azevedo, Roberto Gerson de\nAlbuquerque and Soares Neto, Carlos de Salles and Soares, Luiz Fernando\nGomes},\ntitle={Estendendo {NCL}: Objetos {NCLua} como Exibidores para Novos Tipos de\nMídia},\nbooktitle={XVI Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia\n2010)},\nyear=2010,\nmonth={October},\nlocation={Belo Horizonte, MG, Brasil},\nnote={I Workshop de TV Digital},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Composer 3: Ambiente de Autoria Extensível, Adaptável e Multiplataforma.\n \n \n \n \n\n\n \n Lima, B. S.; Azevedo, R. G. d. A.; Moreno, M. F.; and Soares, L. F. G.\n\n\n \n\n\n\n In XVI Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia 2010), October 2010. \n I Workshop de TV Digital\n\n\n\n
\n\n\n\n \n \n \"ComposerPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 23 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010_10_lima,\nauthor={Lima, Bruno Seabra and Azevedo, Roberto Gerson de Albuquerque and\nMoreno, Marcelo Ferreira and Soares, Luiz Fernando Gomes},\ntitle={Composer 3: Ambiente de Autoria Extensível, Adaptável e\nMultiplataforma},\nbooktitle={XVI Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia\n2010)},\nyear=2010,\nmonth={October},\nlocation={Belo Horizonte, MG, Brasil},\nnote={I Workshop de TV Digital},\nurl={http://www.telemidia.puc-rio.br/files/biblio/2010_10_lima.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Control and presentation of three-dimensional media objects in NCL.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.\n\n\n \n\n\n\n Masters thesis, PUC-Rio, Rio de Janeiro, August 2010.\n \n\n\n\n
\n\n\n\n \n \n \"ControlPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 15 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@MastersThesis{2010_08_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque},\ntitle={Control and presentation of three-dimensional media objects in {NCL}},\ntitle_ptbr={Suporte ao Controle e à Apresentação de Objetos de Mídia\nTridimensionais em {NCL}},\ntype={Masters thesis},\nschool={PUC-Rio},\naddress={Rio de Janeiro},\nyear={2010},\nmonth={August},\nnote={Apresentada como requisito parcial para obtenção do título Mestre\nem Informática},\nabstract={O mundo em que vivemos é formado por um espaço fisicamente\ntridimensional e é natural que o ser humano busque representá-lo da forma\nmais fiel possível, também em ambientes virtuais. Na Web, por exemplo, já\nexistem vários esforços na busca do suporte a criação de aplicações 3D\ninterativas de forma declarativa, grande parte deles são baseados nos\nconceitos de grafos de cena e grafos de rotas. Grafos de cena têm se tornado\num padrão de modelagem espacial de aplicações tridimensionais por meio de\numa abordagem hierárquica e declarativa. Por sua vez, o comportamento do\ngrafo de cena usualmente tem sido especificado por meio de grafos de rotas ou\nlinguagens imperativas. No que se refere à TV Digital Interativa (TVDi), por\noutro lado, ainda há muito a ser feito. Nested Context Language (NCL) é a\nlinguagem declarativa padrão para o sistema de TV Digital Terrestre ISDB-TB\ne Recomendação ITU-T para serviços IPTV, que permite a autoria de\ndocumentos hipermídia por meio de uma abordagem simples e expressiva. Embora\nNCL não restrinja qualquer tipo de objeto de mídia, na sua versão atual\n(3.0), ela trata apenas objetos de mídia bidimensionais, relacionando-os\ntemporal e espacialmente. Dada a importância de NCL no cenário de TVDi,\nesta pesquisa objetiva discutir como tal linguagem pode também controlar\nobjetos tridimensionais, permitindo a criação de aplicações 3D para TVDi.\nComo caso especial, este trabalho discute como NCL pode controlar o\ncomportamento de objetos 3D compostos representados por grafos de cenas,\ndiscutindo suas vantagens e desvantagens em relação ao uso de grafos de\nrotas. Visando testar a proposta deste trabalho, foi adicionado um exibidor\nde mídia X3D (linguagem baseada em grafo de cena) à implementação de\nreferência do Ginga-NCL, responsável por executar aplicações NCL.\nAdicionalmente, também é proposta a extensão dos eventos tratados por NCL,\npara refletir eventos específicos de ambientes 3D, e a incorporação de\nregiões, baseadas em objetos geométricos 3D, à NCL, com o objetivo de\npossibilitar a apresentação de objetos de mídia 2D sobre a superfície de\nobjetos 3D.},\nurl={http://www.maxwell.vrac.puc-rio.br/Busca_etds.php?strSecao=resultado&nrSeq=168641},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n
\n The world we live in is a physically three-dimensional space, and it is natural for human beings to seek to represent it as faithfully as possible in virtual environments as well. On the Web, for example, there are already several efforts to support the declarative creation of interactive 3D applications, most of them based on the concepts of scene graphs and route graphs. Scene graphs have become a standard for the spatial modeling of three-dimensional applications through a hierarchical and declarative approach. The behavior of the scene graph, in turn, has usually been specified through route graphs or imperative languages. Regarding Interactive Digital TV (iDTV), on the other hand, there is still much to be done. Nested Context Language (NCL) is the standard declarative language of the ISDB-TB Terrestrial Digital TV system and an ITU-T Recommendation for IPTV services, and it supports the authoring of hypermedia documents through a simple and expressive approach. Although NCL does not restrict the types of media objects it can handle, in its current version (3.0) it deals only with two-dimensional media objects, relating them in time and space. Given the importance of NCL in the iDTV scenario, this research discusses how the language can also control three-dimensional objects, enabling the creation of 3D applications for iDTV. As a special case, this work discusses how NCL can control the behavior of composite 3D objects represented by scene graphs, weighing the advantages and disadvantages of this approach against the use of route graphs. To test the proposal, an X3D media player (X3D being a scene-graph-based language) was added to the Ginga-NCL reference implementation, which is responsible for running NCL applications. Additionally, the work proposes extending the events handled by NCL to reflect events specific to 3D environments, and incorporating regions based on 3D geometric objects into NCL, so that 2D media objects can be presented on the surfaces of 3D objects.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n NCL Eclipse: Ferramenta de Autoria Textual para a Linguagem NCL.\n \n \n \n\n\n \n Santos, R. C. M.; Gomes, T. A.; Azevedo, R. G. d. A.; Soares Neto, C. d. S.; and Teixeira, M. A. M.\n\n\n \n\n\n\n In XI Fórum Internacional de Software Livre (FISL 11), July 2010. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010_05_santos,\nauthor={Santos, Rodrigo Costa Mesquita and Gomes, T. A. and Azevedo, Roberto\nGerson de Albuquerque and Soares Neto, Carlos de Salles and Teixeira, M. A.\nM.},\ntitle={ {NCL Eclipse}: Ferramenta de Autoria Textual para a Linguagem {NCL}},\nbooktitle={XI Fórum Internacional de Software Livre (FISL 11)},\nyear={2010},\nmonth={July},\nlocation={Porto Alegre, RS, Brasil},\n_url={http://laws.deinf.ufma.br/~roberto/publications/pdf/2010_05_santos.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n NCL Eclipse: Ambiente Integrado para o Desenvolvimento de Aplicações para TV Digital Interativa em Nested Context Language.\n \n \n \n\n\n \n Azevedo, R. G. d. A.; Teixeira, M. M.; and Soares Neto, C. d. S.\n\n\n \n\n\n\n REIC - Revista Eletrônica de Iniciação Científica, (4). December 2009.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2009_12_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Teixeira, Mário Meireles\nand Soares Neto, Carlos de Salles},\ntitle={ {NCL Eclipse}: Ambiente Integrado para o Desenvolvimento de\nAplicações para TV Digital Interativa em {Nested Context Language}},\njournal={REIC - Revista Eletrônica de Iniciação Científica},\nyear=2009,\nmonth={December},\nnumber=4,\nissn={1519-8219},\n_url={http://laws.deinf.ufma.br/~roberto/publications/pdf/2009_12_azevedo.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n O Uso da Linguagem Declarativa do Ginga-NCL na Construção de Conteúdos Audiovisuais Interativos: A Experiência do \"Roteiros do Dia\".\n \n \n \n \n\n\n \n Angeluci, A. C. B.; Azevedo, R. G. d. A.; and Soares, L. F. G.\n\n\n \n\n\n\n In I Simpósio Internacional de Televisão Digital (SIMTVD), November 2009. \n \n\n\n\n
\n\n\n\n \n \n \"OPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 18 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009_11_angelucci,\nauthor={Angeluci, Alan César Belo and Azevedo, Roberto Gerson de\nAlbuquerque and Soares, Luiz Fernando Gomes},\ntitle={O Uso da Linguagem Declarativa do {Ginga-NCL} na Construção de\nConteúdos Audiovisuais Interativos: A Experiência do "Roteiros do Dia"},\nbooktitle={I Simpósio Internacional de Televisão Digital (SIMTVD)},\npages={},\npublisher={},\nyear=2009,\nmonth={November},\nlocation={Bauru, SP, Brazil},\nurl={http://www.telemidia.puc-rio.br/files/biblio/2009_11_angeluci.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Desenvolvimento de Aplicações Declarativas para TV Digital no Middleware Ginga com Objetos Imperativos Lua.\n \n \n \n \n\n\n \n Sant'Anna, F.; Soares Neto, C. d. S.; Azevedo, R. G. d. A.; and Barbosa, S. D. J.\n\n\n \n\n\n\n 2009.\n \n\n\n\n
\n\n\n\n \n \n \"DesenvolvimentoPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 25 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{2009_10_santanna,\nauthor={Sant'Anna, Francisco and Soares Neto, Carlos de Salles and Azevedo,\nRoberto Gerson de Albuquerque and Barbosa, Simone Diniz Junqueira},\ntitle={Desenvolvimento de Aplicações Declarativas para TV Digital no\nMiddleware Ginga com Objetos Imperativos Lua},\nbooktitle={},\npages={},\npublisher={},\nyear=2009,\neditor={},\nlocation={Fortaleza, CE, Brazil},\nurl={http://www.telemidia.puc-rio.br/files/biblio/2009_10_santanna.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Autoria de Documentos Multimídia Baseada na Identificação e Preenchimento de Estruturas Recorrentes.\n \n \n \n \n\n\n \n Lima, B. S.; Azevedo, R. G. d. A.; and Soares Neto, C. d. S.\n\n\n \n\n\n\n In XV Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia 2009), October 2009. \n \n\n\n\n
\n\n\n\n \n \n \"AutoriaPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 20 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009_10_lima,\nauthor={Lima, Bruno Seabra and Azevedo, Roberto Gerson de Albuquerque and\nSoares Neto, Carlos de Salles},\ntitle={Autoria de Documentos Multimídia Baseada na Identificação e\nPreenchimento de Estruturas Recorrentes},\nbooktitle={XV Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia\n2009)},\nyear=2009,\nmonth={October},\nlocation={Fortaleza, CE, Brazil},\nurl={http://www.telemidia.puc-rio.br/files/biblio/2009_10_lima.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Uma Abordagem para Autoria Textual de Documentos Hipermídia Baseada no Uso de Visualização Programática e Navegação Hipertextual.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; Lima, B. S.; Soares Neto, C. d. S.; and Teixeira, M. M.\n\n\n \n\n\n\n In XV Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia 2009), October 2009. \n \n\n\n\n
\n\n\n\n \n \n \"UmaPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 20 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009_10_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Lima, Bruno Seabra and\nSoares Neto, Carlos de Salles and Teixeira, Mário Meireles},\ntitle={Uma Abordagem para Autoria Textual de Documentos Hipermídia Baseada\nno Uso de Visualização Programática e Navegação Hipertextual},\nbooktitle={XV Simpósio Brasileiro de Sistemas Multimídia e Web (WebMedia\n2009)},\nyear=2009,\nmonth={October},\nlocation={Fortaleza, CE, Brazil},\nurl={http://www.telemidia.puc-rio.br/files/biblio/2009_10_azevedo.pdf},\nlanguage={Portuguese},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n NCL Eclipse: Ambiente Integrado para o Desenvolvimento de Aplicações Interativas em Nested Context Language.\n \n \n \n \n\n\n \n Azevedo, R. G. d. A.; Teixeira, M. M.; and Soares Neto, C. d. S.\n\n\n \n\n\n\n In Salão de Ferramentas do Simpósio Brasileiro de Redes de Computadores (SBRC), May 2009. \n \n\n\n\n
\n\n\n\n \n \n \"NCLPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 16 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009_05_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Teixeira, Mario Meireles\nand Soares Neto, Carlos de Salles},\ntitle={ {NCL Eclipse}: Ambiente Integrado para o Desenvolvimento de\nAplicações Interativas em {Nested Context Language}},\nbooktitle={Salão de Ferramentas do Simpósio Brasileiro de Redes de\nComputadores (SBRC)},\nyear=2009,\nmonth={May},\nurl={http://www.lbd.dcc.ufmg.br/colecoes/sbrc/2009/087.pdf},\nlanguage={Portuguese},\n}\n\n%%% 2008 %%%\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n NCL-validator: um processo de validação sintática e semântica de documentos multimídia NCL.\n \n \n \n\n\n \n Araújo, E. C.; Azevedo, R. G. d. A.; and Soares Neto, C. d. S.\n\n\n \n\n\n\n In II Jornada de Informática do Maranhão, June 2008. \n Best Paper Award\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008_06_araujo,\nauthor={Araújo, Eduardo Cruz and Azevedo, Roberto Gerson de Albuquerque and\nSoares Neto, Carlos de Salles},\ntitle={ {NCL}-validator: um processo de validação sintática e semântica\nde documentos multimídia {NCL}},\nbooktitle={II Jornada de Informática do Maranhão},\nyear=2008,\nmonth={June},\nnote={Best Paper Award},\n_url={http://www.laws.deinf.ufma.br/~roberto/publications/pdf/2008_06_araujo.pdf},\nlanguage={Portuguese},\n}\n\n%%% 2006 %%%\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Deinf Media Center: Infra-Estrutura para Armazenamento e Distribuição de Conteúdo Multimídia Streaming.\n \n \n \n\n\n \n Azevedo, R. G. d. A.; Teixeira, M. M.; and Araújo, E. C.\n\n\n \n\n\n\n In I Jornada de Informática do Maranhão, November 2006. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2006_06_azevedo,\nauthor={Azevedo, Roberto Gerson de Albuquerque and Teixeira, Mário Meireles\nand Araújo, Eduardo Cruz},\ntitle={Deinf Media Center: Infra-Estrutura para Armazenamento e\nDistribuição de Conteúdo Multimídia Streaming},\nbooktitle={I Jornada de Informática do Maranhão},\nyear=2006,\nmonth={November},\nlanguage={Portuguese},\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);