You can create a new website with this list, or embed it in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/cv.txt?dl=0&jsonp=1"></script>

PHP:

<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/cv.txt?dl=0&jsonp=1");
print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/cv.txt?dl=0&jsonp=1"></iframe>

For more details, see the documentation.
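If you want to fetch the same content server-side from Python instead of PHP, a minimal sketch is shown below. It assumes only that the bibbase.org/show URL above returns the rendered list as text; the variable names and the use of the standard urllib module are illustrative choices, not part of BibBase's documentation.

# Minimal sketch: fetch the BibBase-rendered list server-side, analogous to the PHP snippet above.
# Assumes only that the bibbase.org/show URL returns the list as text; not an official BibBase client.
from urllib.request import urlopen

BIBBASE_URL = ("https://bibbase.org/show?bib="
               "https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/cv.txt?dl=0&jsonp=1")

with urlopen(BIBBASE_URL) as response:
    contents = response.read().decode("utf-8")

print(contents)  # insert this string into your own page template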

2022 (1)
Use of Affective Visual Information for Summarization of Human-Centric Videos.
Köprü, B.; and Erzin, E. CoRR, 1-14. 2022.

@article{DBLP:journals/corr/abs-2107-03783,
    author = {K{\"{o}}pr{\"{u}}, Berkay and Erzin, Engin},
    title = "Use of Affective Visual Information for Summarization of Human-Centric Videos",
    journal = "CoRR",
    year = "2022",
    abstract = "The increasing volume of user-generated human-centric video content and its applications, such as video retrieval and browsing, require compact representations addressed by the video summarization literature. Current supervised studies formulate video summarization as a sequence-to-sequence learning problem, and the existing solutions often neglect the surge of the human-centric view, which inherently contains affective content. In this study, we investigate the affective-information enriched supervised video summarization task for human-centric videos. First, we train a visual input-driven state-of-the-art continuous emotion recognition model (CER-NET) on the RECOLA dataset to estimate activation and valence attributes. Then, we integrate the estimated emotional attributes and their high-level embeddings from the CER-NET with the visual information to define the proposed affective video summarization (AVSUM) architectures. In addition, we investigate the use of attention to improve the AVSUM architectures and propose two new architectures based on temporal attention (TA-AVSUM) and spatial attention (SA-AVSUM). We conduct video summarization experiments on the TvSum and COGNIMUSE datasets. The proposed temporal attention-based TA-AVSUM architecture attains competitive video summarization performances with strong improvements for the human-centric videos compared to the state-of-the-art in terms of F-score, self-defined face recall, and rank correlation metrics.",
    keywords = "CV,HCI",
    pages = "1-14",
    url = "https://ieeexplore.ieee.org/document/9954146",
    doi = "10.1109/TAFFC.2022.3222882",
    publisher = "IEEE"
}
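The temporal attention behind TA-AVSUM (see the abstract above) can be pictured with a generic sketch: score each frame's feature vector, softmax the scores over time, and pool the frames with those weights. The sketch below only illustrates that standard mechanism; the dimensions and the scoring vector are assumptions, not the authors' TA-AVSUM implementation.

# Generic illustration of temporal attention over frame features; not the TA-AVSUM code.
import numpy as np

def temporal_attention(frame_features, score_vector):
    """frame_features: (T, D) per-frame features; score_vector: (D,) assumed learned weights."""
    scores = frame_features @ score_vector        # one scalar score per frame, shape (T,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                      # softmax over time
    pooled = weights @ frame_features             # attention-weighted summary, shape (D,)
    return weights, pooled

frames = np.random.rand(120, 64)                  # toy features for 120 frames
frame_importance, summary = temporal_attention(frames, np.random.rand(64))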
2021 (3)
Computer Vision for Autonomous Vehicles: Problems, Datasets and State of the Art.
Janai, J.; Güney, F.; Behl, A.; and Geiger, A. 2021.

@misc{janai2021computer,
    author = {Janai, Joel and G{\"u}ney, Fatma and Behl, Aseem and Geiger, Andreas},
    title = "Computer Vision for Autonomous Vehicles: Problems, Datasets and State of the Art",
    year = "2021",
    keywords = "CV"
}
mustGAN: multi-stream Generative Adversarial Networks for MR Image Synthesis.
Yurt, M.; Dar, S. U.; Erdem, A.; Erdem, E.; Oguz, K. K.; and Çukur, T. Medical Image Analysis, 70: 101944. 2021.

@article{YURT2021101944,
    author = "Yurt, Mahmut and Dar, Salman UH and Erdem, Aykut and Erdem, Erkut and Oguz, Kader K and {\c{C}}ukur, Tolga",
    title = "mustGAN: multi-stream Generative Adversarial Networks for MR Image Synthesis",
    journal = "Medical Image Analysis",
    volume = "70",
    pages = "101944",
    year = "2021",
    issn = "1361-8415",
    doi = "https://doi.org/10.1016/j.media.2020.101944",
    url = "https://www.sciencedirect.com/science/article/pii/S136184152030308X",
    keywords = "Magnetic resonance imaging (MRI),Multi-contrast,Generative adversarial networks (GAN),Image synthesis,Multi-stream,Fusion,CV",
    abstract = "Multi-contrast MRI protocols increase the level of morphological information available for diagnosis. Yet, the number and quality of contrasts are limited in practice by various factors including scan time and patient motion. Synthesis of missing or corrupted contrasts from other high-quality ones can alleviate this limitation. When a single target contrast is of interest, common approaches for multi-contrast MRI involve either one-to-one or many-to-one synthesis methods depending on their input. One-to-one methods take as input a single source contrast, and they learn a latent representation sensitive to unique features of the source. Meanwhile, many-to-one methods receive multiple distinct sources, and they learn a shared latent representation more sensitive to common features across sources. For enhanced image synthesis, we propose a multi-stream approach that aggregates information across multiple source images via a mixture of multiple one-to-one streams and a joint many-to-one stream. The complementary feature maps generated in the one-to-one streams and the shared feature maps generated in the many-to-one stream are combined with a fusion block. The location of the fusion block is adaptively modified to maximize task-specific performance. Quantitative and radiological assessments on T1-, T2-, PD-weighted, and FLAIR images clearly demonstrate the superior performance of the proposed method compared to previous state-of-the-art one-to-one and many-to-one methods."
}
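The fusion block described in the abstract combines per-stream and shared feature maps. The sketch below is only a generic, hypothetical illustration of channel-wise fusion (concatenation followed by a 1x1-convolution-style mixing); the shapes and mixing weights are assumptions, and this is not the mustGAN implementation.

# Hypothetical illustration of fusing multi-stream feature maps by channel concatenation and mixing.
# Not the mustGAN fusion block; shapes and the 1x1-convolution-style mixing are assumptions.
import numpy as np

def fuse_streams(one_to_one_maps, shared_map, mix_weights):
    """Concatenate per-stream and shared feature maps along channels, then mix them."""
    stacked = np.concatenate(one_to_one_maps + [shared_map], axis=0)   # (C_total, H, W)
    return np.tensordot(mix_weights, stacked, axes=([1], [0]))         # (C_out, H, W)

streams = [np.random.rand(8, 16, 16) for _ in range(3)]   # three one-to-one streams
shared = np.random.rand(8, 16, 16)                        # the many-to-one stream
mix = np.random.rand(4, 32)                               # maps 32 input channels to 4 output channels
fused = fuse_streams(streams, shared, mix)                # shape (4, 16, 16)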
Generating visual story graphs with application to photo album summarization.
Celikkale, B.; Erdogan, G.; Erdem, A.; and Erdem, E. Signal Processing: Image Communication, 90: 116033. 2021.

@article{CELIKKALE2021116033,
    author = "Celikkale, Bora and Erdogan, Goksu and Erdem, Aykut and Erdem, Erkut",
    title = "Generating visual story graphs with application to photo album summarization",
    journal = "Signal Processing: Image Communication",
    volume = "90",
    pages = "116033",
    year = "2021",
    issn = "0923-5965",
    doi = "https://doi.org/10.1016/j.image.2020.116033",
    url = "https://www.sciencedirect.com/science/article/pii/S092359652030182X",
    keywords = "Visual story graph,Structured summarization,CV",
    abstract = "Making sense of ever-growing amount of visual data available on the web is difficult, especially when considered in an unsupervised manner. As a step towards this goal, this study tackles a relatively less explored topic of generating structured summaries of large photo collections. Our framework relies on the notion of a story graph which captures the main narratives in the data and their relationships based on their visual, textual and spatio-temporal features. Its output is a directed graph with a set of possibly intersecting paths. Our proposed approach identifies coherent visual storylines and exploits sub-modularity to select a subset of these lines which covers the general narrative at most. Our experimental analysis reveals that extracted story graphs allow for obtaining better results when utilized as priors for photo album summarization. Moreover, our user studies show that our approach delivers better performance on next image prediction and coverage tasks than the state-of-the-art."
}
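The subset selection step mentioned in the abstract relies on submodularity; the standard greedy algorithm for monotone submodular maximization under a cardinality budget illustrates the idea. The sketch below uses a toy coverage objective and hypothetical item names; it is not the paper's objective or implementation.

# Generic greedy maximization of a monotone submodular (coverage) objective under a size budget.
# Toy objective and data; not the paper's storyline-selection code.

def greedy_select(candidates, covered_by, budget):
    """candidates: item ids; covered_by: dict mapping item -> set of covered elements."""
    selected, covered = [], set()
    for _ in range(budget):
        best, best_gain = None, 0
        for item in candidates:
            if item in selected:
                continue
            gain = len(covered_by[item] - covered)    # marginal coverage gain of adding item
            if gain > best_gain:
                best, best_gain = item, gain
        if best is None:                              # nothing adds coverage; stop early
            break
        selected.append(best)
        covered |= covered_by[best]
    return selected

storylines = {"a": {1, 2, 3}, "b": {3, 4}, "c": {5}, "d": {1, 5}}
print(greedy_select(storylines, storylines, budget=2))   # -> ['a', 'b']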
2020 (2)
Computer Vision for Autonomous Vehicles: Problems, Datasets and State-of-the-Art.
Janai, J.; Güney, F.; Behl, A.; and Geiger, A. Found. Trends Comput. Graph. Vis., 12: 1-308. 2020.

@article{Janai2020ComputerVF,
    author = {Janai, Joel and G{\"u}ney, Fatma and Behl, A. and Geiger, Andreas},
    title = "Computer Vision for Autonomous Vehicles: Problems, Datasets and State-of-the-Art",
    journal = "Found. Trends Comput. Graph. Vis.",
    year = "2020",
    volume = "12",
    pages = "1-308",
    keywords = "CV"
}
Hedging static saliency models to predict dynamic saliency.
Kavak, Y.; Erdem, E.; and Erdem, A. Signal Processing: Image Communication, 81: 115694. 2020.

@article{KAVAK2020115694,
    author = "Kavak, Yasin and Erdem, Erkut and Erdem, Aykut",
    title = "Hedging static saliency models to predict dynamic saliency",
    journal = "Signal Processing: Image Communication",
    volume = "81",
    pages = "115694",
    year = "2020",
    issn = "0923-5965",
    doi = "https://doi.org/10.1016/j.image.2019.115694",
    url = "https://www.sciencedirect.com/science/article/pii/S0923596518311846",
    keywords = "Dynamic saliency,Hedge algorithm,Decision theoretic online learning,Feature integration,CV",
    abstract = "In recent years, many computational models for saliency prediction have been introduced. For dynamic scenes, the existing models typically combine different feature maps extracted from spatial and temporal domains either by following generic integration strategies such as averaging or winners take all or using machine learning techniques to set each feature’s importance. Rather than resorting to these fixed feature integration schemes, in this paper, we propose a novel weakly supervised dynamic saliency model called HedgeSal, which is based on a decision-theoretic online learning scheme. Our framework uses two pretrained deep static saliency models as experts to extract individual saliency maps from appearance and motion streams, and then generates the final saliency map by weighted decisions of all these models. As visual characteristics of dynamic scenes constantly vary, the models providing consistently good predictions in the past are automatically assigned higher weights, allowing each expert to adjust itself to the current conditions. We demonstrate the effectiveness of our model on the CRCNS, UCFSports and CITIUS datasets."
}
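The decision-theoretic online learning scheme described in the abstract follows the Hedge algorithm: experts whose past predictions were better receive higher weights. The sketch below shows a generic multiplicative-weights combination of two expert saliency maps; the per-frame loss and learning rate are illustrative assumptions, not HedgeSal's exact choices.

# Generic Hedge-style multiplicative-weights combination of expert saliency maps.
# The per-frame losses and learning rate are illustrative assumptions, not HedgeSal's exact setup.
import numpy as np

def hedge_combine(expert_maps, weights):
    """Weighted average of expert saliency maps; expert_maps: list of (H, W) arrays."""
    stacked = np.stack(expert_maps)                       # (K, H, W)
    return np.tensordot(weights, stacked, axes=1)         # (H, W)

def hedge_update(weights, losses, eta=0.5):
    """Multiplicative-weights update: experts with lower past loss get higher weight."""
    new = weights * np.exp(-eta * np.asarray(losses))
    return new / new.sum()

K, H, W = 2, 36, 64                                       # e.g. appearance and motion experts
weights = np.full(K, 1.0 / K)
for frame in range(5):
    maps = [np.random.rand(H, W) for _ in range(K)]       # stand-in expert predictions
    fused = hedge_combine(maps, weights)
    losses = np.random.rand(K)                            # stand-in per-expert losses on this frame
    weights = hedge_update(weights, losses)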
2019 (6)
Visually Grounded Language Learning for Robot Navigation.
Ünal, E.; Can, O. A.; and Yemez, Y. In 1st International Workshop on Multimodal Understanding and Learning for Embodied Applications (MULEA '19), pages 27–32, New York, NY, USA, 2019. Association for Computing Machinery.

@inproceedings{10.1145/3347450.3357655,
    author = {\"{U}nal, Emre and Can, Ozan Arkan and Yemez, Y\"{u}cel},
    title = "Visually Grounded Language Learning for Robot Navigation",
    year = "2019",
    isbn = "9781450369183",
    publisher = "Association for Computing Machinery",
    address = "New York, NY, USA",
    url = "https://doi.org/10.1145/3347450.3357655",
    doi = "10.1145/3347450.3357655",
    abstract = "We present an end-to-end deep learning model for robot navigation from raw visual pixel input and natural text instructions. The proposed model is an LSTM-based sequence-to-sequence neural network architecture with attention, which is trained on instruction-perception data samples collected in a synthetic environment. We conduct experiments on the SAIL dataset which we reconstruct in 3D so as to generate the 2D images associated with the data. Our experiments show that the performance of our model is on a par with state-of-the-art, despite the fact that it learns navigational language with end-to-end training from raw visual data.",
    booktitle = "1st International Workshop on Multimodal Understanding and Learning for Embodied Applications",
    pages = "27–32",
    keywords = "CV,instruction following,natural language processing,robot navigation,visual grounding",
    series = "MULEA '19"
}
Batch Recurrent Q-Learning for Backchannel Generation Towards Engaging Agents.
Hussain, N.; Erzin, E.; Sezgin, T. M.; and Yemez, Y. 2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII), 1-7. 2019.

@article{Hussain2019BatchRQ,
    author = "Hussain, N. and Erzin, E. and Sezgin, T. M. and Yemez, Y.",
    title = "Batch Recurrent Q-Learning for Backchannel Generation Towards Engaging Agents",
    journal = "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
    year = "2019",
    pages = "1-7",
    keywords = "CV"
}
Learning to Follow Verbal Instructions with Visual Grounding.
Ünal, E.; Can, O. A.; and Yemez, Y. 2019 27th Signal Processing and Communications Applications Conference (SIU), 1-4. 2019.

@article{nal2019LearningTF,
    author = {{\"U}nal, Emre and Can, Ozan Arkan and Yemez, Y.},
    title = "Learning to Follow Verbal Instructions with Visual Grounding",
    journal = "2019 27th Signal Processing and Communications Applications Conference (SIU)",
    year = "2019",
    pages = "1-4",
    keywords = "CV"
}
Speech Driven Backchannel Generation using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction.
Hussain, N.; Erzin, E.; Sezgin, T. M.; and Yemez, Y. ArXiv, abs/1908.01618. 2019.

@article{Hussain2019SpeechDB,
    author = "Hussain, N. and Erzin, E. and Sezgin, T. M. and Yemez, Y.",
    title = "Speech Driven Backchannel Generation using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction",
    journal = "ArXiv",
    year = "2019",
    volume = "abs/1908.01618",
    keywords = "CV"
}
Diffusion-based isometric depth correspondence.
Küpçü, E.; and Yemez, Y. Comput. Vis. Image Underst., 189. 2019.

@article{Kp2019DiffusionbasedID,
    author = {K{\"u}p{\c{c}}{\"u}, Emel and Yemez, Y.},
    title = "Diffusion-based isometric depth correspondence",
    journal = "Comput. Vis. Image Underst.",
    year = "2019",
    volume = "189",
    keywords = "CV"
}
DeepDistance: A Multi-task Deep Regression Model for Cell Detection in Inverted Microscopy Images.
Koyuncu, C. F.; Gunesli, G. N.; Cetin-Atalay, R.; and Gunduz-Demir, C. 2019.

@misc{koyuncu2019deepdistance,
    author = "Koyuncu, Can Fahrettin and Gunesli, Gozde Nur and Cetin-Atalay, Rengul and Gunduz-Demir, Cigdem",
    title = "DeepDistance: A Multi-task Deep Regression Model for Cell Detection in Inverted Microscopy Images",
    year = "2019",
    keywords = "CV"
}
2018 (2)
Unsupervised Learning of Multi-Frame Optical Flow with Occlusions.
Janai, J.; Güney, F.; Ranjan, A.; Black, M. J.; and Geiger, A. In ECCV, 2018.

@inproceedings{Janai2018UnsupervisedLO,
    author = {Janai, Joel and G{\"u}ney, Fatma and Ranjan, A. and Black, Michael J. and Geiger, Andreas},
    title = "Unsupervised Learning of Multi-Frame Optical Flow with Occlusions",
    booktitle = "ECCV",
    year = "2018",
    keywords = "CV"
}
On the Integration of Optical Flow and Action Recognition.
Sevilla-Lara, L.; Liao, Y.; Güney, F.; Jampani, V.; Geiger, A.; and Black, M. J. In GCPR, 2018.

@inproceedings{SevillaLara2018OnTI,
    author = {Sevilla-Lara, Laura and Liao, Yiyi and G{\"u}ney, Fatma and Jampani, V. and Geiger, A. and Black, Michael J.},
    title = "On the Integration of Optical Flow and Action Recognition",
    booktitle = "GCPR",
    year = "2018",
    keywords = "CV"
}
2017 (7)
Semantic Segmentation of RGBD Videos with Recurrent Fully Convolutional Neural Networks.
Yurdakul, E. E.; and Yemez, Y. 2017 IEEE International Conference on Computer Vision Workshops (ICCVW), 367-374. 2017.

@article{Yurdakul2017SemanticSO,
    author = "Yurdakul, Ekrem Emre and Yemez, Y.",
    title = "Semantic Segmentation of RGBD Videos with Recurrent Fully Convolutional Neural Networks",
    journal = "2017 IEEE International Conference on Computer Vision Workshops (ICCVW)",
    year = "2017",
    pages = "367-374",
    keywords = "CV"
}
Reliable Isometric Point Correspondence from Depth.
Küpçü, E.; and Yemez, Y. 2017 IEEE International Conference on Computer Vision Workshops (ICCVW), 1266-1273. 2017.

@article{Kp2017ReliableIP,
    author = {K{\"u}p{\c{c}}{\"u}, Emel and Yemez, Y.},
    title = "Reliable Isometric Point Correspondence from Depth",
    journal = "2017 IEEE International Conference on Computer Vision Workshops (ICCVW)",
    year = "2017",
    pages = "1266-1273",
    keywords = "CV"
}
RGB-D Object Recognition Using Deep Convolutional Neural Networks.
Zia, S.; Yüksel, B.; Yuret, D.; and Yemez, Y. 2017 IEEE International Conference on Computer Vision Workshops (ICCVW), 887-894. 2017.

@article{Zia2017RGBDOR,
    author = {Zia, Saman and Y{\"u}ksel, B. and Yuret, Deniz and Yemez, Y.},
    title = "RGB-D Object Recognition Using Deep Convolutional Neural Networks",
    journal = "2017 IEEE International Conference on Computer Vision Workshops (ICCVW)",
    year = "2017",
    pages = "887-894",
    keywords = "CV,NLP"
}
The JESTKOD database: an affective multimodal database of dyadic interactions.
Bozkurt, E.; Khaki, H.; Keçeci, S.; Turker, B. B.; Yemez, Y.; and Erzin, E. Language Resources and Evaluation, 51: 857-872. 2017.

@article{Bozkurt2017TheJD,
    author = "Bozkurt, E. and Khaki, H. and Ke{\c{c}}eci, Sinan and Turker, B. B. and Yemez, Y. and Erzin, E.",
    title = "The JESTKOD database: an affective multimodal database of dyadic interactions",
    journal = "Language Resources and Evaluation",
    year = "2017",
    volume = "51",
    pages = "857-872",
    keywords = "CV"
}
Slow Flow: Exploiting High-Speed Cameras for Accurate and Diverse Optical Flow Reference Data.
Janai, J.; Güney, F.; Wulff, J.; Black, M. J.; and Geiger, A. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 1406-1416. 2017.

@article{Janai2017SlowFE,
    author = {Janai, Joel and G{\"u}ney, Fatma and Wulff, J. and Black, Michael J. and Geiger, Andreas},
    title = "Slow Flow: Exploiting High-Speed Cameras for Accurate and Diverse Optical Flow Reference Data",
    journal = "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
    year = "2017",
    pages = "1406-1416",
    keywords = "CV"
}
Audio-Facial Laughter Detection in Naturalistic Dyadic Conversations.
Turker, B. B.; Yemez, Y.; Sezgin, T. M.; and Erzin, E. IEEE Transactions on Affective Computing, 8: 534-545. 2017.

@article{Turker2017AudioFacialLD,
    author = "Turker, B. B. and Yemez, Y. and Sezgin, T. M. and Erzin, E.",
    title = "Audio-Facial Laughter Detection in Naturalistic Dyadic Conversations",
    journal = "IEEE Transactions on Affective Computing",
    year = "2017",
    volume = "8",
    pages = "534-545",
    keywords = "CV"
}
Analysis of Engagement and User Experience with a Laughter Responsive Social Robot.
Türker, B. B.; Buçinca, Z.; Erzin, E.; Yemez, Y.; and Sezgin, T. M. In INTERSPEECH, 2017.

@inproceedings{Trker2017AnalysisOE,
    author = {T{\"u}rker, Bekir Berker and Bu{\c{c}}inca, Zana and Erzin, E. and Yemez, Y. and Sezgin, T. M.},
    title = "Analysis of Engagement and User Experience with a Laughter Responsive Social Robot",
    booktitle = "INTERSPEECH",
    year = "2017",
    keywords = "CV"
}
2016 (2)
Multimodal analysis of speech and arm motion for prosody-driven synthesis of beat gestures.
Bozkurt, E.; Yemez, Y.; and Erzin, E. Speech Commun., 85: 29-42. 2016.

@article{Bozkurt2016MultimodalAO,
    author = "Bozkurt, E. and Yemez, Y. and Erzin, E.",
    title = "Multimodal analysis of speech and arm motion for prosody-driven synthesis of beat gestures",
    journal = "Speech Commun.",
    year = "2016",
    volume = "85",
    pages = "29-42",
    keywords = "CV"
}
Deep Discrete Flow.
Güney, F.; and Geiger, A. In ACCV, 2016.

@inproceedings{Gney2016DeepDF,
    author = {G{\"u}ney, Fatma and Geiger, Andreas},
    title = "Deep Discrete Flow",
    booktitle = "ACCV",
    year = "2016",
    keywords = "CV"
}