Effect of Remote Masking on Tactile Perception of Electrovibration.
Jamalzadeh, M.; Basdogan, C.; and Güçlü, B.
IEEE Transactions on Haptics, 14(1): 132–142. 2021.
@article{9204403,
  author = {Jamalzadeh, Milad and Basdogan, Cagatay and G{\"u}{\c{c}}l{\"u}, Burak},
  journal = {IEEE Transactions on Haptics},
  title = {Effect of Remote Masking on Tactile Perception of Electrovibration},
  year = {2021},
  volume = {14},
  number = {1},
  pages = {132--142},
  doi = {10.1109/TOH.2020.3025772}
}

Edge Intelligence for Empowering IoT-based Healthcare Systems.
Hayyolalam, V.; Aloqaily, M.; Ozkasap, O.; and Guizani, M.
2021.
@misc{hayyolalam2021edge,
  title = {Edge Intelligence for Empowering IoT-based Healthcare Systems},
  author = {Vahideh Hayyolalam and Moayad Aloqaily and Oznur Ozkasap and Mohsen Guizani},
  year = {2021},
  keywords = {SAI},
}

The structural basis of Akt PH domain interaction with calmodulin.
Weako, J.; Jang, H.; Keskin, O.; Nussinov, R.; and Gursoy, A.
Biophysical Journal. 2021.
@article{WEAKO2021,
  title = {The structural basis of Akt PH domain interaction with calmodulin},
  journal = {Biophysical Journal},
  year = {2021},
  issn = {0006-3495},
  doi = {https://doi.org/10.1016/j.bpj.2021.03.018},
  url = {https://www.sciencedirect.com/science/article/pii/S0006349521002484},
  author = {Jackson Weako and Hyunbum Jang and Ozlem Keskin and Ruth Nussinov and Attila Gursoy},
  abstract = {Akt plays a key role in the Ras/PI3K/Akt/mTOR signaling pathway. In breast cancer, Akt translocation to the plasma membrane is enabled by the interaction of its pleckstrin homology domain (PHD) with calmodulin (CaM). At the membrane, the conformational change promoted by PIP3 releases CaM and facilitates Thr308 and Ser473 phosphorylation and activation. Here, using modeling and molecular dynamics simulations, we aim to figure out how CaM interacts with Akt’s PHD at the atomic level. Our simulations show that CaM-PHD interaction is thermodynamically stable and involves a β-strand rather than an α-helix, in agreement with NMR data, and that electrostatic and hydrophobic interactions are critical. The PHD interacts with CaM lobes; however, multiple modes are possible. IP4, the polar head of PIP3, weakens the CaM-PHD interaction, implicating the release mechanism at the plasma membrane. Recently, we unraveled the mechanism of PI3Kα activation at the atomistic level and the structural basis for Ras role in the activation. Here, our atomistic structural data clarify the mechanism of how CaM interacts, delivers, and releases Akt—the next node in the Ras/PI3K pathway—at the plasma membrane.},
  keywords = {CBM},
}

Neuropsychiatric Symptoms of COVID-19 Explained by SARS-CoV-2 Proteins’ Mimicry of Human Protein Interactions.
Yapici-Eser, H.; Koroglu, Y. E.; Oztop-Cakmak, O.; Keskin, O.; Gursoy, A.; and Gursoy-Ozdemir, Y.
Frontiers in Human Neuroscience, 15: 126. 2021.
@article{10.3389/fnhum.2021.656313,
  author = {Yapici-Eser, Hale and Koroglu, Yunus Emre and Oztop-Cakmak, Ozgur and Keskin, Ozlem and Gursoy, Attila and Gursoy-Ozdemir, Yasemin},
  title = {Neuropsychiatric Symptoms of COVID-19 Explained by SARS-CoV-2 Proteins’ Mimicry of Human Protein Interactions},
  journal = {Frontiers in Human Neuroscience},
  volume = {15},
  pages = {126},
  year = {2021},
  url = {https://www.frontiersin.org/article/10.3389/fnhum.2021.656313},
  doi = {10.3389/fnhum.2021.656313},
  issn = {1662-5161},
  abstract = {The first clinical symptoms focused on the presentation of coronavirus disease 2019 (COVID-19) have been respiratory failure, however, accumulating evidence also points to its presentation with neuropsychiatric symptoms, the exact mechanisms of which are not well known. By using a computational methodology, we aimed to explain the molecular paths of COVID-19 associated neuropsychiatric symptoms, based on the mimicry of the human protein interactions with SARS-CoV-2 proteins. Methods: Available 11 of the 29 SARS-CoV-2 proteins’ structures have been extracted from Protein Data Bank. HMI-PRED (Host-Microbe Interaction PREDiction), a recently developed web server for structural PREDiction of protein-protein interactions (PPIs) between host and any microbial species, was used to find the “interface mimicry” through which the microbial proteins hijack host binding surfaces. Classification of the found interactions was conducted using the PANTHER Classification System. Results: Predicted Human-SARS-CoV-2 protein interactions have been extensively compared with the literature. Based on the analysis of the molecular functions, cellular localizations and pathways related to human proteins, SARS-CoV-2 proteins are found to possibly interact with human proteins linked to synaptic vesicle trafficking, endocytosis, axonal transport, neurotransmission, growth factors, mitochondrial and blood-brain barrier elements, in addition to its peripheral interactions with proteins linked to thrombosis, inflammation and metabolic control. Conclusion: SARS-CoV-2-human protein interactions may lead to the development of delirium, psychosis, seizures, encephalitis, stroke, sensory impairments, peripheral nerve diseases, and autoimmune disorders. Our findings are also supported by the previous in vivo and in vitro studies from other viruses. Further in vivo and in vitro studies using the proteins that are pointed here, could pave new targets both for avoiding and reversing neuropsychiatric presentations.},
  keywords = {CBM},
}

Computer Vision for Autonomous Vehicles: Problems, Datasets and State of the Art.
Janai, J.; Güney, F.; Behl, A.; and Geiger, A.
2021.
@misc{janai2021computer,
  title = {Computer Vision for Autonomous Vehicles: Problems, Datasets and State of the Art},
  author = {Joel Janai and Fatma G{\"u}ney and Aseem Behl and Andreas Geiger},
  year = {2021},
  keywords = {CV},
}

Improving phoneme recognition of throat microphone speech recordings using transfer learning.
Turan, M. T.; and Erzin, E.
Speech Communication, 129: 25–32. 2021.
@article{TURAN202125,
  title = {Improving phoneme recognition of throat microphone speech recordings using transfer learning},
  journal = {Speech Communication},
  volume = {129},
  pages = {25--32},
  year = {2021},
  issn = {0167-6393},
  doi = {https://doi.org/10.1016/j.specom.2021.02.004},
  url = {https://www.sciencedirect.com/science/article/pii/S0167639321000200},
  author = {M.A. Tu{\u{g}}tekin Turan and Engin Erzin},
  keywords = {Phoneme recognition, Feature augmentation, Transfer learning, Throat microphone, Denoising auto-encoder, MSP},
  abstract = {Throat microphones (TM) are a type of skin-attached non-acoustic sensors, which are robust to environmental noise but carry a lower signal bandwidth characterization than the traditional close-talk microphones (CM). Attaining high-performance phoneme recognition is a challenging task when the training data from a degrading channel, such as TM, is limited. In this paper, we address this challenge for the TM speech recordings using a transfer learning approach based on the stacked denoising auto-encoders (SDA). The proposed transfer learning approach defines an SDA-based domain adaptation framework to map the source domain CM representations and the target domain TM representations into a common latent space, where the mismatch across TM and CM is eliminated to better train an acoustic model and to improve the TM phoneme recognition. For the phoneme recognition task, we use the convolutional neural network (CNN) and the hidden Markov model (HMM) based CNN/HMM hybrid system, which delivers better acoustic modeling performance compared to the conventional Gaussian mixture model (GMM) based models. In the experimental evaluations, we observed more than 12% relative phoneme error rate (PER) improvement for the TM recordings with the proposed transfer learning approach compared to baseline performances.},
  publisher = {Elsevier B.V.},
}
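
The entry above maps throat-microphone (TM) and close-talk (CM) features into a common latent space with stacked denoising auto-encoders trained on parallel data. A minimal PyTorch sketch of that idea, assuming a single encoder over time-aligned parallel frames; the layer sizes and one-encoder design are illustrative, not the paper's architecture:

import torch
import torch.nn as nn

class DenoisingAdapter(nn.Module):
    """Encode a TM frame and reconstruct its clean CM counterpart."""
    def __init__(self, feat_dim=40, latent_dim=256):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(feat_dim, latent_dim), nn.ReLU(),
            nn.Linear(latent_dim, latent_dim), nn.ReLU(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, latent_dim), nn.ReLU(),
            nn.Linear(latent_dim, feat_dim),
        )

    def forward(self, x):
        z = self.encoder(x)              # shared latent representation
        return self.decoder(z), z

model = DenoisingAdapter()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

# tm_batch / cm_batch stand in for time-aligned TM and CM feature frames.
tm_batch, cm_batch = torch.randn(32, 40), torch.randn(32, 40)
recon, latent = model(tm_batch)
loss = nn.functional.mse_loss(recon, cm_batch)   # TM treated as a noisy view of CM
opt.zero_grad(); loss.backward(); opt.step()

The latent code would then feed the downstream CNN/HMM acoustic model in place of the raw TM features.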

Generating visual story graphs with application to photo album summarization.
Celikkale, B.; Erdogan, G.; Erdem, A.; and Erdem, E.
Signal Processing: Image Communication, 90: 116033. 2021.
@article{CELIKKALE2021116033,
  title = {Generating visual story graphs with application to photo album summarization},
  journal = {Signal Processing: Image Communication},
  volume = {90},
  pages = {116033},
  year = {2021},
  issn = {0923-5965},
  doi = {https://doi.org/10.1016/j.image.2020.116033},
  url = {https://www.sciencedirect.com/science/article/pii/S092359652030182X},
  author = {Bora Celikkale and Goksu Erdogan and Aykut Erdem and Erkut Erdem},
  keywords = {Visual story graph, Structured summarization, CV},
  abstract = {Making sense of ever-growing amount of visual data available on the web is difficult, especially when considered in an unsupervised manner. As a step towards this goal, this study tackles a relatively less explored topic of generating structured summaries of large photo collections. Our framework relies on the notion of a story graph which captures the main narratives in the data and their relationships based on their visual, textual and spatio-temporal features. Its output is a directed graph with a set of possibly intersecting paths. Our proposed approach identifies coherent visual storylines and exploits sub-modularity to select a subset of these lines which covers the general narrative at most. Our experimental analysis reveals that extracted story graphs allow for obtaining better results when utilized as priors for photo album summarization. Moreover, our user studies show that our approach delivers better performance on next image prediction and coverage tasks than the state-of-the-art.},
}
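
The storyline selection above exploits submodularity, for which the standard greedy algorithm gives a (1 - 1/e) approximation for monotone objectives under a cardinality constraint. A toy sketch of that generic technique; the coverage function and storyline encoding are hypothetical stand-ins for the paper's objective:

def greedy_select(candidates, coverage, k):
    """Greedily pick k items maximizing a monotone submodular set function."""
    selected = []
    for _ in range(k):
        remaining = [c for c in candidates if c not in selected]
        # Pick the candidate with the largest marginal coverage gain.
        best = max(remaining, key=lambda c: coverage(selected + [c]) - coverage(selected))
        selected.append(best)
    return selected

# Toy data: each "storyline" covers a set of narrative elements.
storylines = {"a": {1, 2}, "b": {2, 3}, "c": {4}}
def coverage(subset):
    return len(set().union(*(storylines[s] for s in subset))) if subset else 0

print(greedy_select(list(storylines), coverage, 2))  # ['a', 'b'] covers {1, 2, 3}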

Domain Adaptation for Food Intake Classification with Teacher/Student Learning.
Turan, M. A. T.; and Erzin, E.
IEEE Transactions on Multimedia, 23: 4220–4231. 2021.
@article{turan2020domain,
  title = {Domain Adaptation for Food Intake Classification with Teacher/Student Learning},
  author = {Turan, Mehmet Ali Tugtekin and Erzin, Engin},
  journal = {IEEE Transactions on Multimedia},
  publisher = {IEEE},
  keywords = {MSP},
  url = {https://ieeexplore.ieee.org/document/9261115},
  volume = {23},
  pages = {4220--4231},
  doi = {10.1109/TMM.2020.3038315},
  abstract = {Automatic dietary monitoring (ADM) stands as a challenging application in wearable healthcare technologies. In this paper, we define an ADM to perform food intake classification (FIC) over throat microphone recordings. We investigate the use of transfer learning to design an improved FIC system. Although labeled data with acoustic close-talk microphones are abundant, throat data is scarce. Therefore, we propose a new adaptation framework based on teacher/student learning. The teacher network is trained over high-quality acoustic microphone recordings, whereas the student network distills deep feature extraction capacity of the teacher over a parallel dataset. Our approach allows us to transfer the representational capacity, adds robustness to the resulting model, and improves the FIC through throat microphone recordings. The classification problem is formulated as a spectra-temporal sequence recognition using the Convolutional LSTM (ConvLSTM) models. We evaluate the proposed approach using a large scale acoustic dataset collected from online recordings, an in-house food intake throat microphone dataset, and a parallel speech dataset. The bidirectional ConvLSTM network with the proposed domain adaptation approach consistently outperforms the SVM- and CNN-based baseline methods and attains 85.2% accuracy for the classification of 10 different food intake items. This translates to 17.8% accuracy improvement with the proposed domain adaptation.},
  year = {2021},
}
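
The teacher/student transfer described in this entry reduces, at its core, to a feature-matching objective on parallel data: the student learns to reproduce the teacher's embeddings of the high-quality view. A schematic sketch, assuming simple MSE feature distillation and toy MLP shapes (the paper's networks are ConvLSTMs, not these):

import torch
import torch.nn as nn

teacher = nn.Sequential(nn.Linear(40, 128), nn.ReLU(), nn.Linear(128, 128))
student = nn.Sequential(nn.Linear(40, 128), nn.ReLU(), nn.Linear(128, 128))
opt = torch.optim.Adam(student.parameters(), lr=1e-3)

# Parallel batch: the same utterances captured by both microphones.
cm_feats, tm_feats = torch.randn(32, 40), torch.randn(32, 40)
with torch.no_grad():
    target = teacher(cm_feats)           # teacher embeds the acoustic (CM) view
loss = nn.functional.mse_loss(student(tm_feats), target)
opt.zero_grad(); loss.backward(); opt.step()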

Use of affect context in dyadic interactions for continuous emotion recognition.
Fatima, S. N.; and Erzin, E.
Speech Communication, 132: 70–82. 2021.
@article{FATIMA202170,
  title = {Use of affect context in dyadic interactions for continuous emotion recognition},
  journal = {Speech Communication},
  volume = {132},
  pages = {70--82},
  year = {2021},
  issn = {0167-6393},
  doi = {https://doi.org/10.1016/j.specom.2021.05.010},
  url = {https://www.sciencedirect.com/science/article/pii/S0167639321000601},
  author = {Syeda Narjis Fatima and Engin Erzin},
  keywords = {Dyadic interactions, Continuous emotion recognition (CER), Dyadic affect context (DAC), CNN, ConvLSTM},
  abstract = {Emotional dependencies play a crucial role in understanding complexities of dyadic interactions. Recent studies have shown that affect recognition tasks can benefit by the incorporation of a particular interaction’s context, however, the investigation of affect context in dyadic settings using neural network frameworks remains a complex and open problem. In this paper, we formulate the concept of dyadic affect context (DAC) and propose convolutional neural network (CNN) based architectures to model and incorporate DAC to improve continuous emotion recognition (CER) in dyadic scenarios. We begin by defining a CNN architecture for single-subject CER-based on speech and body motion data. We then introduce dyadic CER as a two-stage regression framework. Specifically, we propose two dyadic CNN architectures where cross-speaker affect contribution to the CER task is achieved by: (i) the fusion of cross-subject affect (FoA) or (ii) the fusion of cross-subject feature maps (FoM). Based on the preceding dyadic models, we finally propose a new Convolutional LSTM (ConvLSTM) model for the dyadic CER. ConvLSTM architecture captures local spectro-temporal correlations in speech and body motion as well as the long-term affect inter-dependencies between subjects. Our multimodal analysis demonstrates that modeling and incorporation of the DAC in the proposed CER models provide significant performance improvements on the USC CreativeIT database and the achieved results compare favorably to the state-of-the-art.},
  publisher = {Elsevier B.V.},
}
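
Of the two fusion schemes named in this entry, fusion of cross-subject affect (FoA) is the simpler to picture: stage one predicts each subject's affect independently, and stage two re-predicts it with the partner's stage-one output appended as dyadic context. A shape-level sketch; the linear stages and dimensions are placeholders for the paper's CNN/ConvLSTM models:

import torch
import torch.nn as nn

stage1 = nn.Linear(64, 1)            # per-subject affect from own features
stage2 = nn.Linear(64 + 1, 1)        # own features + partner's affect (FoA)

feats_a, feats_b = torch.randn(32, 64), torch.randn(32, 64)
affect_b = stage1(feats_b)           # partner's first-stage estimate
affect_a = stage2(torch.cat([feats_a, affect_b], dim=1))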

Engagement Rewarded Actor-Critic with Conservative Q-Learning for Speech-Driven Laughter Backchannel Generation.
Bayramoğlu, Ö. Z.; Erzin, E.; Sezgin, T. M.; and Yemez, Y.
In Proceedings of the 2021 International Conference on Multimodal Interaction, pages 613–618, New York, NY, USA, 2021. Association for Computing Machinery.
@inproceedings{10.1145/3462244.3479944,
  author = {Bayramo\u{g}lu, \"{O}yk\"{u} Zeynep and Erzin, Engin and Sezgin, Tevfik Metin and Yemez, Y\"{u}cel},
  title = {Engagement Rewarded Actor-Critic with Conservative Q-Learning for Speech-Driven Laughter Backchannel Generation},
  year = {2021},
  isbn = {9781450384810},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3462244.3479944},
  doi = {10.1145/3462244.3479944},
  abstract = {We propose a speech-driven laughter backchannel generation model to reward engagement during human-agent interaction. We formulate the problem as a Markov decision process where speech signal represents the state and the objective is to maximize human engagement. Since online training is often impractical in the case of human-agent interaction, we utilize the existing human-to-human dyadic interaction datasets to train our agent for the backchannel generation task. We address the problem using an actor-critic method based on conservative Q-learning (CQL), that mitigates the distributional shift problem by suppressing Q-value over-estimation during training. The proposed CQL based approach is evaluated objectively on the IEMOCAP dataset for laughter generation task. When compared to the existing off-policy Q-learning methods, we observe an improved compliance with the dataset in terms of laugh generation rate. Furthermore, we show the effectiveness of the learned policy by estimating the expected engagement using off-policy policy evaluation techniques.},
  booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
  pages = {613--618},
  keywords = {human-agent interaction, backchannels, offline reinforcement learning, user engagement},
}
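
The offline training in this entry uses conservative Q-learning, whose standard discrete-action regularizer pushes Q-values down on actions the learned policy might overvalue relative to actions actually present in the dataset. A minimal sketch of that penalty added to a TD loss; the tiny linear Q-network and all dimensions are illustrative only, not the paper's model:

import torch
import torch.nn as nn

q_net = nn.Linear(32, 4)             # state features -> Q-value per action
s, a = torch.randn(64, 32), torch.randint(0, 4, (64, 1))
r, s_next = torch.randn(64, 1), torch.randn(64, 32)
gamma, alpha = 0.99, 1.0

q_sa = q_net(s).gather(1, a)                         # Q(s, a) on dataset actions
with torch.no_grad():
    target = r + gamma * q_net(s_next).max(dim=1, keepdim=True).values
td_loss = nn.functional.mse_loss(q_sa, target)
# CQL term: log-sum-exp over all actions minus the dataset action's value.
cql_penalty = (torch.logsumexp(q_net(s), dim=1, keepdim=True) - q_sa).mean()
loss = td_loss + alpha * cql_penalty
loss.backward()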

Investigating Contributions of Speech and Facial Landmarks for Talking Head Generation.
Kesim, E.; and Erzin, E.
In Proc. Interspeech 2021, pages 1624–1628, 2021.
@inproceedings{kesim21_interspeech,
  author = {Ege Kesim and Engin Erzin},
  title = {{Investigating Contributions of Speech and Facial Landmarks for Talking Head Generation}},
  year = {2021},
  booktitle = {Proc. Interspeech 2021},
  pages = {1624--1628},
  doi = {10.21437/Interspeech.2021-1585},
  url = {https://www.isca-speech.org/archive/interspeech_2021/kesim21_interspeech.html},
  abstract = {Talking head generation is an active research problem. It has been widely studied as a direct speech-to-video or two stage speech-to-landmarks-to-video mapping problem. In this study, our main motivation is to assess individual and joint contributions of the speech and facial landmarks to the talking head generation quality through a state-of-the-art generative adversarial network (GAN) architecture. Incorporating frame and sequence discriminators and a feature matching loss, we investigate performances of speech only, landmark only and joint speech and landmark driven talking head generation on the CREMA-D dataset. Objective evaluations using the peak signal-to-noise ratio (PSNR), structural similarity index (SSIM) and landmark distance (LMD) indicate that while landmarks bring PSNR and SSIM improvements to the speech driven system, speech brings LMD improvement to the landmark driven system. Furthermore, feature matching is observed to improve the speech driven talking head generation models significantly.},
}
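
Two of the objective metrics in this entry have simple closed forms. A NumPy sketch of PSNR and mean landmark distance (LMD), assuming 8-bit frames and mean-over-landmarks aggregation, which are common conventions rather than necessarily the paper's exact protocol:

import numpy as np

def psnr(ref, est, peak=255.0):
    """Peak signal-to-noise ratio in dB between two images."""
    mse = np.mean((ref.astype(np.float64) - est.astype(np.float64)) ** 2)
    return 10.0 * np.log10(peak ** 2 / mse)

def landmark_distance(ref_pts, est_pts):
    """Mean Euclidean distance between corresponding (N, 2) landmark arrays."""
    return float(np.mean(np.linalg.norm(ref_pts - est_pts, axis=1)))

ref = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
est = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
print(psnr(ref, est), landmark_distance(np.zeros((68, 2)), np.ones((68, 2))))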

ReuseTracker: Fast yet accurate multicore reuse distance analyzer.
Sasongko, M. A.; Chabbi, M.; Marzijarani, M. B.; and Unat, D.
ACM Transactions on Architecture and Code Optimization (TACO), 19(1): 1–25. 2021.
@article{sasongko2021reusetracker,
  title = {ReuseTracker: Fast yet accurate multicore reuse distance analyzer},
  author = {Sasongko, Muhammad Aditya and Chabbi, Milind and Marzijarani, Mandana Bagheri and Unat, Didem},
  journal = {ACM Transactions on Architecture and Code Optimization (TACO)},
  volume = {19},
  number = {1},
  pages = {1--25},
  year = {2021},
  publisher = {ACM New York, NY},
}

End-to-end rate-distortion optimized learned hierarchical bi-directional video compression.
Yılmaz, M. A.; and Tekalp, A. M.
IEEE Transactions on Image Processing, 31: 974–983. 2021.
@article{yilmaz2021end,
  title = {End-to-end rate-distortion optimized learned hierarchical bi-directional video compression},
  author = {Y{\i}lmaz, M Ak{\i}n and Tekalp, A Murat},
  journal = {IEEE Transactions on Image Processing},
  volume = {31},
  pages = {974--983},
  year = {2021},
  publisher = {IEEE},
}

A Gated Fusion Network for Dynamic Saliency Prediction.
Kocak, A.; Erdem, E.; and Erdem, A.
IEEE Transactions on Cognitive and Developmental Systems, 14(3): 995–1008. 2021.
@article{kocak2021gated,
  title = {A Gated Fusion Network for Dynamic Saliency Prediction},
  author = {Kocak, Aysun and Erdem, Erkut and Erdem, Aykut},
  journal = {IEEE Transactions on Cognitive and Developmental Systems},
  volume = {14},
  number = {3},
  pages = {995--1008},
  year = {2021},
  publisher = {IEEE},
}

Burst photography for learning to enhance extremely dark images.
Karadeniz, A. S.; Erdem, E.; and Erdem, A.
IEEE Transactions on Image Processing, 30: 9372–9385. 2021.
@article{karadeniz2021burst,
  title = {Burst photography for learning to enhance extremely dark images},
  author = {Karadeniz, Ahmet Serdar and Erdem, Erkut and Erdem, Aykut},
  journal = {IEEE Transactions on Image Processing},
  volume = {30},
  pages = {9372--9385},
  year = {2021},
  publisher = {IEEE},
}

From Noon to Sunset: Interactive Rendering, Relighting, and Recolouring of Landscape Photographs by Modifying Solar Position.
Türe, M.; Çıklabakkal, M. E.; Erdem, A.; Erdem, E.; Satılmış, P.; and Akyüz, A. O.
In Computer Graphics Forum, volume 40, pages 500–515, 2021. Wiley Online Library.
@inproceedings{ture2021noon,
  title = {From Noon to Sunset: Interactive Rendering, Relighting, and Recolouring of Landscape Photographs by Modifying Solar Position},
  author = {T{\"u}re, Murat and {\c{C}}{\i}klabakkal, Mustafa Ege and Erdem, Aykut and Erdem, Erkut and Sat{\i}lm{\i}{\c{s}}, Pinar and Aky{\"u}z, Ahmet Oguz},
  booktitle = {Computer Graphics Forum},
  volume = {40},
  number = {6},
  pages = {500--515},
  year = {2021},
  organization = {Wiley Online Library},
}

MSVD-Turkish: a comprehensive multimodal video dataset for integrated vision and language research in Turkish.
Citamak, B.; Caglayan, O.; Kuyu, M.; Erdem, E.; Erdem, A.; Madhyastha, P.; and Specia, L.
Machine Translation, 35: 265–288. 2021.
@article{citamak2021msvd,
  title = {MSVD-Turkish: a comprehensive multimodal video dataset for integrated vision and language research in Turkish},
  author = {Citamak, Begum and Caglayan, Ozan and Kuyu, Menekse and Erdem, Erkut and Erdem, Aykut and Madhyastha, Pranava and Specia, Lucia},
  journal = {Machine Translation},
  volume = {35},
  pages = {265--288},
  year = {2021},
  publisher = {Springer},
}

Synthetic18K: Learning better representations for person re-ID and attribute recognition from 1.4 million synthetic images.
Uner, O. C.; Aslan, C.; Ercan, B.; Ates, T.; Celikcan, U.; Erdem, A.; and Erdem, E.
Signal Processing: Image Communication, 97: 116335. 2021.
@article{uner2021synthetic18k,
  title = {Synthetic18K: Learning better representations for person re-ID and attribute recognition from 1.4 million synthetic images},
  author = {Uner, Onur Can and Aslan, Cem and Ercan, Burak and Ates, Tayfun and Celikcan, Ufuk and Erdem, Aykut and Erdem, Erkut},
  journal = {Signal Processing: Image Communication},
  volume = {97},
  pages = {116335},
  year = {2021},
  publisher = {Elsevier},
}

NOVA: Rendering virtual worlds with humans for computer vision tasks.
Kerim, A.; Aslan, C.; Celikcan, U.; Erdem, E.; and Erdem, A.
In Computer Graphics Forum, volume 40, pages 258–272, 2021. Wiley Online Library.
@inproceedings{kerim2021nova,
  title = {NOVA: Rendering virtual worlds with humans for computer vision tasks},
  author = {Kerim, Abdulrahman and Aslan, Cem and Celikcan, Ufuk and Erdem, Erkut and Erdem, Aykut},
  booktitle = {Computer Graphics Forum},
  volume = {40},
  number = {6},
  pages = {258--272},
  year = {2021},
  organization = {Wiley Online Library},
}

Using synthetic data for person tracking under adverse weather conditions.
Kerim, A.; Celikcan, U.; Erdem, E.; and Erdem, A.
Image and Vision Computing, 111: 104187. 2021.
@article{kerim2021using,
  title = {Using synthetic data for person tracking under adverse weather conditions},
  author = {Kerim, Abdulrahman and Celikcan, Ufuk and Erdem, Erkut and Erdem, Aykut},
  journal = {Image and Vision Computing},
  volume = {111},
  pages = {104187},
  year = {2021},
  publisher = {Elsevier},
}

Leveraging auxiliary image descriptions for dense video captioning.
Boran, E.; Erdem, A.; Ikizler-Cinbis, N.; Erdem, E.; Madhyastha, P.; and Specia, L.
Pattern Recognition Letters, 146: 70–76. 2021.
@article{boran2021leveraging,
  title = {Leveraging auxiliary image descriptions for dense video captioning},
  author = {Boran, Emre and Erdem, Aykut and Ikizler-Cinbis, Nazli and Erdem, Erkut and Madhyastha, Pranava and Specia, Lucia},
  journal = {Pattern Recognition Letters},
  volume = {146},
  pages = {70--76},
  year = {2021},
  publisher = {Elsevier},
}

Adaptive human force scaling via admittance control for physical human-robot interaction.
Hamad, Y. M.; Aydin, Y.; and Basdogan, C.
IEEE Transactions on Haptics, 14(4): 750–761. 2021.
@article{hamad2021adaptive,
  title = {Adaptive human force scaling via admittance control for physical human-robot interaction},
  author = {Hamad, Yahya M and Aydin, Yusuf and Basdogan, Cagatay},
  journal = {IEEE Transactions on Haptics},
  volume = {14},
  number = {4},
  pages = {750--761},
  year = {2021},
  publisher = {IEEE},
}

Towards collaborative drilling with a cobot using admittance controller.
Aydin, Y.; Sirintuna, D.; and Basdogan, C.
Transactions of the Institute of Measurement and Control, 43(8): 1760–1773. 2021.
@article{aydin2021towards,
  title = {Towards collaborative drilling with a cobot using admittance controller},
  author = {Aydin, Yusuf and Sirintuna, Doganay and Basdogan, Cagatay},
  journal = {Transactions of the Institute of Measurement and Control},
  volume = {43},
  number = {8},
  pages = {1760--1773},
  year = {2021},
  publisher = {SAGE Publications Sage UK: London, England},
}

Polytopic matrix factorization: Determinant maximization based criterion and identifiability.
Tatli, G.; and Erdogan, A. T.
IEEE Transactions on Signal Processing, 69: 5431–5447. 2021.
@article{tatli2021polytopic,
  title = {Polytopic matrix factorization: Determinant maximization based criterion and identifiability},
  author = {Tatli, Gokcan and Erdogan, Alper T},
  journal = {IEEE Transactions on Signal Processing},
  volume = {69},
  pages = {5431--5447},
  year = {2021},
  publisher = {IEEE},
}

Introduction to the Issue on Deep Learning for Image/Video Restoration and Compression.
Tekalp, A. M.; Covell, M.; Timofte, R.; and Dong, C.
IEEE Journal of Selected Topics in Signal Processing, 15(2): 157–161. 2021.
@article{tekalp2021introduction,
  title = {Introduction to the Issue on Deep Learning for Image/Video Restoration and Compression},
  author = {Tekalp, A Murat and Covell, Michele and Timofte, Radu and Dong, Chao},
  journal = {IEEE Journal of Selected Topics in Signal Processing},
  volume = {15},
  number = {2},
  pages = {157--161},
  year = {2021},
  publisher = {IEEE},
}

The Universal Fog Proxy: A Third-party Authentication Solution for Federated Fog Systems with Multiple Protocols.
Ali, A.; Şahin, A. U.; Özkasap, Ö.; and Lin, Y.
IEEE Network, 35(6): 285–291. 2021.
@article{ali2021universal,
  title = {The Universal Fog Proxy: A Third-party Authentication Solution for Federated Fog Systems with Multiple Protocols},
  author = {Ali, Asad and {\c{S}}ahin, Ali Utkan and {\"O}zkasap, {\"O}znur and Lin, Ying-Dar},
  journal = {IEEE Network},
  volume = {35},
  number = {6},
  pages = {285--291},
  year = {2021},
  publisher = {IEEE},
}

Interlaced: Fully decentralized churn stabilization for skip graph-based DHTs.
Hassanzadeh-Nazarabadi, Y.; Küpçü, A.; and Özkasap, Ö.
Journal of Parallel and Distributed Computing, 149: 13–28. 2021.
@article{hassanzadeh2021interlaced,
  title = {Interlaced: Fully decentralized churn stabilization for skip graph-based DHTs},
  author = {Hassanzadeh-Nazarabadi, Yahya and K{\"u}p{\c{c}}{\"u}, Alptekin and {\"O}zkasap, {\"O}znur},
  journal = {Journal of Parallel and Distributed Computing},
  volume = {149},
  pages = {13--28},
  year = {2021},
  publisher = {Elsevier},
}

LightChain: Scalable DHT-based blockchain.
Hassanzadeh-Nazarabadi, Y.; Küpçü, A.; and Özkasap, Ö.
IEEE Transactions on Parallel and Distributed Systems, 32(10): 2582–2593. 2021.
@article{hassanzadeh2021lightchain,
  title = {LightChain: Scalable DHT-based blockchain},
  author = {Hassanzadeh-Nazarabadi, Yahya and K{\"u}p{\c{c}}{\"u}, Alptekin and {\"O}zkasap, {\"O}znur},
  journal = {IEEE Transactions on Parallel and Distributed Systems},
  volume = {32},
  number = {10},
  pages = {2582--2593},
  year = {2021},
  publisher = {IEEE},
}

A computational-graph partitioning method for training memory-constrained DNNs.
Qararyah, F.; Wahib, M.; Dikbayır, D.; Belviranli, M. E.; and Unat, D.
Parallel Computing, 104: 102792. 2021.
@article{qararyah2021computational,
  title = {A computational-graph partitioning method for training memory-constrained DNNs},
  author = {Qararyah, Fareed and Wahib, Mohamed and Dikbay{\i}r, Do{\u{g}}a and Belviranli, Mehmet Esat and Unat, Didem},
  journal = {Parallel Computing},
  volume = {104},
  pages = {102792},
  year = {2021},
  publisher = {Elsevier},
}

A split execution model for SpTRSV.
Ahmad, N.; Yilmaz, B.; and Unat, D.
IEEE Transactions on Parallel and Distributed Systems, 32(11): 2809–2822. 2021.
@article{ahmad2021split,
  title = {A split execution model for SpTRSV},
  author = {Ahmad, Najeeb and Yilmaz, Buse and Unat, Didem},
  journal = {IEEE Transactions on Parallel and Distributed Systems},
  volume = {32},
  number = {11},
  pages = {2809--2822},
  year = {2021},
  publisher = {IEEE},
}

Normal mode analysis of KRas4B reveals partner specific dynamics.
Eren, M.; Tuncbag, N.; Jang, H.; Nussinov, R.; Gursoy, A.; and Keskin, O.
The Journal of Physical Chemistry B, 125(20): 5210–5221. 2021.
@article{eren2021normal,
  title = {Normal mode analysis of KRas4B reveals partner specific dynamics},
  author = {Eren, Meryem and Tuncbag, Nurcan and Jang, Hyunbum and Nussinov, Ruth and Gursoy, Attila and Keskin, Ozlem},
  journal = {The Journal of Physical Chemistry B},
  volume = {125},
  number = {20},
  pages = {5210--5221},
  year = {2021},
  publisher = {ACS Publications},
}

Mechanistic Differences of Activation of Rac1P29S and Rac1A159V.
Senyuz, S.; Jang, H.; Nussinov, R.; Keskin, O.; and Gursoy, A.
The Journal of Physical Chemistry B, 125(15): 3790–3802. 2021.
@article{senyuz2021mechanistic,
  title = {Mechanistic Differences of Activation of Rac1P29S and Rac1A159V},
  author = {Senyuz, Simge and Jang, Hyunbum and Nussinov, Ruth and Keskin, Ozlem and Gursoy, Attila},
  journal = {The Journal of Physical Chemistry B},
  volume = {125},
  number = {15},
  pages = {3790--3802},
  year = {2021},
  publisher = {ACS Publications},
}

SLAMP: Stochastic latent appearance and motion prediction.
Akan, A. K.; Erdem, E.; Erdem, A.; and Güney, F.
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14728–14737, 2021.
@inproceedings{akan2021slamp,
  title = {SLAMP: Stochastic latent appearance and motion prediction},
  author = {Akan, Adil Kaan and Erdem, Erkut and Erdem, Aykut and G{\"u}ney, Fatma},
  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages = {14728--14737},
  year = {2021},
}

Leveraging frequency based salient spatial sound localization to improve 360 video saliency prediction.
Cokelek, M.; Imamoglu, N.; Ozcinar, C.; Erdem, E.; and Erdem, A.
In 2021 17th International Conference on Machine Vision and Applications (MVA), pages 1–5, 2021. IEEE.
@inproceedings{cokelek2021leveraging,
  title = {Leveraging frequency based salient spatial sound localization to improve 360 video saliency prediction},
  author = {Cokelek, Mert and Imamoglu, Nevrez and Ozcinar, Cagri and Erdem, Erkut and Erdem, Aykut},
  booktitle = {2021 17th International Conference on Machine Vision and Applications (MVA)},
  pages = {1--5},
  year = {2021},
  organization = {IEEE},
}

Self-supervised monocular scene decomposition and depth estimation.
Safadoust, S.; and Güney, F.
In 2021 International Conference on 3D Vision (3DV), pages 627–636, 2021. IEEE.
@inproceedings{safadoust2021self,
  title = {Self-supervised monocular scene decomposition and depth estimation},
  author = {Safadoust, Sadra and G{\"u}ney, Fatma},
  booktitle = {2021 International Conference on 3D Vision (3DV)},
  pages = {627--636},
  year = {2021},
  organization = {IEEE},
}

Generalized polytopic matrix factorization.
Tatli, G.; and Erdogan, A. T.
In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 3235–3239, 2021. IEEE.
@inproceedings{tatli2021generalized,
  title = {Generalized polytopic matrix factorization},
  author = {Tatli, Gokcan and Erdogan, Alper T},
  booktitle = {ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages = {3235--3239},
  year = {2021},
  organization = {IEEE},
}

On the Computation of PSNR for a Set of Images or Video.
Keleş, O.; Yılmaz, M. A.; Tekalp, A. M.; Korkmaz, C.; and Doğan, Z.
In 2021 Picture Coding Symposium (PCS), pages 1–5, 2021. IEEE.
@inproceedings{kelecs2021computation,
  title = {On the Computation of PSNR for a Set of Images or Video},
  author = {Kele{\c{s}}, Onur and Y{\i}lmaz, M Ak{\i}n and Tekalp, A Murat and Korkmaz, Cansu and Do{\u{g}}an, Zafer},
  booktitle = {2021 Picture Coding Symposium (PCS)},
  pages = {1--5},
  year = {2021},
  organization = {IEEE},
}

A Practical Approach for Rate-Distortion-Perception Analysis in Learned Image Compression.
Kirmemis, O.; and Tekalp, A. M.
In 2021 Picture Coding Symposium (PCS), pages 1–5, 2021. IEEE.
@inproceedings{kirmemis2021practical,
  title = {A Practical Approach for Rate-Distortion-Perception Analysis in Learned Image Compression},
  author = {Kirmemis, Ogun and Tekalp, A Murat},
  booktitle = {2021 Picture Coding Symposium (PCS)},
  pages = {1--5},
  year = {2021},
  organization = {IEEE},
}

Self-organized residual blocks for image super-resolution.
Keleş, O.; Tekalp, A. M.; Malik, J.; and Kıranyaz, S.
In 2021 IEEE International Conference on Image Processing (ICIP), pages 589–593, 2021. IEEE.
@inproceedings{kelecs2021self,
  title = {Self-organized residual blocks for image super-resolution},
  author = {Kele{\c{s}}, Onur and Tekalp, A Murat and Malik, Junaid and K{\i}ranyaz, Serkan},
  booktitle = {2021 IEEE International Conference on Image Processing (ICIP)},
  pages = {589--593},
  year = {2021},
  organization = {IEEE},
}

Two-stage domain adapted training for better generalization in real-world image restoration and super-resolution.
Korkmaz, C.; Tekalp, A. M.; and Doğan, Z.
In 2021 IEEE International Conference on Image Processing (ICIP), pages 569–573, 2021. IEEE.
@inproceedings{korkmaz2021two,
  title = {Two-stage domain adapted training for better generalization in real-world image restoration and super-resolution},
  author = {Korkmaz, Cansu and Tekalp, A Murat and Do{\u{g}}an, Zafer},
  booktitle = {2021 IEEE International Conference on Image Processing (ICIP)},
  pages = {569--573},
  year = {2021},
  organization = {IEEE},
}

Zelig: Customizable Blockchain Simulator.
Erdogan, E.; Aydin, C. A.; Ozkasap, O.; and Gill, W.
arXiv preprint arXiv:2107.07972. 2021.
@article{erdogan2021zelig,
  title = {Zelig: Customizable Blockchain Simulator},
  author = {Erdogan, Ege and Aydin, Can Arda and Ozkasap, Oznur and Gill, Waris},
  journal = {arXiv preprint arXiv:2107.07972},
  year = {2021},
}

Smart Contract-enabled LightChain Test Network.
Hassanzadeh-Nazarabadi, Y.; Kshatriya, K.; and Özkasap, Ö.
In IEEE INFOCOM 2021-IEEE Conference on Computer Communications Workshops (INFOCOM WKSHPS), pages 1–2, 2021. IEEE.
@inproceedings{hassanzadeh2021smart,
  title = {Smart Contract-enabled LightChain Test Network},
  author = {Hassanzadeh-Nazarabadi, Yahya and Kshatriya, Kedar and {\"O}zkasap, {\"O}znur},
  booktitle = {IEEE INFOCOM 2021-IEEE Conference on Computer Communications Workshops (INFOCOM WKSHPS)},
  pages = {1--2},
  year = {2021},
  organization = {IEEE},
}

Segmentation and Recognition of Offline Sketch Scenes Using Dynamic Programming.
Tümen, R. S.; and Sezgin, M.
IEEE Computer Graphics and Applications, 42(1): 56–72. 2021.
@article{tumen2021segmentation,
  title = {Segmentation and Recognition of Offline Sketch Scenes Using Dynamic Programming},
  author = {T{\"u}men, Recep Sinan and Sezgin, Metin},
  journal = {IEEE Computer Graphics and Applications},
  volume = {42},
  number = {1},
  pages = {56--72},
  year = {2021},
  publisher = {IEEE},
}

On training sketch recognizers for new domains.
Yesilbek, K. T.; and Sezgin, M.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2142–2149, 2021.
@inproceedings{yesilbek2021training,
  title = {On training sketch recognizers for new domains},
  author = {Yesilbek, Kemal Tugrul and Sezgin, Metin},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages = {2142--2149},
  year = {2021},
}

ATM Allocation Using Decision Tree-Based Algorithms.
Yurdakul, H. H.; Kaşıkcı, K.; Cağatay, İ.; Güven, M.; Koraş, M.; Akgün, B.; and Gönen, M.
In 2021 29th Signal Processing and Communications Applications Conference (SIU), pages 1–4, 2021. IEEE.
@inproceedings{yurdakul2021atm,
  title = {ATM Allocation Using Decision Tree-Based Algorithms},
  author = {Yurdakul, Hazal Hasret and Ka{\c{s}}{\i}kc{\i}, Kerem and Ca{\u{g}}atay, {\.I}lhan and G{\"u}ven, Melih and Kora{\c{s}}, Murat and Akg{\"u}n, Bar{\i}{\c{s}} and G{\"o}nen, Mehmet},
  booktitle = {2021 29th Signal Processing and Communications Applications Conference (SIU)},
  pages = {1--4},
  year = {2021},
  organization = {IEEE},
}

ComScribe: Identifying Intra-node GPU Communication.
Unat, D.
In Benchmarking, Measuring, and Optimizing: Third BenchCouncil International Symposium, Bench 2020, Virtual Event, November 15–16, 2020, Revised Selected Papers, volume 12614, pages 157, 2021. Springer Nature.
@inproceedings{unat2021comscribe,
  title = {ComScribe: Identifying Intra-node GPU Communication},
  author = {Unat, Didem},
  booktitle = {Benchmarking, Measuring, and Optimizing: Third BenchCouncil International Symposium, Bench 2020, Virtual Event, November 15--16, 2020, Revised Selected Papers},
  volume = {12614},
  pages = {157},
  year = {2021},
  organization = {Springer Nature},
}

Inhibition of nonfunctional Ras.
Nussinov, R.; Jang, H.; Gursoy, A.; Keskin, O.; and Gaponenko, V.
Cell Chemical Biology, 28(2): 121–133. 2021.
@article{nussinov2021inhibition,
  title = {Inhibition of nonfunctional Ras},
  author = {Nussinov, Ruth and Jang, Hyunbum and Gursoy, Attila and Keskin, Ozlem and Gaponenko, Vadim},
  journal = {Cell Chemical Biology},
  volume = {28},
  number = {2},
  pages = {121--133},
  year = {2021},
  publisher = {Elsevier},
}