You can embed this list in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

    <script src="https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/msp.txt?dl=0&jsonp=1"></script>

PHP:

    <?php
    $contents = file_get_contents("https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/msp.txt?dl=0&jsonp=1");
    print_r($contents);
    ?>

iFrame (not recommended):

    <iframe src="https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/msp.txt?dl=0&jsonp=1"></iframe>
For more details see the documentation.
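If your pages are generated in Python rather than PHP, the same server-side inclusion can be sketched with the requests package. This is a minimal sketch, not an official BibBase integration; the function name is ours, and the URL is the same one used in the snippets above.

    import requests

    # Same BibBase URL as in the JavaScript and PHP snippets above.
    BIBBASE_URL = ("https://bibbase.org/show"
                   "?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/msp.txt?dl=0"
                   "&jsonp=1")

    def publication_list_html():
        """Fetch the rendered publication list as an HTML fragment."""
        response = requests.get(BIBBASE_URL, timeout=10)
        response.raise_for_status()  # fail loudly on HTTP errors
        return response.text

    if __name__ == "__main__":
        print(publication_list_html())
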
2021 (2)

Improving phoneme recognition of throat microphone speech recordings using transfer learning.
Turan, M. T.; and Erzin, E.
Speech Communication, 129: 25-32. 2021.

@article{TURAN202125,
    author = "Turan, M.A. Tu{\u{g}}tekin and Erzin, Engin",
    title = "Improving phoneme recognition of throat microphone speech recordings using transfer learning",
    journal = "Speech Communication",
    volume = "129",
    pages = "25-32",
    year = "2021",
    issn = "0167-6393",
    doi = "https://doi.org/10.1016/j.specom.2021.02.004",
    url = "https://www.sciencedirect.com/science/article/pii/S0167639321000200",
    keywords = "Phoneme recognition, Feature augmentation, Transfer learning, Throat microphone, Denoising auto-encoder, MSP",
    abstract = "Throat microphones (TM) are a type of skin-attached non-acoustic sensors, which are robust to environmental noise but carry a lower signal bandwidth characterization than the traditional close-talk microphones (CM). Attaining high-performance phoneme recognition is a challenging task when the training data from a degrading channel, such as TM, is limited. In this paper, we address this challenge for the TM speech recordings using a transfer learning approach based on the stacked denoising auto-encoders (SDA). The proposed transfer learning approach defines an SDA-based domain adaptation framework to map the source domain CM representations and the target domain TM representations into a common latent space, where the mismatch across TM and CM is eliminated to better train an acoustic model and to improve the TM phoneme recognition. For the phoneme recognition task, we use the convolutional neural network (CNN) and the hidden Markov model (HMM) based CNN/HMM hybrid system, which delivers better acoustic modeling performance compared to the conventional Gaussian mixture model (GMM) based models. In the experimental evaluations, we observed more than 12\% relative phoneme error rate (PER) improvement for the TM recordings with the proposed transfer learning approach compared to baseline performances.",
    publisher = "Elsevier B.V."
}

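The SDA-based adaptation described in this abstract can be pictured as a denoising auto-encoder that maps throat-microphone (TM) features toward parallel close-talk (CM) features in a common latent space. The PyTorch lines below are a minimal sketch of that idea, not the authors' implementation; the feature dimension, layer sizes, noise level, and plain MSE objective are all assumptions.

    # Sketch only: one denoising auto-encoder layer trained so that TM input
    # reconstructs the parallel CM features, pushing both channels toward a
    # common latent representation (cf. the SDA framework in the abstract).
    import torch
    import torch.nn as nn

    class DenoisingAE(nn.Module):
        def __init__(self, dim=40, latent=128, noise_std=0.1):  # assumed sizes
            super().__init__()
            self.noise_std = noise_std
            self.encoder = nn.Sequential(nn.Linear(dim, latent), nn.ReLU())
            self.decoder = nn.Linear(latent, dim)

        def forward(self, x):
            noisy = x + self.noise_std * torch.randn_like(x)  # denoising corruption
            z = self.encoder(noisy)
            return self.decoder(z), z

    model = DenoisingAE()
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    tm_batch = torch.randn(8, 40)  # stand-in throat-microphone features
    cm_batch = torch.randn(8, 40)  # parallel close-talk features
    recon, _ = model(tm_batch)
    loss = nn.functional.mse_loss(recon, cm_batch)  # TM -> CM reconstruction
    opt.zero_grad()
    loss.backward()
    opt.step()
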
Domain Adaptation for Food Intake Classification with Teacher/Student Learning.
Turan, M. A. T.; and Erzin, E.
IEEE Transactions on Multimedia, 23: 4220-4231. 2021.

@article{turan2020domain,
    author = "Turan, Mehmet Ali Tugtekin and Erzin, Engin",
    title = "Domain Adaptation for Food Intake Classification with Teacher/Student Learning",
    journal = "IEEE Transactions on Multimedia",
    publisher = "IEEE",
    keywords = "MSP",
    url = "https://ieeexplore.ieee.org/document/9261115",
    volume = "23",
    pages = "4220-4231",
    doi = "10.1109/TMM.2020.3038315",
    abstract = "Automatic dietary monitoring (ADM) stands as a challenging application in wearable healthcare technologies. In this paper, we define an ADM to perform food intake classification (FIC) over throat microphone recordings. We investigate the use of transfer learning to design an improved FIC system. Although labeled data with acoustic close-talk microphones are abundant, throat data is scarce. Therefore, we propose a new adaptation framework based on teacher/student learning. The teacher network is trained over high-quality acoustic microphone recordings, whereas the student network distills deep feature extraction capacity of the teacher over a parallel dataset. Our approach allows us to transfer the representational capacity, adds robustness to the resulting model, and improves the FIC through throat microphone recordings. The classification problem is formulated as a spectra-temporal sequence recognition using the Convolutional LSTM (ConvLSTM) models. We evaluate the proposed approach using a large scale acoustic dataset collected from online recordings, an in-house food intake throat microphone dataset, and a parallel speech dataset. The bidirectional ConvLSTM network with the proposed domain adaptation approach consistently outperforms the SVM- and CNN-based baseline methods and attains 85.2\% accuracy for the classification of 10 different food intake items. This translates to 17.8\% accuracy improvement with the proposed domain adaptation.",
    year = "2021"
}

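The teacher/student transfer in this abstract amounts to feature-level distillation: a frozen teacher computes deep features on close-talk audio, and the student, fed the parallel throat-microphone signal, learns to match them. A minimal sketch follows; plain convolutional stacks stand in for the paper's ConvLSTM networks, and the shapes and MSE distillation loss are assumptions.

    # Sketch only: distill the frozen teacher's features into the student
    # through a parallel close-talk (cm) / throat-microphone (tm) batch.
    import torch
    import torch.nn as nn

    teacher = nn.Sequential(nn.Conv1d(1, 16, 5, padding=2), nn.ReLU(),
                            nn.AdaptiveAvgPool1d(64))  # assumed pretrained on CM audio
    student = nn.Sequential(nn.Conv1d(1, 16, 5, padding=2), nn.ReLU(),
                            nn.AdaptiveAvgPool1d(64))
    for p in teacher.parameters():
        p.requires_grad_(False)  # the teacher stays frozen

    opt = torch.optim.Adam(student.parameters(), lr=1e-3)
    cm = torch.randn(4, 1, 1600)  # parallel close-talk segment
    tm = torch.randn(4, 1, 1600)  # throat-microphone segment
    loss = nn.functional.mse_loss(student(tm), teacher(cm))  # match teacher features
    opt.zero_grad()
    loss.backward()
    opt.step()
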
2020 (7)

End-to-End Rate-Distortion Optimization for Bi-Directional Learned Video Compression.
Yilmaz, M.; and Tekalp, A.
2020 IEEE International Conference on Image Processing (ICIP), 1311-1315. 2020.

@article{Yilmaz2020EndtoEndRO,
    author = "Yilmaz, M. and Tekalp, A.",
    title = "End-to-End Rate-Distortion Optimization for Bi-Directional Learned Video Compression",
    journal = "2020 IEEE International Conference on Image Processing (ICIP)",
    year = "2020",
    pages = "1311-1315",
    keywords = "MSP"
}

AffectON: Incorporating Affect Into Dialog Generation.
Bucinca, Z.; Yemez, Y.; Erzin, E.; and Sezgin, M.
2020.

@misc{bucinca2020affecton,
    author = "Bucinca, Zana and Yemez, Yucel and Erzin, Engin and Sezgin, Metin",
    title = "AffectON: Incorporating Affect Into Dialog Generation",
    year = "2020",
    keywords = "MSP",
    url = "https://ieeexplore.ieee.org/document/9286511",
    doi = "10.1109/TAFFC.2020.3043067",
    abstract = "Due to its expressivity, natural language is paramount for explicit and implicit affective state communication among humans. The same linguistic inquiry (e.g., How are you?) might induce responses with different affects depending on the affective state of the conversational partner(s) and the context of the conversation. Yet, most dialog systems do not consider affect as a constitutive aspect of response generation. In this paper, we introduce AffectON, an approach for generating affective responses during inference. For generating language in a targeted affect, our approach leverages a probabilistic language model and an affective space. AffectON is language model agnostic, since it can work with probabilities generated by any language model (e.g., sequence-to-sequence models, neural language models, n-grams). Hence, it can be employed for both affective dialog and affective language generation. We experimented with affective dialog generation and evaluated the generated text objectively and subjectively. For the subjective part of the evaluation, we designed a custom user interface for rating and provided recommendations for the design of such interfaces. The results, both subjective and objective, demonstrate that our approach is successful in pulling the generated language toward the targeted affect, with little sacrifice in syntactic coherence."
}

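The inference-time mechanism in this abstract, pulling any language model's next-token distribution toward a target affect, can be sketched as a re-weighting of token log-probabilities. The affect scores and mixing weight below are toy assumptions; AffectON's affective-space scoring is more involved.

    import math

    def biased_next_token(lm_probs, affect_scores, weight=0.5):
        # Re-rank next-token probabilities from any LM toward a target affect.
        # lm_probs: {token: probability} from the underlying language model.
        # affect_scores: {token: score}, higher = closer to the target affect.
        scored = {t: math.log(p + 1e-12) + weight * affect_scores.get(t, 0.0)
                  for t, p in lm_probs.items()}
        norm = sum(math.exp(v) for v in scored.values())
        return {t: math.exp(v) / norm for t, v in scored.items()}

    # Toy example: bias a reply toward positive affect. Probability mass moves
    # toward "great" and away from "awful" relative to the raw LM distribution.
    probs = biased_next_token({"great": 0.2, "fine": 0.5, "awful": 0.3},
                              {"great": 1.0, "awful": -1.0})
    print(probs)
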
Affective synthesis and animation of arm gestures from speech prosody.
Bozkurt, E.; Yemez, Y.; and Erzin, E.
Speech Communication. 2020.

@article{bozkurt2020affective,
    author = {Bozkurt, Elif and Yemez, Y{\"u}cel and Erzin, Engin},
    title = "Affective synthesis and animation of arm gestures from speech prosody",
    journal = "Speech Communication",
    year = "2020",
    publisher = "North-Holland",
    keywords = "MSP"
}

Automatic Vocal Tract Landmark Tracking in rtMRI Using Fully Convolutional Networks and Kalman Filter.
Asadiabadi, S.; and Erzin, E.
In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7339–7343, 2020. IEEE.

@inproceedings{asadiabadi2020automatic,
    author = "Asadiabadi, Sasan and Erzin, Engin",
    title = "Automatic Vocal Tract Landmark Tracking in rtMRI Using Fully Convolutional Networks and Kalman Filter",
    booktitle = "ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",
    pages = "7339--7343",
    year = "2020",
    organization = "IEEE",
    keywords = "MSP"
}

A Diversity Combination Model Incorporating an Inward Bias for Interaural Time-Level Difference Cue Integration in Sound Lateralization.
Mojtahedi, S.; Erzin, E.; and Ungan, P.
Applied Sciences, 10(18): 6356. 2020.

@article{mojtahedi2020diversity,
    author = "Mojtahedi, Sina and Erzin, Engin and Ungan, Pekcan",
    title = "A Diversity Combination Model Incorporating an Inward Bias for Interaural Time-Level Difference Cue Integration in Sound Lateralization",
    journal = "Applied Sciences",
    volume = "10",
    number = "18",
    pages = "6356",
    year = "2020",
    publisher = "Multidisciplinary Digital Publishing Institute",
    keywords = "MSP"
}

Multimodal Continuous Emotion Recognition using Deep Multi-Task Learning with Correlation Loss.
Köprü, B.; and Erzin, E.
arXiv preprint arXiv:2011.00876. 2020.

@article{kopru2020multimodal,
    author = {K{\"o}pr{\"u}, Berkay and Erzin, Engin},
    title = "Multimodal Continuous Emotion Recognition using Deep Multi-Task Learning with Correlation Loss",
    journal = "arXiv preprint arXiv:2011.00876",
    year = "2020",
    keywords = "MSP"
}

Vocal Tract Contour Tracking in rtMRI Using Deep Temporal Regression Network.
Asadiabadi, S.; and Erzin, E.
IEEE/ACM Transactions on Audio, Speech, and Language Processing. 2020.

@article{asadiabadi2020vocal,
    author = "Asadiabadi, Sasan and Erzin, Engin",
    title = "Vocal Tract Contour Tracking in rtMRI Using Deep Temporal Regression Network",
    journal = "IEEE/ACM Transactions on Audio, Speech, and Language Processing",
    year = "2020",
    publisher = "IEEE",
    keywords = "MSP"
}

2019 (10)

Head Nod Detection in Dyadic Conversations.
Numanoglu, T.; Erzin, E.; Yemez, Y.; and Sezgin, M.
2019 27th Signal Processing and Communications Applications Conference (SIU), 1-4. 2019.

@article{Numanoglu2019HeadND,
    author = "Numanoglu, Tugce and Erzin, E. and Yemez, Y. and Sezgin, M.",
    title = "Head Nod Detection in Dyadic Conversations",
    journal = "2019 27th Signal Processing and Communications Applications Conference (SIU)",
    year = "2019",
    pages = "1-4",
    keywords = "MSP"
}

SDN-enabled distributed open exchange: Dynamic QoS-path optimization in multi-operator services.
Bagci, K.; and Tekalp, A.
Comput. Networks, 162. 2019.

@article{Bagci2019SDNenabledDO,
    author = "Bagci, K. and Tekalp, A.",
    title = "SDN-enabled distributed open exchange: Dynamic QoS-path optimization in multi-operator services",
    journal = "Comput. Networks",
    year = "2019",
    volume = "162",
    keywords = "MSP"
}

Effect of Architectures and Training Methods on the Performance of Learned Video Frame Prediction.
Yilmaz, M.; and Tekalp, A.
2019 IEEE International Conference on Image Processing (ICIP), 4210-4214. 2019.

@article{Yilmaz2019EffectOA,
    author = "Yilmaz, M. and Tekalp, A.",
    title = "Effect of Architectures and Training Methods on the Performance of Learned Video Frame Prediction",
    journal = "2019 IEEE International Conference on Image Processing (ICIP)",
    year = "2019",
    pages = "4210-4214",
    keywords = "MSP"
}

Motion-Based Rate Adaptation in WebRTC Videoconferencing Using Scalable Video Coding.
Bakar, G.; Kirmizioglu, R. A.; and Tekalp, A.
IEEE Transactions on Multimedia, 21: 429-441. 2019.

@article{Bakar2019MotionBasedRA,
    author = "Bakar, G. and Kirmizioglu, R. A. and Tekalp, A.",
    title = "Motion-Based Rate Adaptation in WebRTC Videoconferencing Using Scalable Video Coding",
    journal = "IEEE Transactions on Multimedia",
    year = "2019",
    volume = "21",
    pages = "429-441",
    keywords = "MSP"
}

Emotion Dependent Facial Animation from Affective Speech.
Sadiq, R.; AsadiAbadi, S.; and Erzin, E.
2019.

@misc{sadiq2019emotion,
    author = "Sadiq, Rizwan and AsadiAbadi, Sasan and Erzin, Engin",
    title = "Emotion Dependent Facial Animation from Affective Speech",
    year = "2019",
    keywords = "MSP"
}

Batch Recurrent Q-Learning for Backchannel Generation Towards Engaging Agents.
Hussain, N.; Erzin, E.; Sezgin, T M.; and Yemez, Y.
In 2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII), pages 1–7, 2019. IEEE.

@inproceedings{hussain2019batch,
    author = {Hussain, Nusrah and Erzin, Engin and Sezgin, T Metin and Yemez, Y{\"u}cel},
    title = "Batch Recurrent Q-Learning for Backchannel Generation Towards Engaging Agents",
    booktitle = "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
    pages = "1--7",
    year = "2019",
    organization = "IEEE",
    keywords = "MSP"
}

Speech Driven Backchannel Generation using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction.
Hussain, N.; Erzin, E.; Sezgin, T M.; and Yemez, Y.
arXiv preprint arXiv:1908.01618. 2019.

@article{hussain2019speech,
    author = "Hussain, Nusrah and Erzin, Engin and Sezgin, T Metin and Yemez, Yucel",
    title = "Speech Driven Backchannel Generation using Deep Q-Network for Enhancing Engagement in Human-Robot Interaction",
    journal = "arXiv preprint arXiv:1908.01618",
    year = "2019",
    keywords = "MSP"
}

A New Interface for Affective State Estimation and Annotation from Speech.
Fidan, U.; Tomar, D.; Özdil, P G.; and Erzin, E.
In 2019 27th Signal Processing and Communications Applications Conference (SIU), pages 1–4, 2019. IEEE.

@inproceedings{fidan2019new,
    author = {Fidan, Umut and Tomar, Deniz and {\"O}zdil, P Gizem and Erzin, Engin},
    title = "A New Interface for Affective State Estimation and Annotation from Speech",
    booktitle = "2019 27th Signal Processing and Communications Applications Conference (SIU)",
    pages = "1--4",
    year = "2019",
    organization = "IEEE",
    keywords = "MSP"
}

Head Nod Detection in Dyadic Conversations.
Numanoğlu, T.; Erzin, E.; Yemez, Y.; and Sezgin, M T.
In 2019 27th Signal Processing and Communications Applications Conference (SIU), pages 1–4, 2019. IEEE.

@inproceedings{numanouglu2019head,
    author = {Numano{\u{g}}lu, Tu{\u{g}}{\c{c}}e and Erzin, Engin and Yemez, Y{\"u}cel and Sezgin, M Tevfik},
    title = "Head Nod Detection in Dyadic Conversations",
    booktitle = "2019 27th Signal Processing and Communications Applications Conference (SIU)",
    pages = "1--4",
    year = "2019",
    organization = "IEEE",
    keywords = "MSP"
}

Use of non-verbal vocalizations for continuous emotion recognition from speech and head motion.
Fatima, S. N.; and Erzin, E.
In 2019 14th IEEE Conference on Industrial Electronics and Applications (ICIEA), pages 433–437, 2019. IEEE.

@inproceedings{fatima2019use,
    author = "Fatima, Syeda Narjis and Erzin, Engin",
    title = "Use of non-verbal vocalizations for continuous emotion recognition from speech and head motion",
    booktitle = "2019 14th IEEE Conference on Industrial Electronics and Applications (ICIEA)",
    pages = "433--437",
    year = "2019",
    organization = "IEEE",
    keywords = "MSP"
}

2018 (22)

Multifaceted Engagement in Social Interaction with a Machine: The JOKER Project.
Devillers, L.; Rosset, S.; Duplessis, G. D.; Bechade, L.; Yemez, Y.; Türker, B. B.; Sezgin, T. M.; Erzin, E.; Haddad, K.; Dupont, S.; Deléglise, P.; Estève, Y.; Lailler, C.; Gilmartin, E.; and Campbell, N.
2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), 697-701. 2018.

@article{Devillers2018MultifacetedEI,
    author = {Devillers, L. and Rosset, S. and Duplessis, G. D. and Bechade, Lucile and Yemez, Y. and T{\"u}rker, Bekir Berker and Sezgin, T. M. and Erzin, E. and Haddad, K. and Dupont, S. and Del{\'e}glise, P. and Est{\`e}ve, Y. and Lailler, C. and Gilmartin, E. and Campbell, N.},
    title = "Multifaceted Engagement in Social Interaction with a Machine: The JOKER Project",
    journal = "2018 13th IEEE International Conference on Automatic Face \& Gesture Recognition (FG 2018)",
    year = "2018",
    pages = "697-701",
    keywords = "HCI,MSP"
}

Dynamic Control Plane for SDN at Scale.
Görkemli, B.; Tatlicioglu, S.; Tekalp, A.; Civanlar, S.; and Lokman, E.
IEEE Journal on Selected Areas in Communications, 36: 2688-2701. 2018.

@article{Grkemli2018DynamicCP,
    author = {G{\"o}rkemli, Burak and Tatlicioglu, Sinan and Tekalp, A. and Civanlar, S. and Lokman, E.},
    title = "Dynamic Control Plane for SDN at Scale",
    journal = "IEEE Journal on Selected Areas in Communications",
    year = "2018",
    volume = "36",
    pages = "2688-2701",
    keywords = "MSP"
}

Detection of Food Intake Events From Throat Microphone Recordings Using Convolutional Neural Networks.
Turan, M.; and Erzin, E.
2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW), 1-6. 2018.

@article{Turan2018DetectionOF,
    author = "Turan, M. and Erzin, E.",
    title = "Detection of Food Intake Events From Throat Microphone Recordings Using Convolutional Neural Networks",
    journal = "2018 IEEE International Conference on Multimedia \& Expo Workshops (ICMEW)",
    year = "2018",
    pages = "1-6",
    keywords = "MSP"
}

Dynamic Resource Allocation by Batch Optimization for Value-Added Video Services Over SDN.
Bagci, K.; and Tekalp, A.
IEEE Transactions on Multimedia, 20: 3084-3096. 2018.

@article{Bagci2018DynamicRA,
    author = "Bagci, K. and Tekalp, A.",
    title = "Dynamic Resource Allocation by Batch Optimization for Value-Added Video Services Over SDN",
    journal = "IEEE Transactions on Multimedia",
    year = "2018",
    volume = "20",
    pages = "3084-3096",
    keywords = "MSP"
}

On the importance of hidden bias and hidden entropy in representational efficiency of the Gaussian-Bipolar Restricted Boltzmann Machines.
Isabekov, A.; and Erzin, E.
Neural networks : the official journal of the International Neural Network Society, 105. 2018.

@article{Isabekov2018OnTI,
    author = "Isabekov, Altynbek and Erzin, E.",
    title = "On the importance of hidden bias and hidden entropy in representational efficiency of the Gaussian-Bipolar Restricted Boltzmann Machines",
    journal = "Neural networks : the official journal of the International Neural Network Society",
    year = "2018",
    volume = "105",
    keywords = "MSP"
}

Multi-Party Webrtc Videoconferencing Using Scalable Vp9 Video: From Best-Effort Over-The-Top To Managed Value-Added Services.
Kirmizioglu, R. A.; Kaya, B. C.; and Tekalp, A. M.
2018 IEEE International Conference on Multimedia and Expo (ICME), 1-6. 2018.

@article{Kirmizioglu2018MultiPartyWV,
    author = "Kirmizioglu, R. Arda and Kaya, B. Can and Tekalp, A. Murat",
    title = "Multi-Party Webrtc Videoconferencing Using Scalable Vp9 Video: From Best-Effort Over-The-Top To Managed Value-Added Services",
    journal = "2018 IEEE International Conference on Multimedia and Expo (ICME)",
    year = "2018",
    pages = "1-6",
    keywords = "MSP"
}

Learned Compression Artifact Removal by Deep Residual Networks.
Kirmemis, O.; Bakar, G.; and Tekalp, A.
In CVPR Workshops, 2018.

@inproceedings{Kirmemis2018LearnedCA,
    author = "Kirmemis, Ogun and Bakar, G. and Tekalp, A.",
    title = "Learned Compression Artifact Removal by Deep Residual Networks",
    booktitle = "CVPR Workshops",
    year = "2018",
    keywords = "MSP"
}

TCP congestion avoidance for selective flows in SDN.
Atli, A. V.; Uluderya, M. S.; Civanlar, S.; Gorkemli, B.; and Tekalp, A.
2018 26th Signal Processing and Communications Applications Conference (SIU), 1-4. 2018.

@article{Atli2018TCPCA,
    author = "Atli, Ali Volkan and Uluderya, M. S. and Civanlar, S. and Gorkemli, B. and Tekalp, A.",
    title = "TCP congestion avoidance for selective flows in SDN",
    journal = "2018 26th Signal Processing and Communications Applications Conference (SIU)",
    year = "2018",
    pages = "1-4",
    keywords = "MSP"
}

Managed P2P-assisted video services over SDN.
Yilmaz, S.; and Tekalp, A.
2018 26th Signal Processing and Communications Applications Conference (SIU), 1-4. 2018.

@article{Yilmaz2018ManagedPV,
    author = "Yilmaz, S. and Tekalp, A.",
    title = "Managed P2P-assisted video services over SDN",
    journal = "2018 26th Signal Processing and Communications Applications Conference (SIU)",
    year = "2018",
    pages = "1-4",
    keywords = "MSP"
}

Effect of Training and Test Datasets on Image Restoration and Super-Resolution by Deep Learning.
Kirmemis, O.; and Tekalp, A.
2018 26th European Signal Processing Conference (EUSIPCO), 514-518. 2018.

@article{Kirmemis2018EffectOT,
    author = "Kirmemis, Ogun and Tekalp, A.",
    title = "Effect of Training and Test Datasets on Image Restoration and Super-Resolution by Deep Learning",
    journal = "2018 26th European Signal Processing Conference (EUSIPCO)",
    year = "2018",
    pages = "514-518",
    keywords = "MSP"
}

Multimodal Speech Driven Facial Shape Animation Using Deep Neural Networks.
Asadiabadi, S.; Sadiq, R.; and Erzin, E.
2018 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC), 1508-1512. 2018.

@article{Asadiabadi2018MultimodalSD,
    author = "Asadiabadi, Sasan and Sadiq, R. and Erzin, E.",
    title = "Multimodal Speech Driven Facial Shape Animation Using Deep Neural Networks",
    journal = "2018 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)",
    year = "2018",
    pages = "1508-1512",
    keywords = "MSP"
}

Monitoring Infant's Emotional Cry in Domestic Environments Using the Capsule Network Architecture.
Turan, M.; and Erzin, E.
In INTERSPEECH, 2018.

@inproceedings{Turan2018MonitoringIE,
    author = "Turan, Mehmet and Erzin, E.",
    title = "Monitoring Infant's Emotional Cry in Domestic Environments Using the Capsule Network Architecture",
    booktitle = "INTERSPEECH",
    year = "2018",
    keywords = "MSP"
}

Multifaceted engagement in social interaction with a machine: The joker project.
Devillers, L.; Rosset, S.; Duplessis, G. D.; Bechade, L.; Yemez, Y.; Turker, B. B; Sezgin, M.; Erzin, E.; El Haddad, K.; Dupont, S.; and others.
In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pages 697–701, 2018. IEEE.

@inproceedings{devillers2018multifaceted,
    author = "Devillers, Laurence and Rosset, Sophie and Duplessis, Guillaume Dubuisson and Bechade, Lucile and Yemez, Yucel and Turker, Bekir B and Sezgin, Metin and Erzin, Engin and El Haddad, Kevin and Dupont, Stephane and others",
    title = "Multifaceted engagement in social interaction with a machine: The joker project",
    booktitle = "2018 13th IEEE International Conference on Automatic Face \& Gesture Recognition (FG 2018)",
    pages = "697--701",
    year = "2018",
    organization = "IEEE",
    keywords = "MSP"
}

On the importance of hidden bias and hidden entropy in representational efficiency of the Gaussian-Bipolar Restricted Boltzmann Machines.
Isabekov, A.; and Erzin, E.
Neural Networks, 105: 405–418. 2018.

@article{isabekov2018importance,
    author = "Isabekov, Altynbek and Erzin, Engin",
    title = "On the importance of hidden bias and hidden entropy in representational efficiency of the Gaussian-Bipolar Restricted Boltzmann Machines",
    journal = "Neural Networks",
    volume = "105",
    pages = "405--418",
    year = "2018",
    publisher = "Pergamon",
    keywords = "MSP"
}

Audio-Visual Prediction of Head-Nod and Turn-Taking Events in Dyadic Interactions.
Türker, B. B.; Erzin, E.; Yemez, Y.; and Sezgin, T M.
In INTERSPEECH, pages 1741–1745, 2018.

@inproceedings{turker2018audio,
    author = {T{\"u}rker, Bekir Berker and Erzin, Engin and Yemez, Y{\"u}cel and Sezgin, T Metin},
    title = "Audio-Visual Prediction of Head-Nod and Turn-Taking Events in Dyadic Interactions.",
    booktitle = "INTERSPEECH",
    pages = "1741--1745",
    year = "2018",
    keywords = "MSP"
}

Multimodal prediction of head nods in dyadic conversations.
Türker, B B.; Sezgin, M T.; Yemez, Y.; and Erzin, E.
In 2018 26th Signal Processing and Communications Applications Conference (SIU), pages 1–4, 2018. IEEE.

@inproceedings{turker2018multimodal,
    author = {T{\"u}rker, B Berker and Sezgin, M Tevfik and Yemez, Y{\"u}cel and Erzin, Engin},
    title = "Multimodal prediction of head nods in dyadic conversations",
    booktitle = "2018 26th Signal Processing and Communications Applications Conference (SIU)",
    pages = "1--4",
    year = "2018",
    organization = "IEEE",
    keywords = "MSP"
}

Food intake detection using autoencoder-based deep neural networks.
Turan, M. T.; and Erzin, E.
In 2018 26th Signal Processing and Communications Applications Conference (SIU), pages 1–4, 2018. IEEE.

@inproceedings{turan2018food,
    author = "Turan, MA Tu{\u{g}}tekin and Erzin, Engin",
    title = "Food intake detection using autoencoder-based deep neural networks",
    booktitle = "2018 26th Signal Processing and Communications Applications Conference (SIU)",
    pages = "1--4",
    year = "2018",
    organization = "IEEE",
    keywords = "MSP"
}

Monitoring Infant's Emotional Cry in Domestic Environments Using the Capsule Network Architecture.
Turan, M. A. T.; and Erzin, E.
In Interspeech, pages 132–136, 2018.

@inproceedings{turan2018monitoring,
    author = "Turan, Mehmet Ali Tugtekin and Erzin, Engin",
    title = "Monitoring Infant's Emotional Cry in Domestic Environments Using the Capsule Network Architecture.",
    booktitle = "Interspeech",
    pages = "132--136",
    year = "2018",
    keywords = "MSP"
}

Multimodal speech driven facial shape animation using deep neural networks.
Asadiabadi, S.; Sadiq, R.; and Erzin, E.
In 2018 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC), pages 1508–1512, 2018. IEEE.

@inproceedings{asadiabadi2018multimodal,
    author = "Asadiabadi, Sasan and Sadiq, Rizwan and Erzin, Engin",
    title = "Multimodal speech driven facial shape animation using deep neural networks",
    booktitle = "2018 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)",
    pages = "1508--1512",
    year = "2018",
    organization = "IEEE",
    keywords = "MSP"
}

Detection of food intake events from throat microphone recordings using convolutional neural networks.
Turan, M. T.; and Erzin, E.
In 2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW), pages 1–6, 2018. IEEE.

@inproceedings{turan2018detection,
    author = "Turan, MA Tu{\u{g}}tekin and Erzin, Engin",
    title = "Detection of food intake events from throat microphone recordings using convolutional neural networks",
    booktitle = "2018 IEEE International Conference on Multimedia \& Expo Workshops (ICMEW)",
    pages = "1--6",
    year = "2018",
    organization = "IEEE",
    keywords = "MSP"
}

A deep learning approach for data driven vocal tract area function estimation.
Asadiabadi, S.; and Erzin, E.
In 2018 IEEE Spoken Language Technology Workshop (SLT), pages 167–173, 2018. IEEE.

@inproceedings{asadiabadi2018deep,
    author = "Asadiabadi, Sasan and Erzin, Engin",
    title = "A deep learning approach for data driven vocal tract area function estimation",
    booktitle = "2018 IEEE Spoken Language Technology Workshop (SLT)",
    pages = "167--173",
    year = "2018",
    organization = "IEEE",
    keywords = "MSP"
}

Effect of Training and Test Datasets on Image Restoration and Super-Resolution by Deep Learning.
Kirmemis, O.; and Tekalp, A.
2018 26th European Signal Processing Conference (EUSIPCO), 514-518. 2018.

@article{Kirmemis2018EffectOT1,
    author = "Kirmemis, Ogun and Tekalp, A.",
    title = "Effect of Training and Test Datasets on Image Restoration and Super-Resolution by Deep Learning",
    journal = "2018 26th European Signal Processing Conference (EUSIPCO)",
    year = "2018",
    keywords = "MSP,Affordable tangible programming",
    pages = "514-518"
}

2017 (23)

\n \n\n \n \n \n \n \n Motion-Based Adaptive Streaming in WebRTC Using Spatio-Temporal Scalable VP9 Video Coding.\n \n \n \n\n\n \n Bakar, G.; Kirmizioglu, R. A.; and Tekalp, A.\n\n\n \n\n\n\n GLOBECOM 2017 - 2017 IEEE Global Communications Conference,1-6. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Bakar2017MotionBasedAS,\n    author = "Bakar, G. and Kirmizioglu, R. A. and Tekalp, A.",\n    title = "Motion-Based Adaptive Streaming in WebRTC Using Spatio-Temporal Scalable VP9 Video Coding",\n    journal = "GLOBECOM 2017 - 2017 IEEE Global Communications Conference",\n    year = "2017",\n    pages = "1-6",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Distributed-collaborative managed dash video services.\n \n \n \n\n\n \n Sahin, K. E.; Bagci, K.; and Tekalp, A.\n\n\n \n\n\n\n 2017 13th International Conference on Network and Service Management (CNSM),1-5. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Sahin2017DistributedcollaborativeMD,\n    author = "Sahin, Kemal E. and Bagci, K. and Tekalp, A.",\n    title = "Distributed-collaborative managed dash video services",\n    journal = "2017 13th International Conference on Network and Service Management (CNSM)",\n    year = "2017",\n    pages = "1-5",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Empirical Mode Decomposition of Throat Microphone Recordings for Intake Classification.\n \n \n \n\n\n \n Turan, M.; and Erzin, E.\n\n\n \n\n\n\n Proceedings of the 2nd International Workshop on Multimedia for Personal Health and Health Care. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Turan2017EmpiricalMD,\n    author = "Turan, M. and Erzin, E.",\n    title = "Empirical Mode Decomposition of Throat Microphone Recordings for Intake Classification",\n    journal = "Proceedings of the 2nd International Workshop on Multimedia for Personal Health and Health Care",\n    year = "2017",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Compete or Collaborate: Architectures for Collaborative DASH Video Over Future Networks.\n \n \n \n\n\n \n Bagci, K.; Sahin, K. E.; and Tekalp, A.\n\n\n \n\n\n\n IEEE Transactions on Multimedia, 19: 2152-2165. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Bagci2017CompeteOC,\n    author = "Bagci, K. and Sahin, Kemal E. and Tekalp, A.",\n    title = "Compete or Collaborate: Architectures for Collaborative DASH Video Over Future Networks",\n    journal = "IEEE Transactions on Multimedia",\n    year = "2017",\n    volume = "19",\n    pages = "2152-2165",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Adaptive Multiview Video Delivery Using Hybrid Networking.\n \n \n \n\n\n \n Ekmekcioglu, E.; Gurler, C. G.; Kondoz, A.; and Tekalp, A.\n\n\n \n\n\n\n IEEE Transactions on Circuits and Systems for Video Technology, 27: 1313-1325. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Ekmekcioglu2017AdaptiveMV,\n    author = "Ekmekcioglu, E. and Gurler, C. G. and Kondoz, A. and Tekalp, A.",\n    title = "Adaptive Multiview Video Delivery Using Hybrid Networking",\n    journal = "IEEE Transactions on Circuits and Systems for Video Technology",\n    year = "2017",\n    volume = "27",\n    pages = "1313-1325",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Emerging 3-D Imaging and Display Technologies.\n \n \n \n\n\n \n Javidi, B.; and Tekalp, A.\n\n\n \n\n\n\n Proc. IEEE, 105: 786-788. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Javidi2017Emerging3I,\n    author = "Javidi, B. and Tekalp, A.",\n    title = "Emerging 3-D Imaging and Display Technologies",\n    journal = "Proc. IEEE",\n    year = "2017",\n    volume = "105",\n    pages = "786-788",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Virtualized cloud video services.\n \n \n \n\n\n \n Yilmaz, S.; Sahin, K. E.; Bagci, K.; and Tekalp, A.\n\n\n \n\n\n\n 2017 25th Signal Processing and Communications Applications Conference (SIU),1-4. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Yilmaz2017VirtualizedCV,\n    author = "Yilmaz, S. and Sahin, Kemal E. and Bagci, K. and Tekalp, A.",\n    title = "Virtualized cloud video services",\n    journal = "2017 25th Signal Processing and Communications Applications Conference (SIU)",\n    year = "2017",\n    pages = "1-4",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Vocal Tract Airway Tissue Boundary Tracking for rtMRI Using Shape and Appearance Priors.\n \n \n \n\n\n \n Asadiabadi, S.; and Erzin, E.\n\n\n \n\n\n\n In INTERSPEECH, 2017. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{Asadiabadi2017VocalTA,\n    author = "Asadiabadi, Sasan and Erzin, E.",\n    title = "Vocal Tract Airway Tissue Boundary Tracking for rtMRI Using Shape and Appearance Priors",\n    booktitle = "INTERSPEECH",\n    year = "2017",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Use of affect based interaction classification for continuous emotion tracking.\n \n \n \n\n\n \n Khaki, H.; and Erzin, E.\n\n\n \n\n\n\n 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),2881-2885. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Khaki2017UseOA,\n    author = "Khaki, H. and Erzin, E.",\n    title = "Use of affect based interaction classification for continuous emotion tracking",\n    journal = "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",\n    year = "2017",\n    pages = "2881-2885",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Speech features for telemonitoring of Parkinson's disease symptoms.\n \n \n \n\n\n \n Ramezani, H.; Khaki, H.; Erzin, E.; and Akan, Ö. B.\n\n\n \n\n\n\n 2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC),3801-3805. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Ramezani2017SpeechFF,\n    author = {Ramezani, Hamideh and Khaki, H. and Erzin, E. and Akan, {\\"O}zg{\\"u}r B.},\n    title = "Speech features for telemonitoring of Parkinson's disease symptoms",\n    journal = "2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)",\n    year = "2017",\n    pages = "3801-3805",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cross-Subject Continuous Emotion Recognition Using Speech and Body Motion in Dyadic Interactions.\n \n \n \n\n\n \n Fatima, S. N.; and Erzin, E.\n\n\n \n\n\n\n In INTERSPEECH, 2017. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{Fatima2017CrossSubjectCE,\n    author = "Fatima, Syeda Narjis and Erzin, E.",\n    title = "Cross-Subject Continuous Emotion Recognition Using Speech and Body Motion in Dyadic Interactions",\n    booktitle = "INTERSPEECH",\n    year = "2017",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Affect recognition from lip articulations.\n \n \n \n\n\n \n Sadiq, R.; and Erzin, E.\n\n\n \n\n\n\n 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),2432-2436. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Sadiq2017AffectRF,\n    author = "Sadiq, R. and Erzin, E.",\n    title = "Affect recognition from lip articulations",\n    journal = "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",\n    year = "2017",\n    pages = "2432-2436",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n The JESTKOD database: an affective multimodal database of dyadic interactions.\n \n \n \n\n\n \n Bozkurt, E.; Khaki, H.; Keçeci, S.; Türker, B B.; Yemez, Y.; and Erzin, E.\n\n\n \n\n\n\n Language Resources and Evaluation, 51(3): 857–872. 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{bozkurt2017jestkod,\n    author = {Bozkurt, Elif and Khaki, Hossein and Ke{\\c{c}}eci, Sinan and T{\\"u}rker, B Berker and Yemez, Y{\\"u}cel and Erzin, Engin},\n    title = "The JESTKOD database: an affective multimodal database of dyadic interactions",\n    journal = "Language Resources and Evaluation",\n    volume = "51",\n    number = "3",\n    pages = "857--872",\n    year = "2017",\n    publisher = "Springer Netherlands",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Use of affect based interaction classification for continuous emotion tracking.\n \n \n \n\n\n \n Khaki, H.; and Erzin, E.\n\n\n \n\n\n\n In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2881–2885, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{khaki2017use,\n    author = "Khaki, Hossein and Erzin, Engin",\n    title = "Use of affect based interaction classification for continuous emotion tracking",\n    booktitle = "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",\n    pages = "2881--2885",\n    year = "2017",\n    organization = "IEEE",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Affect recognition from lip articulations.\n \n \n \n\n\n \n Sadiq, R.; and Erzin, E.\n\n\n \n\n\n\n In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2432–2436, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{sadiq2017affect,\n    author = "Sadiq, Rizwan and Erzin, Engin",\n    title = "Affect recognition from lip articulations",\n    booktitle = "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",\n    pages = "2432--2436",\n    year = "2017",\n    organization = "IEEE",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Classification of ingestion sounds using Hilbert-huang transform.\n \n \n \n\n\n \n Turan, M. T.; and Erzin, E.\n\n\n \n\n\n\n In 2017 25th Signal Processing and Communications Applications Conference (SIU), pages 1–4, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{turan2017classification,\n    author = "Turan, MA Tuggtekin and Erzin, Engin",\n    title = "Classification of ingestion sounds using Hilbert-huang transform",\n    booktitle = "2017 25th Signal Processing and Communications Applications Conference (SIU)",\n    pages = "1--4",\n    year = "2017",\n    organization = "IEEE",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Real-time audiovisual laughter detection.\n \n \n \n\n\n \n Türker, B B.; Buçinca, Z.; Sezgin, M T.; Yemez, Y.; and Erzin, E.\n\n\n \n\n\n\n In 2017 25th Signal Processing and Communications Applications Conference (SIU), pages 1–4, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{turker2017real,\n    author = {T{\\"u}rker, B Berker and Bu{\\c{c}}inca, Zana and Sezgin, M Tevfik and Yemez, Y{\\"u}cel and Erzin, Engin},\n    title = "Real-time audiovisual laughter detection",\n    booktitle = "2017 25th Signal Processing and Communications Applications Conference (SIU)",\n    pages = "1--4",\n    year = "2017",\n    organization = "IEEE",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Analysis of Engagement and User Experience with a Laughter Responsive Social Robot.\n \n \n \n\n\n \n Türker, B. B.; Buçinca, Z.; Erzin, E.; Yemez, Y.; and Sezgin, T M.\n\n\n \n\n\n\n In Interspeech, pages 844–848, 2017. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{turker2017analysis,\n    author = {T{\\"u}rker, Bekir Berker and Bu{\\c{c}}inca, Zana and Erzin, Engin and Yemez, Y{\\"u}cel and Sezgin, T Metin},\n    title = "Analysis of Engagement and User Experience with a Laughter Responsive Social Robot.",\n    booktitle = "Interspeech",\n    pages = "844--848",\n    year = "2017",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Vocal Tract Airway Tissue Boundary Tracking for rtMRI Using Shape and Appearance Priors.\n \n \n \n\n\n \n Asadiabadi, S.; and Erzin, E.\n\n\n \n\n\n\n In Interspeech, pages 636–640, 2017. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{asadiabadi2017vocal,\n    author = "Asadiabadi, Sasan and Erzin, Engin",\n    title = "Vocal Tract Airway Tissue Boundary Tracking for rtMRI Using Shape and Appearance Priors.",\n    booktitle = "Interspeech",\n    pages = "636--640",\n    year = "2017",\n    keywords = "MSP"\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cross-Subject Continuous Emotion Recognition Using Speech and Body Motion in Dyadic Interactions.\n \n \n \n\n\n \n Fatima, S. N.; and Erzin, E.\n\n\n \n\n\n\n In INTERSPEECH, pages 1731–1735, 2017. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{fatima2017cross,\n    author = "Fatima, Syeda Narjis and Erzin, Engin",\n    title = "Cross-Subject Continuous Emotion Recognition Using Speech and Body Motion in Dyadic Interactions.",\n    booktitle = "INTERSPEECH",\n    pages = "1731--1735",\n    year = "2017",\n    keywords = "MSP"\n}\n\n

Speech features for telemonitoring of Parkinson's disease symptoms. Ramezani, H.; Khaki, H.; Erzin, E.; and Akan, O. B. In 2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC), pages 3801–3805, 2017. IEEE.

@inproceedings{ramezani2017speech,
    author = "Ramezani, Hamideh and Khaki, Hossein and Erzin, Engin and Akan, Ozgur B",
    title = "Speech features for telemonitoring of Parkinson's disease symptoms",
    booktitle = "2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)",
    pages = "3801--3805",
    year = "2017",
    organization = "IEEE",
    keywords = "MSP"
}

Audio-facial laughter detection in naturalistic dyadic conversations. Türker, B. B.; Yemez, Y.; Sezgin, T. M.; and Erzin, E. IEEE Transactions on Affective Computing, 8(4): 534–545, 2017.

@article{turker2017audio,
    author = {T{\"u}rker, Bekir Berker and Yemez, Y{\"u}cel and Sezgin, T. Metin and Erzin, Engin},
    title = "Audio-facial laughter detection in naturalistic dyadic conversations",
    journal = "IEEE Transactions on Affective Computing",
    volume = "8",
    number = "4",
    pages = "534--545",
    year = "2017",
    publisher = "IEEE",
    keywords = "MSP"
}

Empirical mode decomposition of throat microphone recordings for intake classification. Turan, M. T.; and Erzin, E. In Proceedings of the 2nd International Workshop on Multimedia for Personal Health and Health Care, pages 45–52, 2017.

@inproceedings{turan2017empirical,
    author = "Turan, M. A. Tu{\u{g}}tekin and Erzin, Engin",
    title = "Empirical mode decomposition of throat microphone recordings for intake classification",
    booktitle = "Proceedings of the 2nd International Workshop on Multimedia for Personal Health and Health Care",
    pages = "45--52",
    year = "2017",
    keywords = "MSP"
}

2016 (13)


Managed video services over multi-domain software defined networks. Bagci, K.; and Tekalp, A. In 2016 24th European Signal Processing Conference (EUSIPCO), pages 120–124, 2016.

@inproceedings{Bagci2016ManagedVS,
    author = "Bagci, K. and Tekalp, A.",
    title = "Managed video services over multi-domain software defined networks",
    booktitle = "2016 24th European Signal Processing Conference (EUSIPCO)",
    pages = "120--124",
    year = "2016",
    keywords = "MSP"
}

Dynamic end-to-end service-level negotiation over multi-domain software defined networks. Bagci, K.; Yilmaz, S.; Sahin, K. E.; and Tekalp, A. In 2016 IEEE Sixth International Conference on Communications and Electronics (ICCE), pages 33–39, 2016.

@inproceedings{Bagci2016DynamicES,
    author = "Bagci, K. and Yilmaz, S. and Sahin, Kemal E. and Tekalp, A.",
    title = "Dynamic end-to-end service-level negotiation over multi-domain software defined networks",
    booktitle = "2016 IEEE Sixth International Conference on Communications and Electronics (ICCE)",
    pages = "33--39",
    year = "2016",
    keywords = "MSP"
}

Dynamic management of control plane performance in software-defined networks. Gorkemli, B.; Parlakisik, M.; Civanlar, S.; Ulas, A.; and Tekalp, A. In 2016 IEEE NetSoft Conference and Workshops (NetSoft), pages 68–72, 2016.

@inproceedings{Gorkemli2016DynamicMO,
    author = "Gorkemli, B. and Parlakisik, Murat and Civanlar, S. and Ulas, Aydin and Tekalp, A.",
    title = "Dynamic management of control plane performance in software-defined networks",
    booktitle = "2016 IEEE NetSoft Conference and Workshops (NetSoft)",
    pages = "68--72",
    year = "2016",
    keywords = "MSP"
}

End-to-end service-level management framework over multi-domain software defined networks. Bagci, K.; Yilmaz, S.; Sahin, K. E.; and Tekalp, A. In 2016 24th Signal Processing and Communication Application Conference (SIU), pages 121–124, 2016.

@inproceedings{Bagci2016EndtoendSM,
    author = "Bagci, K. and Yilmaz, S. and Sahin, Kemal E. and Tekalp, A.",
    title = "End-to-end service-level management framework over multi-domain software defined networks",
    booktitle = "2016 24th Signal Processing and Communication Application Conference (SIU)",
    pages = "121--124",
    year = "2016",
    keywords = "MSP"
}

Queue-allocation optimization for adaptive video streaming over software defined networks with multiple service-levels. Bagci, K.; Sahin, K. E.; and Tekalp, A. In 2016 IEEE International Conference on Image Processing (ICIP), pages 1519–1523, 2016.

@inproceedings{Bagci2016QueueallocationOF,
    author = "Bagci, K. and Sahin, Kemal E. and Tekalp, A.",
    title = "Queue-allocation optimization for adaptive video streaming over software defined networks with multiple service-levels",
    booktitle = "2016 IEEE International Conference on Image Processing (ICIP)",
    pages = "1519--1523",
    year = "2016",
    keywords = "MSP"
}

Source and Filter Estimation for Throat-Microphone Speech Enhancement. Turan, M.; and Erzin, E. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 24: 265–275, 2016.

@article{Turan2016SourceAF,
    author = "Turan, M. and Erzin, E.",
    title = "Source and Filter Estimation for Throat-Microphone Speech Enhancement",
    journal = "IEEE/ACM Transactions on Audio, Speech, and Language Processing",
    year = "2016",
    volume = "24",
    pages = "265--275",
    keywords = "MSP"
}

A subjective listening test of six different artificial bandwidth extension approaches in English, Chinese, German, and Korean. Abel, J.; Kaniewska, M.; Guillaume, C.; Tirry, W.; Pulakka, H.; Myllylä, V.; Sjöberg, J.; Alku, P.; Katsir, I.; Malah, D.; Cohen, I.; Turan, M.; Erzin, E.; Schlien, T.; Vary, P.; Nour-Eldin, A. H.; Kabal, P.; and Fingscheidt, T. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5915–5919, 2016. IEEE.

@inproceedings{abel2016subjective,
    author = {Abel, Johannes and Kaniewska, Magdalena and Guillaume, Cyril and Tirry, Wouter and Pulakka, Hannu and Myllyl{\"a}, Ville and Sj{\"o}berg, Jari and Alku, Paavo and Katsir, Itai and Malah, David and Cohen, I. and Turan, M. and Erzin, E. and Schlien, Thomas and Vary, P. and Nour-Eldin, Amr H. and Kabal, P. and Fingscheidt, T.},
    title = "A subjective listening test of six different artificial bandwidth extension approaches in English, Chinese, German, and Korean",
    booktitle = "2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",
    pages = "5915--5919",
    year = "2016",
    organization = "IEEE",
    keywords = "MSP"
}

Agreement and disagreement classification of dyadic interactions using vocal and gestural cues. Khaki, H.; Bozkurt, E.; and Erzin, E. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2762–2766, 2016. IEEE.

@inproceedings{khaki2016agreement,
    author = "Khaki, Hossein and Bozkurt, Elif and Erzin, Engin",
    title = "Agreement and disagreement classification of dyadic interactions using vocal and gestural cues",
    booktitle = "2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",
    pages = "2762--2766",
    year = "2016",
    organization = "IEEE",
    keywords = "MSP"
}

Real-time speech driven gesture animation. Kasarcı, K.; Bozkurt, E.; Yemez, Y.; and Erzin, E. In 2016 24th Signal Processing and Communication Application Conference (SIU), pages 1917–1920, 2016. IEEE.

@inproceedings{kasarci2016real,
    author = {Kasarc{\i}, Kenan and Bozkurt, Elif and Yemez, Y{\"u}cel and Erzin, Engin},
    title = "Real-time speech driven gesture animation",
    booktitle = "2016 24th Signal Processing and Communication Application Conference (SIU)",
    pages = "1917--1920",
    year = "2016",
    organization = "IEEE",
    keywords = "MSP"
}

Analysis of JestKOD database using affective state annotations. Keçeci, S.; Erzin, E.; and Yemez, Y. In 2016 24th Signal Processing and Communication Application Conference (SIU), pages 1033–1036, 2016. IEEE.

@inproceedings{kecceci2016analysis,
    author = {Ke{\c{c}}eci, Sinan and Erzin, Engin and Yemez, Y{\"u}cel},
    title = "Analysis of {JestKOD} database using affective state annotations",
    booktitle = "2016 24th Signal Processing and Communication Application Conference (SIU)",
    pages = "1033--1036",
    year = "2016",
    organization = "IEEE",
    keywords = "MSP"
}

Food intake classification using throat microphone. Turan, M. T.; and Erzin, E. In 2016 24th Signal Processing and Communication Application Conference (SIU), pages 1873–1876, 2016. IEEE.

@inproceedings{turan2016food,
    author = "Turan, M. A. Tu{\u{g}}tekin and Erzin, Engin",
    title = "Food intake classification using throat microphone",
    booktitle = "2016 24th Signal Processing and Communication Application Conference (SIU)",
    pages = "1873--1876",
    year = "2016",
    organization = "IEEE",
    keywords = "MSP"
}

Use of Agreement/Disagreement Classification in Dyadic Interactions for Continuous Emotion Recognition. Khaki, H.; and Erzin, E. In Interspeech, pages 605–609, 2016.

@inproceedings{khaki2016use,
    author = "Khaki, Hossein and Erzin, Engin",
    title = "Use of Agreement/Disagreement Classification in Dyadic Interactions for Continuous Emotion Recognition",
    booktitle = "Interspeech",
    pages = "605--609",
    year = "2016",
    keywords = "MSP"
}

Multimodal analysis of speech and arm motion for prosody-driven synthesis of beat gestures. Bozkurt, E.; Yemez, Y.; and Erzin, E. Speech Communication, 85: 29–42, 2016.

@article{bozkurt2016multimodal,
    author = {Bozkurt, Elif and Yemez, Y{\"u}cel and Erzin, Engin},
    title = "Multimodal analysis of speech and arm motion for prosody-driven synthesis of beat gestures",
    journal = "Speech Communication",
    volume = "85",
    pages = "29--42",
    year = "2016",
    publisher = "North-Holland",
    keywords = "MSP"
}