var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=http%3A%2F%2Fhome.ku.edu.tr%2F%7Eeerzin%2Fpubs%2FErzin.bib&jsonp=1&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=http%3A%2F%2Fhome.ku.edu.tr%2F%7Eeerzin%2Fpubs%2FErzin.bib&jsonp=1\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=http%3A%2F%2Fhome.ku.edu.tr%2F%7Eeerzin%2Fpubs%2FErzin.bib&jsonp=1\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2015\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Cok-Kipli Modelleme ile Duygusal Patlama Sezimi.\n \n \n \n\n\n \n Turker, B B.; Marzban, S.; Sezgin, M T.; Yemez, Y.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Turker2015,\nauthor = {Turker, B Berker and Marzban, Shabbir and Sezgin, M Tevfik and Yemez, Yucel and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2015/Turker et al/2015 - Turker et al. - Cok-Kipli Modelleme ile Duygusal Patlama Sezimi.pdf:pdf},\nisbn = {9781479948741},\nlanguage = {Turkish},\ntitle = {{Cok-Kipli Modelleme ile Duygusal Patlama Sezimi}},\nyear = {2015}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n JESTKOD Veritabani:Ikili Iletisim Analizi.\n \n \n \n\n\n \n Bozkurt, E.; Khaki, H.; Kececi, S.; Turker, B B.; Yemez, Y.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2015a,\nauthor = {Bozkurt, Elif and Khaki, Hossein and Kececi, Sinan and Turker, B Berker and Yemez, Yucel and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2015/Bozkurt et al/2015 - Bozkurt et al. - JESTKOD VeritabaniIkili Iletisim Analizi.pdf:pdf},\nisbn = {9781479948741},\nkeywords = {affective state tracking,gesticulation,human-computer interaction,speech,virtual character animation},\nlanguage = {Turkish},\ntitle = {{JESTKOD Veritabani:Ikili Iletisim Analizi}},\nyear = {2015}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Synchronous Overlap and Add of Spectra for Enhancement of Excitation in Artificial Bandwidth Extension of Speech.\n \n \n \n\n\n \n Turan, M. A. T.; and Erzin, E.\n\n\n \n\n\n\n In INTERSPEECH: Annual Conference of the International Speech Communication Association, Dresden, Germany, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Turan2015b,\naddress = {Dresden, Germany},\nauthor = {Turan, M. A. Tugtekin and Erzin, Engin},\nbooktitle = {INTERSPEECH: Annual Conference of the International Speech Communication Association},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2015/Turan, Erzin/2015 - Turan, Erzin - Synchronous Overlap and Add of Spectra for Enhancement of Excitation in Artificial Bandwidth Extension of Speech.pdf:pdf},\ntitle = {{Synchronous Overlap and Add of Spectra for Enhancement of Excitation in Artificial Bandwidth Extension of Speech}},\nyear = {2015}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Continuous Emotion Tracking using Total Variability Space.\n \n \n \n\n\n \n Khaki, H.; and Erzin, E.\n\n\n \n\n\n\n In INTERSPEECH: Annual Conference of the International Speech Communication Association, Dresden, Germany, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Khaki2015,\naddress = {Dresden, Germany},\nauthor = {Khaki, Hossein and Erzin, Engin},\nbooktitle = {INTERSPEECH: Annual Conference of the International Speech Communication Association},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2015/Khaki, Erzin/2015 - Khaki, Erzin - Continuous Emotion Tracking using Total Variability Space.pdf:pdf},\ntitle = {{Continuous Emotion Tracking using Total Variability Space}},\nyear = {2015}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Konusma Kaynagi icin Yapay Bant Genisletme.\n \n \n \n\n\n \n Turan, M. A. T.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Turan2015a,\nauthor = {Turan, M. A. Tugtekin and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2015/Turan, Erzin/2015 - Turan, Erzin - Konusma Kaynagi icin Yapay Bant Genisletme.pdf:pdf},\nisbn = {9781479948741},\nkeywords = {artificial bandwidth extension,hidden markov model,speech enhancement},\nlanguage = {Turkish},\ntitle = {{Konusma Kaynagi icin Yapay Bant Genisletme}},\nyear = {2015}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Affect-Expressive Hand Gestures Synthesis and Animation.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; and Yemez, Y.\n\n\n \n\n\n\n In IEEE International Conference on Multimedia and Expo (ICME), Torino, Italy, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2015,\naddress = {Torino, Italy},\nauthor = {Bozkurt, Elif and Erzin, Engin and Yemez, Yucel},\nbooktitle = {IEEE International Conference on Multimedia and Expo (ICME)},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2015/Bozkurt, Erzin, Yemez/2015 - Bozkurt, Erzin, Yemez - Affect-Expressive Hand Gestures Synthesis and Animation.pdf:pdf},\ntitle = {{Affect-Expressive Hand Gestures Synthesis and Animation}},\nyear = {2015}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Fonetik SIniflandirma ile Girtlak Mikrofonu Iyilestirme.\n \n \n \n\n\n \n Turan, M. A. T.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Turan2014,\nauthor = {Turan, M. A. Tugtekin and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2014/Turan, Erzin/2014 - Turan, Erzin - Fonetik SIniflandirma ile Girtlak Mikrofonu Iyilestirme.pdf:pdf},\ntitle = {{Fonetik SIniflandirma ile Girtlak Mikrofonu Iyilestirme}},\nyear = {2014}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Analysis of interaction attitudes using data-driven hand gesture phrases.\n \n \n \n\n\n \n Yang, Z.; Metallinou, A.; Erzin, E.; and Narayanan, S. S\n\n\n \n\n\n\n In Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on, pages 699–703, 2014. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Yang2014,\nauthor = {Yang, Zhaojun and Metallinou, Angeliki and Erzin, Engin and Narayanan, Shrikanth S},\nbooktitle = {Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on},\npages = {699--703},\npublisher = {IEEE},\ntitle = {{Analysis of interaction attitudes using data-driven hand gesture phrases}},\nyear = {2014}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cok-Kipli Ipuclari Kullanarak Duygusal Patlama Tanima.\n \n \n \n\n\n \n Turker, B. B.; Marzban, S.; Erzin, E.; Yemez, Y.; and Sezgin, T. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 1608–1611, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Turker2014,\nauthor = {Turker, Bekir Berker and Marzban, Shabbir and Erzin, Engin and Yemez, Yucel and Sezgin, Tevfik Metin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2014/Turker et al/2014 - Turker et al. - Cok-Kipli Ipuclari Kullanarak Duygusal Patlama Tanima.pdf:pdf},\npages = {1608--1611},\ntitle = {{Cok-Kipli Ipuclari Kullanarak Duygusal Patlama Tanima}},\nyear = {2014}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n A New Statistical Excitation Mapping for Enhancement of Throat Microphone Recordings.\n \n \n \n\n\n \n Turan, M. A. T.; and Erzin, E.\n\n\n \n\n\n\n In INTERSPEECH: Annual Conference of the International Speech Communication Association, Lyon, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Turan2013a,\naddress = {Lyon},\nauthor = {Turan, M. A. Tugtekin and Erzin, Engin},\nbooktitle = {INTERSPEECH: Annual Conference of the International Speech Communication Association},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2013/Turan, Erzin/2013 - Turan, Erzin - A New Statistical Excitation Mapping for Enhancement of Throat Microphone Recordings.pdf:pdf},\ntitle = {{A New Statistical Excitation Mapping for Enhancement of Throat Microphone Recordings}},\nyear = {2013}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Konusma Ritmi Surumlu Jest Animasyonu.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; and Yemez, Y.\n\n\n \n\n\n\n In SIU: Sinyal İşleme ve İletişim Uygulamaları Kurultayı, Girne, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2013a,\naddress = {Girne},\nauthor = {Bozkurt, Elif and Erzin, Engin and Yemez, Yucel},\nbooktitle = {SIU: Sinyal İşleme ve İletişim Uygulamaları Kurultayı},\nlanguage = {Turkish},\ntitle = {{Konusma Ritmi Surumlu Jest Animasyonu}},\nyear = {2013}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Artificial bandwidth extension of spectral envelope along a Viterbi path.\n \n \n \n\n\n \n Yagli, C.; Turan, M. A. T.; and Erzin, E.\n\n\n \n\n\n\n Speech Communication, 55(1): 111–118. jan 2013.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Yagli2012,\nauthor = {Yagli, Can and Turan, M. A. Tugtekin and Erzin, Engin},\ndoi = {10.1016/j.specom.2012.07.003},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2013/Yagli, Turan, Erzin/2013 - Yagli, Turan, Erzin - Artificial bandwidth extension of spectral envelope along a Viterbi path.pdf:pdf},\nissn = {01676393},\njournal = {Speech Communication},\nkeywords = {artificial bandwidth extension,joint temporal analysis,line spectral frequency,source-filter separation},\nmonth = {jan},\nnumber = {1},\npages = {111--118},\ntitle = {{Artificial bandwidth extension of spectral envelope along a Viterbi path}},\nvolume = {55},\nyear = {2013}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Enhancement of Throat Microphone Recordings by Learning Phone-Dependent Mappings of Speech Spectra.\n \n \n \n\n\n \n Turan, M. A. T.; and Erzin, E.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, Vancouver, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Turan2013,\naddress = {Vancouver},\nauthor = {Turan, M. A. Tugtekin and Erzin, Engin},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2013/Turan, Erzin/2013 - Turan, Erzin - Enhancement of Throat Microphone Recordings by Learning Phone-Dependent Mappings of Speech Spectra.pdf:pdf},\ntitle = {{Enhancement of Throat Microphone Recordings by Learning Phone-Dependent Mappings of Speech Spectra}},\nyear = {2013}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multimodal Analysis of Speech Prosody and Upper Body Gestures using Hidden Semi-Markov Models.\n \n \n \n\n\n \n Bozkurt, E.; Asta, S.; Ozkul, S.; Yemez, Y.; and Erzin, E.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, Vancouver, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2013,\naddress = {Vancouver},\nauthor = {Bozkurt, Elif and Asta, Shahriar and Ozkul, Serkan and Yemez, Yucel and Erzin, Engin},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2013/Bozkurt et al/2013 - Bozkurt et al. - Multimodal Analysis of Speech Prosody and Upper Body Gestures using Hidden Semi-Markov Models.pdf:pdf},\ntitle = {{Multimodal Analysis of Speech Prosody and Upper Body Gestures using Hidden Semi-Markov Models}},\nyear = {2013}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Konusmadan Duygu Tanima Uzerine Degerlendirmeler.\n \n \n \n\n\n \n Bozkurt, E.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, Fethiye, 2012. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2012a,\naddress = {Fethiye},\nauthor = {Bozkurt, Elif and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2012/Bozkurt, Erzin/2012 - Bozkurt, Erzin - Konusmadan Duygu Tanima Uzerine Degerlendirmeler.pdf:pdf},\nlanguage = {Turkish},\ntitle = {{Konusmadan Duygu Tanima Uzerine Degerlendirmeler}},\nyear = {2012}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multimodal analysis of upper-body gestures, facial expressions and speech.\n \n \n \n\n\n \n Ozkul, S.; Bozkurt, E.; Asta, S.; Yemez, Y.; and Erzin, E.\n\n\n \n\n\n\n In Proc. of 4th International Workshop on Corpora for Research on Emotion Sentiment and Social Signals (ES3), 2012. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ozkul2012,\nauthor = {Ozkul, S. and Bozkurt, Elif and Asta, S. and Yemez, Yucel and Erzin, Engin},\nbooktitle = {Proc. of 4th International Workshop on Corpora for Research on Emotion Sentiment and Social Signals (ES3)},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2012/Ozkul et al/2012 - Ozkul et al. - Multimodal analysis of upper-body gestures, facial expressions and speech.docx:docx},\ntitle = {{Multimodal analysis of upper-body gestures, facial expressions and speech}},\nyear = {2012}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learn2Dance: Learning Statistical Music-to-Dance Mappings for Choreography Synthesis.\n \n \n \n \n\n\n \n Ofli, F.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n IEEE Transactions on Multimedia, 14(3): 747–759. jun 2012.\n \n\n\n\n
\n\n\n\n \n \n \"Learn2Dance:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Ofli2012,\nauthor = {Ofli, Ferda and Erzin, Engin and Yemez, Y{\\"{u}}cel and Tekalp, A. Murat},\ndoi = {10.1109/TMM.2011.2181492},\nissn = {1520-9210},\njournal = {IEEE Transactions on Multimedia},\nmonth = {jun},\nnumber = {3},\npages = {747--759},\ntitle = {{Learn2Dance: Learning Statistical Music-to-Dance Mappings for Choreography Synthesis}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6112231},\nvolume = {14},\nyear = {2012}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Cok Kipli Dans Koreografi Modeli.\n \n \n \n\n\n \n Ofli, F.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, 2011. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2011,\nauthor = {Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2011/Ofli et al/2011 - Ofli et al. - Cok Kipli Dans Koreografi Modeli.pdf:pdf},\nlanguage = {Turkish},\ntitle = {{Cok Kipli Dans Koreografi Modeli}},\nyear = {2011}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Artificial Bandwidth Extension of Spectral Envelope with Temporal Clustering.\n \n \n \n\n\n \n Yagli, C.; and Erzin, E.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech, and Signal Processing, pages 5096–5099, Prague, 2011. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Yagli2011,\naddress = {Prague},\nauthor = {Yagli, Can and Erzin, Engin},\nbooktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2011/Yagli, Erzin/2011 - Yagli, Erzin - Artificial Bandwidth Extension of Spectral Envelope with Temporal Clustering.pdf:pdf},\nisbn = {978-1-4577-0539-7},\nissn = {1520-6149},\npages = {5096--5099},\ntitle = {{Artificial Bandwidth Extension of Spectral Envelope with Temporal Clustering}},\nyear = {2011}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n RANSAC-Based Training Data Selection on Spectral Features for Emotion Recognition from Spontaneous Speech.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; Erdem, C. E.; and Erdem, A. T.\n\n\n \n\n\n\n In Esposito, A; Vinciarelli, A; Vicsi, K; Pelachaud, C; and Nijholt, A, editor(s), Analysis of Verbal and Nonverbal Communication and Enactment: The Processing Issues, volume 6800, of Lecture Notes in Computer Science, pages 36–47, 2011. European COST Action 2102\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2011a,\nauthor = {Bozkurt, Elif and Erzin, Engin and Erdem, Cigdem Eroglu and Erdem, A. Tanju},\nbooktitle = {Analysis of Verbal and Nonverbal Communication and Enactment: The Processing Issues},\neditor = {Esposito, A and Vinciarelli, A and Vicsi, K and Pelachaud, C and Nijholt, A},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2011/Bozkurt et al/2011 - Bozkurt et al. - RANSAC-Based Training Data Selection on Spectral Features for Emotion Recognition from Spontaneous Speech.pdf:pdf},\nisbn = {978-3-642-25774-2},\nissn = {0302-9743},\norganization = {European COST Action 2102},\npages = {36--47},\nseries = {Lecture Notes in Computer Science},\ntitle = {{RANSAC-Based Training Data Selection on Spectral Features for Emotion Recognition from Spontaneous Speech}},\nvolume = {6800},\nyear = {2011}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Formant position based weighted spectral features for emotion recognition.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; Erdem, C. E.; and Erdem, A. T.\n\n\n \n\n\n\n Speech Communication, 53(9-10, SI): 1186–1197. 2011.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Bozkurt2011,\nauthor = {Bozkurt, Elif and Erzin, Engin and Erdem, Cigdem Eroglu and Erdem, A. Tanju},\ndoi = {10.1016/j.specom.2011.04.003},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2011/Bozkurt et al/2011 - Bozkurt et al. - Formant position based weighted spectral features for emotion recognition.pdf:pdf},\nissn = {0167-6393},\njournal = {Speech Communication},\nnumber = {9-10, SI},\npages = {1186--1197},\ntitle = {{Formant position based weighted spectral features for emotion recognition}},\nvolume = {53},\nyear = {2011}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n RANSAC-based Training Data Selection for Speaker State Recognition.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; Erdem, C. E.; and Erdem, A. T.\n\n\n \n\n\n\n In INTERSPEECH: Annual Conference of the International Speech Communication Association, Florence, 2011. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2011b,\naddress = {Florence},\nauthor = {Bozkurt, Elif and Erzin, Engin and Erdem, Cigdem Eroglu and Erdem, A. Tanju},\nbooktitle = {INTERSPEECH: Annual Conference of the International Speech Communication Association},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2011/Bozkurt et al/2011 - Bozkurt et al. - RANSAC-based Training Data Selection for Speaker State Recognition.pdf:pdf},\ntitle = {{RANSAC-based Training Data Selection for Speaker State Recognition}},\nyear = {2011}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Use of Line Spectral Frequencies for Emotion Recognition from Speech.\n \n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; Erdem, C. E.; and Erdem, a. T.\n\n\n \n\n\n\n In International Conference on Pattern Recognition, pages 3708–3711, aug 2010. Ieee\n \n\n\n\n
\n\n\n\n \n \n \"UsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2010a,\nauthor = {Bozkurt, Elif and Erzin, Engin and Erdem, Cigdem Eroglu and Erdem, a. Tanju},\nbooktitle = {International Conference on Pattern Recognition},\ndoi = {10.1109/ICPR.2010.903},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2010/Bozkurt et al/2010 - Bozkurt et al. - Use of Line Spectral Frequencies for Emotion Recognition from Speech.pdf:pdf},\nisbn = {978-1-4244-7542-1},\nmonth = {aug},\npages = {3708--3711},\npublisher = {Ieee},\ntitle = {{Use of Line Spectral Frequencies for Emotion Recognition from Speech}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5597892},\nyear = {2010}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n INTERSPEECH 2009 Duygu Tanima Yarismasi Degerlendirmesi.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; Erdem, C. E.; and Erdem, A. T.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 216–219, Diyarbakır, 2010. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2010,\naddress = {Diyarbakır},\nauthor = {Bozkurt, Elif and Erzin, Engin and Erdem, Cigdem Eroglu and Erdem, A. Tanju},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2010/Bozkurt et al/2010 - Bozkurt et al. - INTERSPEECH 2009 Duygu Tanima Yarismasi Degerlendirmesi.pdf:pdf},\nlanguage = {Turkish},\npages = {216--219},\ntitle = {{INTERSPEECH 2009 Duygu Tanima Yarismasi Degerlendirmesi}},\nyear = {2010}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n RANSAC-based Training Data Selection for Emotion Recognition from Spontenous Speech.\n \n \n \n\n\n \n Erdem, C. E.; Bozkurt, E.; Erzin, E.; and Erdem, A. T.\n\n\n \n\n\n\n In AFFINE, Frienze, 2010. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erdem2010,\naddress = {Frienze},\nauthor = {Erdem, Cigdem Eroglu and Bozkurt, Elif and Erzin, Engin and Erdem, A. Tanju},\nbooktitle = {AFFINE},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2010/Erdem et al/2010 - Erdem et al. - RANSAC-based Training Data Selection for Emotion Recognition from Spontenous Speech.pdf:pdf},\ntitle = {{RANSAC-based Training Data Selection for Emotion Recognition from Spontenous Speech}},\nyear = {2010}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multi-modal analysis of dance performances for music-driven choreography synthesis.\n \n \n \n\n\n \n Ofli, F.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 2466–2469, Dallas, 2010. IEEE Signal Proc Soc\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2010,\naddress = {Dallas},\nauthor = {Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\ndoi = {10.1109/ICASSP.2010.5494891},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2010/Ofli et al/2010 - Ofli et al. - Multi-modal analysis of dance performances for music-driven choreography synthesis.pdf:pdf},\nisbn = {978-1-4244-4296-6},\nissn = {1520-6149},\norganization = {IEEE Signal Proc Soc},\npages = {2466--2469},\ntitle = {{Multi-modal analysis of dance performances for music-driven choreography synthesis}},\nyear = {2010}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Estimation of Acoustic Microphone Vocal Track Parameters from Throat Microphone Recordings.\n \n \n \n\n\n \n Akargun, U.; and Erzin, E.\n\n\n \n\n\n\n In Takeda, K.; Erdogan, H.; Hansen, J.; and Abut, H., editor(s), In-Vehicle Corpus and Signal Processing for Driver Behavior. Springer-Verlag, 2009.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Akargun2009,\nauthor = {Akargun, U.C. and Erzin, Engin},\nbooktitle = {In-Vehicle Corpus and Signal Processing for Driver Behavior},\neditor = {Takeda, K. and Erdogan, H. and Hansen, J.H.L. and Abut, H.},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2009/Akargun, Erzin/2009 - Akargun, Erzin - Estimation of Acoustic Microphone Vocal Track Parameters from Throat Microphone Recordings.pdf:pdf},\npublisher = {Springer-Verlag},\ntitle = {{Estimation of Acoustic Microphone Vocal Track Parameters from Throat Microphone Recordings}},\nyear = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Driving Status Identification Under Different Distraction Conditions from Driving Behaviour Signals.\n \n \n \n\n\n \n Ozturk, E.; and Erzin, E.\n\n\n \n\n\n\n In 4th Biennial Workshop on DSP for In-Vehicle Systems and Safety, Dallas, jun 2009. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ozturk2009,\naddress = {Dallas},\nauthor = {Ozturk, E. and Erzin, Engin},\nbooktitle = {4th Biennial Workshop on DSP for In-Vehicle Systems and Safety},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2009/Ozturk, Erzin/2009 - Ozturk, Erzin - Driving Status Identification Under Different Distraction Conditions from Driving Behaviour Signals.pdf:pdf},\nmonth = {jun},\ntitle = {{Driving Status Identification Under Different Distraction Conditions from Driving Behaviour Signals}},\nyear = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Sample Statistics from Collaborative Naturalistic Vehicular Corpus.\n \n \n \n\n\n \n Abut, H.; Erdogan, H.; Erzin, E.; and Cokelek, E.\n\n\n \n\n\n\n In 4th Biennial Workshop on DSP for In-Vehicle Systems and Safety, Dallas, jun 2009. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Abut2009,\naddress = {Dallas},\nauthor = {Abut, H. and Erdogan, Hakan and Erzin, Engin and Cokelek, E.},\nbooktitle = {4th Biennial Workshop on DSP for In-Vehicle Systems and Safety},\nmonth = {jun},\ntitle = {{Sample Statistics from Collaborative Naturalistic Vehicular Corpus}},\nyear = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Yuz ifadesi Canlandirma icin Konusma Sinyalinden Otomatik Duygu Tanima.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; Erdem, C. E.; and Erdem, A. T.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 989–992, Side, 2009. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2009a,\naddress = {Side},\nauthor = {Bozkurt, Elif and Erzin, Engin and Erdem, Cigdem Eroglu and Erdem, A. Tanju},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2009/Bozkurt et al/2009 - Bozkurt et al. - Yuz ifadesi Canlandirma icin Konusma Sinyalinden Otomatik Duygu Tanima.pdf:pdf},\nlanguage = {Turkish},\npages = {989--992},\ntitle = {{Yuz ifadesi Canlandirma icin Konusma Sinyalinden Otomatik Duygu Tanima}},\nyear = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Improving Automatic Emotion Recognition from Speech Signals.\n \n \n \n\n\n \n Bozkurt, E.; Erzin, E.; Erdem, C. E.; and Erdem, A. T.\n\n\n \n\n\n\n In INTERSPEECH: Annual Conference of the International Speech Communication Association, pages 989–992, 2009. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2009,\nauthor = {Bozkurt, Elif and Erzin, Engin and Erdem, Cigdem Eroglu and Erdem, A. Tanju},\nbooktitle = {INTERSPEECH: Annual Conference of the International Speech Communication Association},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2009/Bozkurt et al/2009 - Bozkurt et al. - Improving Automatic Emotion Recognition from Speech Signals.pdf:pdf},\nisbn = {978-1-4244-4435-9},\npages = {989--992},\ntitle = {{Improving Automatic Emotion Recognition from Speech Signals}},\nyear = {2009}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Improving Throat Microphone Speech Recognition by Joint Analysis of Throat and Acoustic Microphone Recordings.\n \n \n \n\n\n \n Erzin, E.\n\n\n \n\n\n\n IEEE Transactions on Audio, Speech, and Language Processing, 17(7): 1316–1324. sep 2009.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Erzin2009,\nauthor = {Erzin, Engin},\ndoi = {10.1109/TASL.2009.2016733},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2009/Erzin/2009 - Erzin - Improving Throat Microphone Speech Recognition by Joint Analysis of Throat and Acoustic Microphone Recordings.pdf:pdf},\nissn = {1558-7916},\njournal = {IEEE Transactions on Audio, Speech, and Language Processing},\nmonth = {sep},\nnumber = {7},\npages = {1316--1324},\ntitle = {{Improving Throat Microphone Speech Recognition by Joint Analysis of Throat and Acoustic Microphone Recordings}},\nvolume = {17},\nyear = {2009}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (9)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Speech-Driven Automatic Facial Expression Synthesis.\n \n \n \n\n\n \n Bozkurt, E.; Erdem, C. E.; Erzin, E.; Erdem, A. T.; Ozkan, M.; and Tekalp, A. M.\n\n\n \n\n\n\n In 3DTV-CONFERENCE: The True Vision-Capture, Transmission and Display of 3D Video, pages 273–276, 2008. European Union Informat Soc Technol\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2008,\nauthor = {Bozkurt, Elif and Erdem, Cigdem Eroglu and Erzin, Engin and Erdem, A. Tanju and Ozkan, Mehmet and Tekalp, A. Murat},\nbooktitle = {3DTV-CONFERENCE: The True Vision-Capture, Transmission and Display of 3D Video},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2008/Bozkurt et al/2008 - Bozkurt et al. - Speech-Driven Automatic Facial Expression Synthesis.pdf:pdf},\nisbn = {978-1-4244-1760-5},\norganization = {European Union Informat Soc Technol},\npages = {273--276},\ntitle = {{Speech-Driven Automatic Facial Expression Synthesis}},\nyear = {2008}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Unsupervised Dance Figure Analysis from Video for Dancing Avatar Animation.\n \n \n \n\n\n \n Ofli, F.; Erzin, E.; Yemez, Y.; Tekalp, A. M.; Erdem, C. E.; Erdem, A. T.; Abaci, T.; and Ozkan, M.\n\n\n \n\n\n\n In IEEE International Conference on Image Processing, pages 1484–1487, San Diego, CA, USA, 2008. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Ofli2008a,\nabstract = {This paper presents a framework for unsupervised video analysis in the context of dance performances, where gestures and 3D movements of a dancer are characterized by repetition of a set of unknown dance figures. The system is trained in an unsupervised manner using hidden Markov models (HMMs) to automatically segment multiview video recordings of a dancer into recurring elementary temporal body motion patterns to identify the dance figures. That is, a parallel HMM structure is employed to automatically determine the number and the temporal boundaries of different dance figures in a given dance video. The success of the analysis framework has been evaluated by visualizing these dance figures on a dancing avatar animated by the computed 3D analysis parameters. Experimental results demonstrate that the proposed framework enables synthetic agents and/or robots to learn dance figures from video automatically.},\naddress = {San Diego, CA, USA},\nauthor = {Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat and Erdem, Cigdem Eroglu and Erdem, A. Tanju and Abaci, T. and Ozkan, Mehmet},\nbooktitle = {IEEE International Conference on Image Processing},\ndoi = {10.1109/ICIP.2008.4712047},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2008/Ofli et al/2008 - Ofli et al. - Unsupervised Dance Figure Analysis from Video for Dancing Avatar Animation.pdf:pdf},\nkeywords = {dance analysis,dance animation},\nmendeley-tags = {dance analysis,dance animation},\npages = {1484--1487},\ntitle = {{Unsupervised Dance Figure Analysis from Video for Dancing Avatar Animation}},\nyear = {2008}\n}\n
\n
\n\n\n
\n This paper presents a framework for unsupervised video analysis in the context of dance performances, where gestures and 3D movements of a dancer are characterized by repetition of a set of unknown dance figures. The system is trained in an unsupervised manner using hidden Markov models (HMMs) to automatically segment multiview video recordings of a dancer into recurring elementary temporal body motion patterns to identify the dance figures. That is, a parallel HMM structure is employed to automatically determine the number and the temporal boundaries of different dance figures in a given dance video. The success of the analysis framework has been evaluated by visualizing these dance figures on a dancing avatar animated by the computed 3D analysis parameters. Experimental results demonstrate that the proposed framework enables synthetic agents and/or robots to learn dance figures from video automatically.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An audio-driven dancing avatar.\n \n \n \n \n\n\n \n Ofli, F.; Demir, Y.; Yemez, Y.; Erzin, E.; Tekalp, A. M.; Balci, K.; Kızoglu, İ.; Akarun, L.; Canton-Ferrer, C.; Tilmanne, J.; Bozkurt, E.; and Erdem, A. T.\n\n\n \n\n\n\n Journal on Multimodal User Interfaces, 2(2): 93–103. sep 2008.\n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Ofli2008c,\nauthor = {Ofli, Ferda and Demir, Yasemin and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat and Balci, K. and Kızoglu, İdil and Akarun, Lale and Canton-Ferrer, Cristian and Tilmanne, Jo{\\"{e}}lle and Bozkurt, Elif and Erdem, A. Tanju},\ndoi = {10.1007/s12193-008-0009-x},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2008/Ofli et al/2008 - Ofli et al. - An audio-driven dancing avatar.pdf:pdf},\nissn = {1783-7677},\njournal = {Journal on Multimodal User Interfaces},\nmonth = {sep},\nnumber = {2},\npages = {93--103},\ntitle = {{An audio-driven dancing avatar}},\nurl = {http://www.springerlink.com/index/10.1007/s12193-008-0009-x},\nvolume = {2},\nyear = {2008}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multimodal Speaker Identification using Discriminative Lip Motion Features.\n \n \n \n\n\n \n Cetingul, H. E.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In Liew, A.; and Wang, S., editor(s), Visual Speech Recognition: Lip Segmentation and Mapping. IGI Global, 2008.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Cetingul2008,\nauthor = {Cetingul, H. Ertan and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {Visual Speech Recognition: Lip Segmentation and Mapping},\neditor = {Liew, A. and Wang, S.},\npublisher = {IGI Global},\ntitle = {{Multimodal Speaker Identification using Discriminative Lip Motion Features}},\nyear = {2008}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Dans figurlerinin isitsel-gorsel analizi icin isitsel ozniteliklerin degerlendirilmesi.\n \n \n \n\n\n \n Demir, Y.; Ofli, F.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, Didim, 2008. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Demir2008,\naddress = {Didim},\nauthor = {Demir, Yasemin and Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2008/Demir et al/2008 - Demir et al. - Dans figurlerinin isitsel-gorsel analizi icin isitsel ozniteliklerin degerlendirilmesi.pdf:pdf},\nlanguage = {Turkish},\ntitle = {{Dans figurlerinin isitsel-gorsel analizi icin isitsel ozniteliklerin degerlendirilmesi}},\nyear = {2008}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cok Bakisli Isitsel-Gorsel Dans Verilerinin Analizi ve Sentezi.\n \n \n \n\n\n \n Ofli, F.; Demir, Y.; Canton-Ferrer, C.; Tilmanne, J.; Balci, K.; Bozkurt, E.; Kizoglu, I.; Yemez, Y.; Erzin, E.; Tekalp, A. M.; Akarun, L.; and Erdem, A. T.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 761–764, Didim, 2008. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2008,\naddress = {Didim},\nauthor = {Ofli, Ferda and Demir, Yasemin and Canton-Ferrer, Cristian and Tilmanne, J. and Balci, K. and Bozkurt, Elif and Kizoglu, I. and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat and Akarun, Lale and Erdem, A. Tanju},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2008/Ofli et al/2008 - Ofli et al. - Cok Bakisli Isitsel-Gorsel Dans Verilerinin Analizi ve Sentezi.pdf:pdf},\nisbn = {978-1-4244-1998-2},\nlanguage = {Turkish},\npages = {761--764},\ntitle = {{Cok Bakisli Isitsel-Gorsel Dans Verilerinin Analizi ve Sentezi}},\nyear = {2008}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Evaluation of Audio Features for Audio-Visual Analysis of Dance Figures.\n \n \n \n\n\n \n Demir, Y.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In EUSIPCO: European Signal Processing Conference, Lausanne, Switzerland, 2008. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Demir2008a,\naddress = {Lausanne, Switzerland},\nauthor = {Demir, Yasemin and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {EUSIPCO: European Signal Processing Conference},\ntitle = {{Evaluation of Audio Features for Audio-Visual Analysis of Dance Figures}},\nyear = {2008}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Audio-driven human body motion analysis and synthesis.\n \n \n \n \n\n\n \n Ofli, F.; Canton-Ferrer, C.; Tilmanne, J.; Demir, Y.; Bozkurt, E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 2233–2236, mar 2008. Ieee\n \n\n\n\n
\n\n\n\n \n \n \"Audio-drivenPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2008b,\nauthor = {Ofli, Ferda and Canton-Ferrer, Cristian and Tilmanne, J. and Demir, Yasemin and Bozkurt, Elif and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\ndoi = {10.1109/ICASSP.2008.4518089},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2008/Ofli et al/2008 - Ofli et al. - Audio-driven human body motion analysis and synthesis.pdf:pdf},\nisbn = {978-1-4244-1483-3},\nissn = {1520-6149},\nmonth = {mar},\npages = {2233--2236},\npublisher = {Ieee},\ntitle = {{Audio-driven human body motion analysis and synthesis}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4518089},\nyear = {2008}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Analysis of Head Gesture and Prosody Patterns for Prosody-Driven Head-Gesture Animation.\n \n \n \n\n\n \n Sargin, M. E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n IEEE Transactions on Pattern Analysis and Machine Intelligence, 30(8): 1330–1345. aug 2008.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Sargin2008,\nabstract = {We propose a new two-stage framework for joint analysis of head gesture and speech prosody patterns of a speaker towards automatic realistic synthesis of head gestures from speech prosody. In the first stage analysis, we perform Hidden Markov Model (HMM) based unsupervised temporal segmentation of head gesture and speech prosody features separately to determine elementary head gesture and speech prosody patterns, respectively, for a particular speaker. In the second stage, joint analysis of correlations between these elementary head gesture and prosody patterns is performed using Multi-Stream HMMs to determine an audio-visual mapping model. The resulting audio-visual mapping model is then employed to synthesize natural head gestures from arbitrary input test speech given a head model for the speaker. In the synthesis stage, the audio-visual mapping model is used to predict a sequence of gesture patterns from the prosody pattern sequence computed for the input test speech. The Euler angles associated with each gesture pattern are then applied to animate the speaker head model. Objective and subjective evaluations indicate that the proposed synthesis by analysis scheme provides natural looking head gestures for the speaker with any input test speech, as well as in "prosody transplant" and gesture transplant" scenarios.},\nauthor = {Sargin, M. Emre and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\ndoi = {10.1109/TPAMI.2007.70797},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2008/Sargin et al/2008 - Sargin et al. 
- Analysis of Head Gesture and Prosody Patterns for Prosody-Driven Head-Gesture Animation.pdf:pdf},\nissn = {0162-8828},\njournal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},\nkeywords = {Algorithms,Automated,Automated: methods,Cluster Analysis,Computer-Assisted,Computer-Assisted: methods,Face,Face: physiology,Head,Head: physiology,Humans,Image Enhancement,Image Enhancement: methods,Image Interpretation,Imaging,Pattern Recognition,Reproducibility of Results,Sensitivity and Specificity,Speech,Speech: physiology,Three-Dimensional,Three-Dimensional: methods},\nmonth = {aug},\nnumber = {8},\npages = {1330--1345},\npmid = {18566489},\ntitle = {{Analysis of Head Gesture and Prosody Patterns for Prosody-Driven Head-Gesture Animation}},\nvolume = {30},\nyear = {2008}\n}\n
\n
\n\n\n
\n We propose a new two-stage framework for joint analysis of head gesture and speech prosody patterns of a speaker towards automatic realistic synthesis of head gestures from speech prosody. In the first stage analysis, we perform Hidden Markov Model (HMM) based unsupervised temporal segmentation of head gesture and speech prosody features separately to determine elementary head gesture and speech prosody patterns, respectively, for a particular speaker. In the second stage, joint analysis of correlations between these elementary head gesture and prosody patterns is performed using Multi-Stream HMMs to determine an audio-visual mapping model. The resulting audio-visual mapping model is then employed to synthesize natural head gestures from arbitrary input test speech given a head model for the speaker. In the synthesis stage, the audio-visual mapping model is used to predict a sequence of gesture patterns from the prosody pattern sequence computed for the input test speech. The Euler angles associated with each gesture pattern are then applied to animate the speaker head model. Objective and subjective evaluations indicate that the proposed synthesis by analysis scheme provides natural looking head gestures for the speaker with any input test speech, as well as in \"prosody transplant\" and gesture transplant\" scenarios.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2007\n \n \n (13)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Automatic classification,of musical genres using inter-genre similarity.\n \n \n \n \n\n\n \n Bagci, U.; and Erzin, E.\n\n\n \n\n\n\n IEEE Signal Processing Letters, 14(8): 521–524. aug 2007.\n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Bagci2007,\nauthor = {Bagci, Ulas and Erzin, Engin},\ndoi = {10.1109/LSP.2006.891320},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Bagci, Erzin/2007 - Bagci, Erzin - Automatic classification,of musical genres using inter-genre similarity.pdf:pdf},\nissn = {1070-9908},\njournal = {IEEE Signal Processing Letters},\nmonth = {aug},\nnumber = {8},\npages = {521--524},\ntitle = {{Automatic classification,of musical genres using inter-genre similarity}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4276724},\nvolume = {14},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Prosody-driven head-gesture animation.\n \n \n \n\n\n \n Sargin, M. E.; Erzin, E.; Yemez, Y.; Tekalp, A. M.; Erdem, A. T.; Erdem, C. E.; and Ozkan, M.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech, and Signal Processing, volume II, pages 677–680, 2007. IEEE Signal Proc Soc\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sargin2007a,\nauthor = {Sargin, M. Emre and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat and Erdem, A. Tanju and Erdem, Cigdem Eroglu and Ozkan, Mehmet},\nbooktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Sargin et al/2007 - Sargin et al. - Prosody-driven head-gesture animation.pdf:pdf},\nissn = {1520-6149},\norganization = {IEEE Signal Proc Soc},\npages = {677--680},\ntitle = {{Prosody-driven head-gesture animation}},\nvolume = {II},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Akustik Mikrofon Ses Yolu Parametrelerinin Girtlak Mikrofon Kayitlarindan Kestirimi.\n \n \n \n\n\n \n Akargun, U.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 747–750, Eskişehir, 2007. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Akargun2007a,\naddress = {Eskişehir},\nauthor = {Akargun, U.C. and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\ndoi = {10.1007/978-0-387-79582-9_13},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Akargun, Erzin/2007 - Akargun, Erzin - Akustik Mikrofon Ses Yolu Parametrelerinin Girtlak Mikrofon Kayitlarindan Kestirimi.pdf:pdf},\nisbn = {978-0-387-79581-2},\nlanguage = {Turkish},\npages = {747--750},\ntitle = {{Akustik Mikrofon Ses Yolu Parametrelerinin Girtlak Mikrofon Kayitlarindan Kestirimi}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Estimation of Acoustic Microphone Vocal Tract Parameters from Throat Microphone Recordings.\n \n \n \n\n\n \n Akargun, U.; and Erzin, E.\n\n\n \n\n\n\n In Takeda, K; Hansen, J.; Erdogan, H; and Abut, H, editor(s), In-Vehicle Corpus and Signal Processing for Driver Behavior, pages 747–750, Istanbul, 2007. NEDO Japan; US Natl Sci Fdn; Sabanci Univ Turkey; Nagoya Univ Japan\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Akargun2007,\naddress = {Istanbul},\nauthor = {Akargun, U.C. and Erzin, Engin},\nbooktitle = {In-Vehicle Corpus and Signal Processing for Driver Behavior},\ndoi = {10.1007/978-0-387-79582-9_13},\neditor = {Takeda, K and Hansen, JHL and Erdogan, H and Abut, H},\nisbn = {978-0-387-79581-2},\norganization = {NEDO Japan; US Natl Sci Fdn; Sabanci Univ Turkey; Nagoya Univ Japan},\npages = {747--750},\ntitle = {{Estimation of Acoustic Microphone Vocal Tract Parameters from Throat Microphone Recordings}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Gercekci Dudak Animasyonu icin Fonem ve Vizeme Dayali Akustik Birimlerin Karsilastirilmasi.\n \n \n \n\n\n \n Bozkurt, E.; Erdem, C. E.; Erzin, E.; Erdem, A. T.; and Ozkan, M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 85–88, Eskişehir, 2007. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2007,\nabstract = {Natural looking lip animation, synchronized with incoming speech, is essential for realistic character animation. In this work, we evaluate the performance of phone and viseme based acoustic units, with and without context information, for generating realistic lip synchronization using HMM based recognition systems. We conclude via objective evaluations that utilization of viseme based units with context information outperforms the other methods.},\naddress = {Eskişehir},\nauthor = {Bozkurt, Elif and Erdem, Cigdem Eroglu and Erzin, Engin and Erdem, A. Tanju and Ozkan, Mehmet},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nisbn = {978-1-4244-0721-7},\nlanguage = {Turkish},\npages = {85--88},\ntitle = {{Gercekci Dudak Animasyonu icin Fonem ve Vizeme Dayali Akustik Birimlerin Karsilastirilmasi}},\nyear = {2007}\n}\n
\n
\n\n\n
\n Natural looking lip animation, synchronized with incoming speech, is essential for realistic character animation. In this work, we evaluate the performance of phone and viseme based acoustic units, with and without context information, for generating realistic lip synchronization using HMM based recognition systems. We conclude via objective evaluations that utilization of viseme based units with context information outperforms the other methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Kisisellestirilmis Yuz Jest Oruntulerinin Kestirimi.\n \n \n \n \n\n\n \n Ofli, F.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 596–599, Eskişehir, jun 2007. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"KisisellestirilmisPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2007,\naddress = {Eskişehir},\nauthor = {Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\ndoi = {10.1109/SIU.2007.4298615},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Ofli et al/2007 - Ofli et al. - Kisisellestirilmis Yuz Jest Oruntulerinin Kestirimi.pdf:pdf},\nisbn = {978-1-4244-0719-4},\nlanguage = {Turkish},\nmonth = {jun},\norganization = {IEEE},\npages = {596--599},\ntitle = {{Kisisellestirilmis Yuz Jest Oruntulerinin Kestirimi}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4298615},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Joint Correlation Analysis of Audio-Visual Dance Figures.\n \n \n \n\n\n \n Ofli, F.; Demir, Y.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Multimedia and Expo, pages 1703–1706, 2007. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2007b,\nauthor = {Ofli, Ferda and Demir, Yasemin and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Multimedia and Expo},\nisbn = {978-1-4244-1016-3},\npages = {1703--1706},\ntitle = {{Joint Correlation Analysis of Audio-Visual Dance Figures}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Sakli Markov Modelleri Araciligi ile Gen Duzenlenmelerinin Mikrodizi Verilerinden Ogrenilmesi.\n \n \n \n\n\n \n Abali, A. O.; Erzin, E.; and Gursoy, A.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 71–74, Eskişehir, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Abali2007,\naddress = {Eskişehir},\nannote = {\n        \n\n      },\nauthor = {Abali, Ali Ozgur and Erzin, Engin and Gursoy, Attila},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Abali, Erzin, Gursoy/2007 - Abali, Erzin, Gursoy - Sakli Markov Modelleri Araciligi ile Gen Duzenlenmelerinin Mikrodizi Verilerinden Ogrenilmesi.pdf:pdf},\nisbn = {978-1-4244-0719-4},\nlanguage = {Turkish},\norganization = {IEEE},\npages = {71--74},\ntitle = {{Sakli Markov Modelleri Araciligi ile Gen Duzenlenmelerinin Mikrodizi Verilerinden Ogrenilmesi}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Comparison of Phoneme and Viseme Based Acoustic Units for Speech Driven Realistic Lip Animation.\n \n \n \n\n\n \n Bozkurt, E.; Erdem, C. E.; Erzin, E.; Erdem, A. T.; and Ozkan, M.\n\n\n \n\n\n\n In 3DTV-CONFERENCE: The True Vision-Capture, Transmission and Display of 3D Video, Kos Island, may 2007. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bozkurt2007a,\naddress = {Kos Island},\nauthor = {Bozkurt, Elif and Erdem, Cigdem Eroglu and Erzin, Engin and Erdem, A. Tanju and Ozkan, Mehmet},\nbooktitle = {3DTV-CONFERENCE: The True Vision-Capture, Transmission and Display of 3D Video},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Bozkurt et al/2007 - Bozkurt et al. - Comparison of Phoneme and Viseme Based Acoustic Units for Speech Driven Realistic Lip Animation.pdf:pdf},\nmonth = {may},\ntitle = {{Comparison of Phoneme and Viseme Based Acoustic Units for Speech Driven Realistic Lip Animation}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Estimation and Analysis of Facial Animation Parameter Patterns.\n \n \n \n\n\n \n Ofli, F.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Image Processing, pages 293–296, 2007. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2007c,\nauthor = {Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Image Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Ofli et al/2007 - Ofli et al. - Estimation and Analysis of Facial Animation Parameter Patterns.pdf:pdf},\npages = {293--296},\ntitle = {{Estimation and Analysis of Facial Animation Parameter Patterns}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Audiovisual Synchronization and Fusion Using Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Sargin, M. E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n IEEE Transactions on Multimedia, 9(7): 1396–1403. nov 2007.\n \n\n\n\n
\n\n\n\n \n \n \"AudiovisualPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Sargin2007,\nauthor = {Sargin, M. Emre and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\ndoi = {10.1109/TMM.2007.906583},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Sargin et al/2007 - Sargin et al. - Audiovisual Synchronization and Fusion Using Canonical Correlation Analysis.pdf:pdf},\nissn = {1520-9210},\njournal = {IEEE Transactions on Multimedia},\nmonth = {nov},\nnumber = {7},\npages = {1396--1403},\ntitle = {{Audiovisual Synchronization and Fusion Using Canonical Correlation Analysis}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4351913},\nvolume = {9},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Estimation and analysis of facial animation parameter patterns.\n \n \n \n\n\n \n Ofli, F.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Image Processing, pages 1989–1992, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2007d,\nauthor = {Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Image Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Ofli et al/2007 - Ofli et al. - Estimation and Analysis of Facial Animation Parameter Patterns.pdf:pdf},\nisbn = {978-1-4244-1436-9},\nissn = {1522-4880},\norganization = {IEEE},\npages = {1989--1992},\ntitle = {{Estimation and analysis of facial animation parameter patterns}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multicamera audio-visual analysis of dance figures.\n \n \n \n\n\n \n Ofli, F.; Demir, Y.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Multimedia and Expo, pages 1703–1706, 2007. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Ofli2007a,\nauthor = {Ofli, Ferda and Demir, Yasemin and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Multimedia and Expo},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2007/Ofli et al/2007 - Ofli et al. - Multicamera audio-visual analysis of dance figures.pdf:pdf},\nisbn = {978-1-4244-1016-3},\npages = {1703--1706},\ntitle = {{Multicamera audio-visual analysis of dance figures}},\nyear = {2007}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Multimodal Person Recognition for Human-Vehicle Interaction.\n \n \n \n \n\n\n \n Erzin, E.; Yemez, Y.; Tekalp, A. M.; Ercil, A.; Erdogan, H.; and Abut, H.\n\n\n \n\n\n\n IEEE Multimedia, 13(2): 18–31. 2006.\n \n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Erzin2006,\nauthor = {Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat and Ercil, Aytul and Erdogan, Hakan and Abut, H.},\ndoi = {10.1109/MMUL.2006.37},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Erzin et al/2006 - Erzin et al. - Multimodal Person Recognition for Human-Vehicle Interaction.pdf:pdf},\nissn = {1070-986X},\njournal = {IEEE Multimedia},\nnumber = {2},\npages = {18--31},\ntitle = {{Multimodal Person Recognition for Human-Vehicle Interaction}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1621030},\nvolume = {13},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Konusma ile Surulen Kafa Jesti Analizi ve Sentezi.\n \n \n \n\n\n \n Sargin, M. E.; Erzin, E.; Yemez, Y.; Tekalp, A. M.; and Erdem, A. T.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 237–240, Antalya, 2006. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sargin2006b,\naddress = {Antalya},\nauthor = {Sargin, M. Emre and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat and Erdem, A. Tanju},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Sargin et al/2006 - Sargin et al. - Konusma ile Surulen Kafa Jesti Analizi ve Sentezi.pdf:pdf},\nisbn = {978-1-4244-0238-0},\nlanguage = {Turkish},\norganization = {IEEE},\npages = {237--240},\ntitle = {{Konusma ile Surulen Kafa Jesti Analizi ve Sentezi}},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Discriminative analysis of lip motion features for speaker identification and speech-reading.\n \n \n \n \n\n\n \n Cetingul, H. E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n IEEE Transactions on Image Processing, 15(10): 2879–2891. oct 2006.\n \n\n\n\n
\n\n\n\n \n \n \"DiscriminativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Cetingul2006,\nauthor = {Cetingul, H. Ertan and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\ndoi = {10.1109/TIP.2006.877528},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Cetingul et al/2006 - Cetingul et al. - Discriminative analysis of lip motion features for speaker identification and speech-reading.pdf:pdf},\nissn = {1057-7149},\njournal = {IEEE Transactions on Image Processing},\nmonth = {oct},\nnumber = {10},\npages = {2879--2891},\ntitle = {{Discriminative analysis of lip motion features for speaker identification and speech-reading}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1703580},\nvolume = {15},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Muzik Turlerinin Siniflandirilmasinda Benzer Kesisim Bilgileri Uygulamalari.\n \n \n \n\n\n \n Bagci, U.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 214–217, Antalya, 2006. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bagci2006,\naddress = {Antalya},\nauthor = {Bagci, Ulas and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Bagci, Erzin/2006 - Bagci, Erzin - Muzik Turlerinin Siniflandirilmasinda Benzer Kesisim Bilgileri Uygulamalari.pdf:pdf},\nisbn = {978-1-4244-0238-0},\nlanguage = {Turkish},\norganization = {IEEE},\npages = {214--217},\ntitle = {{Muzik Turlerinin Siniflandirilmasinda Benzer Kesisim Bilgileri Uygulamalari}},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Extracting Gene Regulation Information from Microarray Time-Series Data Using Hidden Markov Models.\n \n \n \n\n\n \n Yogurtcu, O. N.; Erzin, E.; and Gursoy, A.\n\n\n \n\n\n\n In Levi, A; Savas, E; Yenigun, H; Balcisory, S; and Saygin, Y, editor(s), Computer and Information Sciences - ISCIS, Proceedings, volume LNCS 4263, pages 144–153, 2006. Sabanci Univ, Fac Engn & Nat Sci; Sci & Technol Res Council Turkey; Sabanci Univ; Inst Elect & Elect Engineers, Turkey Sect; IFIP, Springer\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Yogurtcu2006,\nauthor = {Yogurtcu, Osman N. and Erzin, Engin and Gursoy, Attila},\nbooktitle = {Computer and Information Sciences - ISCIS, Proceedings},\neditor = {Levi, A and Savas, E and Yenigun, H and Balcisory, S and Saygin, Y},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Yogurtcu, Erzin, Gursoy/2006 - Yogurtcu, Erzin, Gursoy - Extracting Gene Regulation Information from Microarray Time-Series Data Using Hidden Markov Models.pdf:pdf},\nisbn = {3-540-47242-8},\nissn = {0302-9743},\norganization = {Sabanci Univ, Fac Engn & Nat Sci; Sci & Technol Res Council Turkey; Sabanci Univ; Inst Elect & Elect Engineers, Turkey Sect; IFIP},\npages = {144--153},\npublisher = {Springer},\ntitle = {{Extracting Gene Regulation Information from Microarray Time-Series Data Using Hidden Markov Models}},\nvolume = {LNCS 4263},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multimodal speaker identification using canonical correlation analysis.\n \n \n \n\n\n \n Sargin, M. E.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 613–616, Toulouse, may 2006. IEEE Signal Proc Soc\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sargin2006a,\naddress = {Toulouse},\nauthor = {Sargin, M. Emre and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Sargin et al/2006 - Sargin et al. - Multimodal speaker identification using canonical correlation analysis.pdf:pdf},\nisbn = {978-1-4244-0468-1},\nissn = {1520-6149},\nmonth = {may},\norganization = {IEEE Signal Proc Soc},\npages = {613--616},\ntitle = {{Multimodal speaker identification using canonical correlation analysis}},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Muzik Turlerinin Siniflandirmasinda Siniflandiricilarin Yukseltilmesi.\n \n \n \n\n\n \n Bagci, U.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, Antalya, 2006. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bagci2006a,\naddress = {Antalya},\nauthor = {Bagci, Ulas and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Bagci, Erzin/2006 - Bagci, Erzin - Muzik Turlerinin Siniflandirmasinda Siniflandiricilarin Yukseltilmesi.pdf:pdf},\nlanguage = {Turkish},\ntitle = {{Muzik Turlerinin Siniflandirmasinda Siniflandiricilarin Yukseltilmesi}},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Konusma ve Ses Sikistirma.\n \n \n \n\n\n \n Erzin, E.\n\n\n \n\n\n\n In Turkiye Bilisim Ansiklopedisi, pages 543–548. Papatya Yayincilik, 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Erzin2006a,\nauthor = {Erzin, Engin},\nbooktitle = {Turkiye Bilisim Ansiklopedisi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Erzin/2006 - Erzin - Konusma ve Ses Sikistirma.pdf:pdf},\nlanguage = {Turkish},\npages = {543--548},\npublisher = {Papatya Yayincilik},\ntitle = {{Konusma ve Ses Sikistirma}},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Combined Gesture-Speech Analysis and Speech Driven Gesture Synthesis.\n \n \n \n\n\n \n Sargin, M. E.; Aran, O.; Karpov, A.; Ofli, F.; Yasinnik, Y.; Wilson, S.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Multimedia and Expo, pages 893–896, Toronto, 2006. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sargin2006,\naddress = {Toronto},\nauthor = {Sargin, M. Emre and Aran, O. and Karpov, A. and Ofli, Ferda and Yasinnik, Y. and Wilson, S. and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Multimedia and Expo},\ndoi = {10.1109/ICME.2006.262663},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Sargin et al/2006 - Sargin et al. - Combined Gesture-Speech Analysis and Speech Driven Gesture Synthesis.pdf:pdf},\nisbn = {978-1-4244-0366-0},\npages = {893--896},\ntitle = {{Combined Gesture-Speech Analysis and Speech Driven Gesture Synthesis}},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal speaker/speech recognition using lip motion, lip texture and audio.\n \n \n \n \n\n\n \n Cetingul, H. E.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n Signal Processing, 86(12): 3549–3558. dec 2006.\n \n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Cetingul2006a,\nauthor = {Cetingul, H. Ertan and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\ndoi = {10.1016/j.sigpro.2006.02.045},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2006/Cetingul et al/2006 - Cetingul et al. - Multimodal speakerspeech recognition using lip motion, lip texture and audio.pdf:pdf},\nissn = {01651684},\njournal = {Signal Processing},\nkeywords = {decision fusion,isolated word recognition,lip motion,lip reading,speaker identification},\nmonth = {dec},\nnumber = {12},\npages = {3549--3558},\ntitle = {{Multimodal speaker/speech recognition using lip motion, lip texture and audio}},\nurl = {http://linkinghub.elsevier.com/retrieve/pii/S0165168406001344},\nvolume = {86},\nyear = {2006}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (9)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Konusmaci Tanima icin Karsilastirmali Dudak Devinim Analizi.\n \n \n \n\n\n \n Cetingul, H. E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, Kayseri, 2005. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cetingul2005a,\naddress = {Kayseri},\nauthor = {Cetingul, H. Ertan and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nlanguage = {Turkish},\ntitle = {{Konusmaci Tanima icin Karsilastirmali Dudak Devinim Analizi}},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Boosting classifiers for music genre classification.\n \n \n \n\n\n \n Bagci, U.; and Erzin, E.\n\n\n \n\n\n\n In Yolum, P; Gungor, T; Gurgen, F; and Ozturan, C, editor(s), Computer and Information Sciences - ISCIS, Proceedings, volume LNCS 3733, pages 575–584, 2005. Springer-Verlag\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bagci2005,\nauthor = {Bagci, Ulas and Erzin, Engin},\nbooktitle = {Computer and Information Sciences - ISCIS, Proceedings},\neditor = {Yolum, P and Gungor, T and Gurgen, F and Ozturan, C},\nisbn = {3-540-29414-7},\nissn = {0302-9743},\npages = {575--584},\npublisher = {Springer-Verlag},\ntitle = {{Boosting classifiers for music genre classification}},\nvolume = {LNCS 3733},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Lip Feature Extraction based on Audio-Visual Correlation.\n \n \n \n\n\n \n Sargin, M. E.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In EUSIPCO: European Signal Processing Conference, Antalya, 2005. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sargin2005a,\naddress = {Antalya},\nauthor = {Sargin, M. Emre and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {EUSIPCO: European Signal Processing Conference},\ntitle = {{Lip Feature Extraction based on Audio-Visual Correlation}},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Robust lip-motion features for speaker identification.\n \n \n \n\n\n \n Cetingul, H. E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 509–512, Philadelphia, mar 2005. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cetingul2005,\naddress = {Philadelphia},\nauthor = {Cetingul, H. Ertan and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nisbn = {0-7803-8874-7},\nissn = {1520-6149},\nmonth = {mar},\norganization = {IEEE},\npages = {509--512},\ntitle = {{Robust lip-motion features for speaker identification}},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint Audio-Video Processing for Robust Biometric Speaker Identification in Car.\n \n \n \n \n\n\n \n Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In DSP for In-Vehicle and Mobile Systems, pages 237–256. Springer, 2005.\n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Erzin2005a,\nauthor = {Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {DSP for In-Vehicle and Mobile Systems},\npages = {237--256},\npublisher = {Springer},\ntitle = {{Joint Audio-Video Processing for Robust Biometric Speaker Identification in Car}},\nurl = {http://link.springer.com/chapter/10.1007/0-387-22979-5_16},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Nicemlenmis Yansima Katsayilariyla Muzikal Enstruman Tanima.\n \n \n \n\n\n \n Yogurtcu, O. N.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, Kayseri, 2005. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Yogurtcu2005,\naddress = {Kayseri},\nauthor = {Yogurtcu, Osman N. and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2005/Yogurtcu, Erzin/2005 - Yogurtcu, Erzin - Nicemlenmis Yansima Katsayilariyla Muzikal Enstruman Tanima.pdf:pdf},\nlanguage = {Turkish},\ntitle = {{Nicemlenmis Yansima Katsayilariyla Muzikal Enstruman Tanima}},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Use of Lip Information for Robust Speaker Identification and Speech Recognition.\n \n \n \n\n\n \n Cetingul, H. E.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In Biennial on DSP for In-Vehicle and Mobile Systems, Sesimbra, sep 2005. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cetingul2005b,\naddress = {Sesimbra},\nauthor = {Cetingul, H. Ertan and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {Biennial on DSP for In-Vehicle and Mobile Systems},\nmonth = {sep},\ntitle = {{Use of Lip Information for Robust Speaker Identification and Speech Recognition}},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal speaker identification using an adaptive classifier cascade based on modality reliability.\n \n \n \n \n\n\n \n Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n IEEE Transactions on Multimedia, 7(5): 840–852. oct 2005.\n \n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Erzin2005,\nauthor = {Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\ndoi = {10.1109/TMM.2005.854464},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2005/Erzin, Yemez, Tekalp/2005 - Erzin, Yemez, Tekalp - Multimodal speaker identification using an adaptive classifier cascade based on modality reliability.pdf:pdf},\nissn = {1520-9210},\njournal = {IEEE Transactions on Multimedia},\nmonth = {oct},\nnumber = {5},\npages = {840--852},\ntitle = {{Multimodal speaker identification using an adaptive classifier cascade based on modality reliability}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1510631},\nvolume = {7},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Gorsel-isitsel ilintiye Dayali Dudak oznitelik cikarimi.\n \n \n \n\n\n \n Sargin, M. E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, Kayseri, 2005. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sargin2005,\naddress = {Kayseri},\nauthor = {Sargin, M. Emre and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2005/Sargin et al/2005 - Sargin et al. - Gorsel-isitsel ilintiye Dayali Dudak oznitelik cikarimi.pdf:pdf},\nlanguage = {Turkish},\ntitle = {{Gorsel-isitsel ilintiye Dayali Dudak oznitelik cikarimi}},\nyear = {2005}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2004\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Discriminative lip-motion features for biometric speaker identification.\n \n \n \n\n\n \n Cetingul, H. E.; Yemez, Y; Erzin, E.; and Tekalp, A M\n\n\n \n\n\n\n In IEEE International Conference on Image Processing, pages 2023–2026, 2004. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cetingul2004a,\nauthor = {Cetingul, H. Ertan and Yemez, Y and Erzin, Engin and Tekalp, A M},\nbooktitle = {IEEE International Conference on Image Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2004/Cetingul et al/2004 - Cetingul et al. - Discriminative lip-motion features for biometric speaker identification.pdf:pdf},\npages = {2023--2026},\ntitle = {{Discriminative lip-motion features for biometric speaker identification}},\nyear = {2004}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Biyometrik Konusmaci Tanima icin Dudak Devinimi Kullanimi.\n \n \n \n\n\n \n Cetingul, H. E.; Yemez, Y.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 148–151, Kuşadası, Izmir, 2004. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cetingul2004c,\naddress = {Kuşadası, Izmir},\nauthor = {Cetingul, H. Ertan and Yemez, Yucel and Erzin, Engin and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2004/Cetingul et al/2004 - Cetingul et al. - Biyometrik Konusmaci Tanima icin Dudak Devinimi Kullanimi.pdf:pdf},\nlanguage = {Turkish},\npages = {148--151},\ntitle = {{Biyometrik Konusmaci Tanima icin Dudak Devinimi Kullanimi}},\nyear = {2004}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multimodal Audio-Visual Speaker Identification.\n \n \n \n\n\n \n Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In Special Workshop in MAUI (SWIM): Lectures by Masters in Speech Processing, Maui, Hawaii, jan 2004. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin2004a,\naddress = {Maui, Hawaii},\nauthor = {Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {Special Workshop in MAUI (SWIM): Lectures by Masters in Speech Processing},\nmonth = {jan},\ntitle = {{Multimodal Audio-Visual Speaker Identification}},\nyear = {2004}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Uyarlanabilir Gurultu Temizleme ile Dayanikli Ses Tanima.\n \n \n \n\n\n \n Akyol, E.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 407–409, Kuşadası, Izmir, 2004. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Akyol2004,\naddress = {Kuşadası, Izmir},\nauthor = {Akyol, E. and Erzin, Engin and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\ndoi = {10.1109/SIU.2004.1338549},\nisbn = {0-7803-8318-4},\nlanguage = {Turkish},\npages = {407--409},\ntitle = {{Uyarlanabilir Gurultu Temizleme ile Dayanikli Ses Tanima}},\nyear = {2004}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n On optimal selection of lip-motion features for speaker identification.\n \n \n \n\n\n \n Cetingul, H. E.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE 6th Workshop on Multimedia Signal Processing, pages 7–10, Siena, 2004. IEEE Signal Proc Soc, Multimedia Signal Proc Tech Comm; Univ Degli Studi di Siena; Monte dei Paschi de Siena\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cetingul2004b,\naddress = {Siena},\nauthor = {Cetingul, H. Ertan and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE 6th Workshop on Multimedia Signal Processing},\nisbn = {0-7803-8578-0},\norganization = {IEEE Signal Proc Soc, Multimedia Signal Proc Tech Comm; Univ Degli Studi di Siena; Monte dei Paschi de Siena},\npages = {7--10},\ntitle = {{On optimal selection of lip-motion features for speaker identification}},\nyear = {2004}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Adaptive Classifier Cascade for Multimodal Speaker Identification.\n \n \n \n\n\n \n Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In INTERSPEECH: Annual Conference of the International Speech Communication Association, 2004. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin2004,\nauthor = {Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {INTERSPEECH: Annual Conference of the International Speech Communication Association},\ntitle = {{Adaptive Classifier Cascade for Multimodal Speaker Identification}},\nyear = {2004}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2003\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Multimodal speaker identification with audio-video processing.\n \n \n \n \n\n\n \n Yemez, Y.; Kanak, A.; Erzin, E.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Image Processing, pages 5–8, sep 2003. \n \n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Yemez2003,\nauthor = {Yemez, Yucel and Kanak, A. and Erzin, Engin and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Image Processing},\ndoi = {10.1109/ICIP.2003.1247167},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2003/Yemez et al/2003 - Yemez et al. - Multimodal speaker identification with audio-video processing.pdf:pdf},\nisbn = {0-7803-7750-8},\nissn = {1520-6149},\nmonth = {sep},\npages = {5--8},\ntitle = {{Multimodal speaker identification with audio-video processing}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1247167},\nyear = {2003}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Speech coding and decoding in a voice communication system.\n \n \n \n\n\n \n Erzin, E.\n\n\n \n\n\n\n feb 2003.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Erzin2003a,\nauthor = {Erzin, Engin},\ninstitution = {Lucent Technologies},\nmonth = {feb},\nnumber = {Application No. 20040167772},\ntitle = {{Speech coding and decoding in a voice communication system}},\ntype = {US Patent},\nyear = {2003}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Joint audio-video processing for biometric speaker identification.\n \n \n \n\n\n \n Kanak, A.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 377–380, 2003. IEEE Signal Proc Soc\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Kanak2003,\nauthor = {Kanak, A. and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nisbn = {0-7803-7663-3},\nissn = {1520-6149},\norganization = {IEEE Signal Proc Soc},\npages = {377--380},\ntitle = {{Joint audio-video processing for biometric speaker identification}},\nyear = {2003}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Joint Audio-Video Pprocessing for Robust Biometric Speaker Identification in Car.\n \n \n \n\n\n \n Kanak, A.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In Workshop on DSP in Mobile and Vehicular Systems, Nagoya, apr 2003. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Kanak2003a,\naddress = {Nagoya},\nauthor = {Kanak, A. and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {Workshop on DSP in Mobile and Vehicular Systems},\nmonth = {apr},\ntitle = {{Joint Audio-Video Pprocessing for Robust Biometric Speaker Identification in Car}},\nyear = {2003}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Biyometrik konusmaci tanima icin birlesik ses-goruntu isleme.\n \n \n \n\n\n \n Kanak, A.; Erzin, E.; Yemez, Y.; and Tekalp, A. M.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 632–635, Istanbul, 2003. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Kanak2003b,\naddress = {Istanbul},\nauthor = {Kanak, A. and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nlanguage = {Turkish},\npages = {632--635},\ntitle = {{Biyometrik konusmaci tanima icin birlesik ses-goruntu isleme}},\nyear = {2003}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2002\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Ongorulu Donusum ile Genis Bantli Konusmanin 13 kb/s Hizinda Kodlanmasi.\n \n \n \n\n\n \n Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi, pages 459–464, Pamukkale, Denizli, 2002. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin2002a,\naddress = {Pamukkale, Denizli},\nauthor = {Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme ve Iletisim Uygulamalari Kurultayi},\nlanguage = {Turkish},\npages = {459--464},\ntitle = {{Ongorulu Donusum ile Genis Bantli Konusmanin 13 kb/s Hizinda Kodlanmasi}},\nyear = {2002}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Shaped Fixed Codebook Search for CELP Speech Coding.\n \n \n \n\n\n \n Erzin, E.; and Recchione, M. C.\n\n\n \n\n\n\n sep 2002.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Erzin2002,\nauthor = {Erzin, Engin and Recchione, M. C.},\ninstitution = {Lucent Technologies},\nmonth = {sep},\nnumber = {No. 6449313},\ntitle = {{Shaped Fixed Codebook Search for CELP Speech Coding}},\ntype = {US Patent},\nyear = {2002}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2000\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Shaped fixed codebook search for CELP coding at low bit rates.\n \n \n \n \n\n\n \n Erzin, E.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, volume 3, of International Conference on Acoustics Speech and Signal Processing (ICASSP), pages 1495–1498, Istanbul, jun 2000. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"ShapedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin2000,\nabstract = {This paper presents a new shaped fixed codebook (FCB) search technique for Code Excited Linear Predictive (CELP) coding. The state of art CELP coding techniques operate at rates above 4.0 kbps, as it gets harder to build a good FCB contribution with a minimal bit budget. In this paper the shaped FCB search is presented to ease this problem, and achieve a better FCB contribution with a reduced bit budget. The shaped FCB search integrated to a 4 kbps CELP coder is presented and the subjective performance results are reported which show that the coder is significantly better than the IS-127 half rate coder at 4 kbps.},\naddress = {Istanbul},\nauthor = {Erzin, Engin},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\ndoi = {10.1109/ICASSP.2000.861920},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/2000/Erzin/2000 - Erzin - Shaped fixed codebook search for CELP coding at low bit rates.pdf:pdf},\nisbn = {0-7803-6293-4},\nissn = {1520-6149},\nmonth = {jun},\npages = {1495--1498},\npublisher = {IEEE},\nseries = {International Conference on Acoustics Speech and Signal Processing (ICASSP)},\ntitle = {{Shaped fixed codebook search for CELP coding at low bit rates}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=861920},\nvolume = {3},\nyear = {2000}\n}\n
\n
\n\n\n
\n This paper presents a new shaped fixed codebook (FCB) search technique for Code Excited Linear Predictive (CELP) coding. The state of art CELP coding techniques operate at rates above 4.0 kbps, as it gets harder to build a good FCB contribution with a minimal bit budget. In this paper the shaped FCB search is presented to ease this problem, and achieve a better FCB contribution with a reduced bit budget. The shaped FCB search integrated to a 4 kbps CELP coder is presented and the subjective performance results are reported which show that the coder is significantly better than the IS-127 half rate coder at 4 kbps.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1999\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Teager energy based feature parameters for speech recognition in car noise.\n \n \n \n \n\n\n \n Jabloun, F.; Cetin, A. E.; and Erzin, E.\n\n\n \n\n\n\n IEEE Signal Processing Letters, 6(10): 259–261. oct 1999.\n \n\n\n\n
\n\n\n\n \n \n \"TeagerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Jabloun1999,\nabstract = {In this letter, a new set of speech feature parameters based on multirate signal processing and the Teager energy operator is introduced. The speech signal is first divided into nonuniform subbands in mel-scale using a multirate filterbank, then the Teager energies of the subsignals are estimated. Finally, the feature vector is constructed by log-compression and inverse discrete cosine transform (DCT) computation. The nem feature parameters have robust speech recognition performance in the presence of car engine noise.},\nauthor = {Jabloun, F. and Cetin, A. Enis and Erzin, Engin},\ndoi = {10.1109/97.789604},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/1999/Jabloun, Cetin, Erzin/1999 - Jabloun, Cetin, Erzin - Teager energy based feature parameters for speech recognition in car noise.pdf:pdf},\nissn = {10709908},\njournal = {IEEE Signal Processing Letters},\nmonth = {oct},\nnumber = {10},\npages = {259--261},\ntitle = {{Teager energy based feature parameters for speech recognition in car noise}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=789604},\nvolume = {6},\nyear = {1999}\n}\n
\n
\n\n\n
\n In this letter, a new set of speech feature parameters based on multirate signal processing and the Teager energy operator is introduced. The speech signal is first divided into nonuniform subbands in mel-scale using a multirate filterbank, then the Teager energies of the subsignals are estimated. Finally, the feature vector is constructed by log-compression and inverse discrete cosine transform (DCT) computation. The nem feature parameters have robust speech recognition performance in the presence of car engine noise.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1997\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Natural quality variable-rate spectral speech coding below 3.0 kbps.\n \n \n \n\n\n \n Erzin, E.; Kumar, A.; and Gersho, A.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 1579–1582, apr 1997. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin1997,\nabstract = {We propose new techniques for natural quality variable rate spectral speech coding at an average rate of 2.2 kbps for dialog speech and 2.8 kbps for monolog speech. The coder models the Fourier spectrum of each frame and it builds on recent enhancements to the classical multiband excitation (MBE) approach. New techniques for robust pitch estimation and tracking, for efficient quantization of voiced and unvoiced spectra and encoding of partial phase information are the key features that result in improved quality over earlier spectral vocoders. Subjective performance results are reported which show that the coder is very close in quality to the ITU-T G.723.1 algorithm at 5.3 kbps.},\nauthor = {Erzin, Engin and Kumar, A. and Gersho, A.},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nisbn = {0-8186-7920-4},\nmonth = {apr},\npages = {1579--1582},\ntitle = {{Natural quality variable-rate spectral speech coding below 3.0 kbps}},\nyear = {1997}\n}\n
\n
\n\n\n
\n We propose new techniques for natural quality variable rate spectral speech coding at an average rate of 2.2 kbps for dialog speech and 2.8 kbps for monolog speech. The coder models the Fourier spectrum of each frame and it builds on recent enhancements to the classical multiband excitation (MBE) approach. New techniques for robust pitch estimation and tracking, for efficient quantization of voiced and unvoiced spectra and encoding of partial phase information are the key features that result in improved quality over earlier spectral vocoders. Subjective performance results are reported which show that the coder is very close in quality to the ITU-T G.723.1 algorithm at 5.3 kbps.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1995\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Subband Analysis for Robust Speech Recognition in the Presence of Car Noise.\n \n \n \n\n\n \n Erzin, E.; Cetin, A. E.; and Yardimci, Y.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 417–420, may 1995. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin1995,\nauthor = {Erzin, Engin and Cetin, A. Enis and Yardimci, Y.},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nisbn = {0-7803-2432-3},\nmonth = {may},\npages = {417--420},\ntitle = {{Subband Analysis for Robust Speech Recognition in the Presence of Car Noise}},\nyear = {1995}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Konusma Analiz Sistemi (KASIS).\n \n \n \n\n\n \n Demirekler, M.; Cetin, A. E.; Nakiboglu, B.; Erzin, E.; Cetin, D.; and Yildirim, F.\n\n\n \n\n\n\n In Konusma Isleme Calistayi, pages 59–62, Ankara, 1995. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Demirekler1995a,\naddress = {Ankara},\nauthor = {Demirekler, M{\\"{u}}beccel and Cetin, A. Enis and Nakiboglu, B. and Erzin, Engin and Cetin, D. and Yildirim, F.},\nbooktitle = {Konusma Isleme Calistayi},\nlanguage = {Turkish},\npages = {59--62},\ntitle = {{Konusma Analiz Sistemi (KASIS)}},\nyear = {1995}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Araç ici gurbuz ses tanima icin alt bant analizi.\n \n \n \n\n\n \n Erzin, E.; Cetin, A. E.; and Yardimci, Y.\n\n\n \n\n\n\n In SIU: Sinyal Isleme Uygulamaları Kurultayi, pages 120–125, Kapadokya, Nevsehir, 1995. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin1995b,\naddress = {Kapadokya, Nevsehir},\nauthor = {Erzin, Engin and Cetin, A. Enis and Yardimci, Y.},\nbooktitle = {SIU: Sinyal Isleme Uygulamaları Kurultayi},\nlanguage = {Turkish},\npages = {120--125},\ntitle = {{Ara{\\c{c}} ici gurbuz ses tanima icin alt bant analizi}},\nyear = {1995}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Line Spectral Frequency Representation of Subbands for Speech Recognition.\n \n \n \n\n\n \n Erzin, E.; and Cetin, A. E.\n\n\n \n\n\n\n Signal Processing, 44(1): 117–119. jun 1995.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Erzin1995a,\nabstract = {In this paper, a new set of speech feature parameters is constructed from subband analysis based Line Spectral Frequencies (LSFs). The speech signal is divided into several subbands and the resulting subsignals are represented by LSFs. The performance of the new speech feature parameters, SUBLSFs, is compared with the widely used Mel Scale Cepstral Coefficients (MELCEPs). SUBLSFs are observed to be more robust than the MELCEPs in the presence of car noise.},\nauthor = {Erzin, Engin and Cetin, A. Enis},\ndoi = {10.1016/0165-1684(95)00038-F},\nissn = {0165-1684},\njournal = {Signal Processing},\nmonth = {jun},\nnumber = {1},\npages = {117--119},\ntitle = {{Line Spectral Frequency Representation of Subbands for Speech Recognition}},\nvolume = {44},\nyear = {1995}\n}\n
\n
\n\n\n
\n In this paper, a new set of speech feature parameters is constructed from subband analysis based Line Spectral Frequencies (LSFs). The speech signal is divided into several subbands and the resulting subsignals are represented by LSFs. The performance of the new speech feature parameters, SUBLSFs, is compared with the widely used Mel Scale Cepstral Coefficients (MELCEPs). SUBLSFs are observed to be more robust than the MELCEPs in the presence of car noise.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Adaptive filtering approaches for non-Gaussian stable processes.\n \n \n \n\n\n \n Arikan, O.; Belge, M.; Cetin, A. E.; and Erzin, E.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages 1400–1403, may 1995. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Arikan1995,\nauthor = {Arikan, Orhan and Belge, Murat and Cetin, A. Enis and Erzin, Engin},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/1995/Arikan et al/1995 - Arikan et al. - Adaptive filtering approaches for non-Gaussian stable processes.pdf:pdf},\nisbn = {0-7803-2432-3},\nmonth = {may},\npages = {1400--1403},\ntitle = {{Adaptive filtering approaches for non-Gaussian stable processes}},\nyear = {1995}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Ayrik sozcuk tanima.\n \n \n \n\n\n \n Demirekler, M.; Cetin, A. E.; Nakiboglu, B.; Erzin, E.; Cetin, D.; and Yildirim, F.\n\n\n \n\n\n\n In Konusma Isleme Calistayi, pages 48–52, Ankara, 1995. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Demirekler1995b,\naddress = {Ankara},\nauthor = {Demirekler, M{\\"{u}}beccel and Cetin, A. Enis and Nakiboglu, B. and Erzin, Engin and Cetin, D. and Yildirim, F.},\nbooktitle = {Konusma Isleme Calistayi},\nlanguage = {Turkish},\npages = {48--52},\ntitle = {{Ayrik sozcuk tanima}},\nyear = {1995}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Metinden bagimsiz konusmaci tanima.\n \n \n \n\n\n \n Demirekler, M.; Cetin, A. E.; Nakiboglu, B.; Erzin, E.; Cetin, D.; and Yildirim, F.\n\n\n \n\n\n\n In Konusma Isleme Calistayi, pages 63–65, Ankara, 1995. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Demirekler1995,\naddress = {Ankara},\nauthor = {Demirekler, M{\\"{u}}beccel and Cetin, A. Enis and Nakiboglu, B. and Erzin, Engin and Cetin, D. and Yildirim, F.},\nbooktitle = {Konusma Isleme Calistayi},\nlanguage = {Turkish},\npages = {63--65},\ntitle = {{Metinden bagimsiz konusmaci tanima}},\nyear = {1995}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Birlesik rakam tanima.\n \n \n \n\n\n \n Demirekler, M.; Cetin, A. E.; Nakiboglu, B.; Erzin, E.; Cetin, D.; and Yildirim, F.\n\n\n \n\n\n\n In Konusma Isleme Calistayi, pages 53–58, Ankara, 1995. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Demirekler1995c,\naddress = {Ankara},\nauthor = {Demirekler, M{\\"{u}}beccel and Cetin, A. Enis and Nakiboglu, B. and Erzin, Engin and Cetin, D. and Yildirim, F.},\nbooktitle = {Konusma Isleme Calistayi},\nlanguage = {Turkish},\npages = {53--58},\ntitle = {{Birlesik rakam tanima}},\nyear = {1995}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1994\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Adaptive filtering for non-Gaussian stable processes.\n \n \n \n \n\n\n \n Arikan, O.; Cetin, A. E.; and Erzin, E.\n\n\n \n\n\n\n IEEE Signal Processing Letters, 1(11): 163–165. nov 1994.\n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Arikan1994,\nabstract = {A large class of physical phenomenon observed in practice exhibit non-Gaussian behavior. In this letter, alpha-stable distributions, which have heavier tails than Gaussian distribution, are considered to model non-Gaussian signals. Adaptive signal procesgSng in the presence of such a noise is a requirement of many practical problems. Since direct application of commonly used adaptation techniques fall in these applications, new algorithms for adaptive filtering for alpha-stable random processes are introduced.},\nauthor = {Arikan, Orhan and Cetin, A. Enis and Erzin, Engin},\ndoi = {10.1109/97.335063},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/1994/Arikan, Cetin, Erzin/1994 - Arikan, Cetin, Erzin - Adaptive filtering for non-Gaussian stable processes.pdf:pdf},\nissn = {10709908},\njournal = {IEEE Signal Processing Letters},\nmonth = {nov},\nnumber = {11},\npages = {163--165},\ntitle = {{Adaptive filtering for non-Gaussian stable processes}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=335063},\nvolume = {1},\nyear = {1994}\n}\n
\n
\n\n\n
\n A large class of physical phenomenon observed in practice exhibit non-Gaussian behavior. In this letter, alpha-stable distributions, which have heavier tails than Gaussian distribution, are considered to model non-Gaussian signals. Adaptive signal processing in the presence of such a noise is a requirement of many practical problems. Since direct application of commonly used adaptation techniques fail in these applications, new algorithms for adaptive filtering for alpha-stable random processes are introduced.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Interframe differential coding of line spectrum frequencies.\n \n \n \n \n\n\n \n Erzin, E.; and Cetin, A. E.\n\n\n \n\n\n\n IEEE Transactions on Speech and Audio Processing, 2(2): 350–352. apr 1994.\n \n\n\n\n
\n\n\n\n \n \n \"InterframePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Erzin1994,\nabstract = {Line spectrum frequencies (LSF's) uniquely represent the linear predictive coding (LPC) filter of a speech frame. In many vocoders LSF's are used to encode the LPC parameters. In this paper, an inter-frame differential coding scheme is presented for the LSF's. The LSF's of the current speech frame are predicted by using both the LSF's of the previous frame and some of the LSF's of the current frame. Then, the difference resulting from prediction is quantized.},\nauthor = {Erzin, Engin and Cetin, A. Enis},\ndoi = {10.1109/89.279286},\nfile = {:Users/eerzin/Dropbox/Docs/Mendeley/1994/Erzin, Cetin/1994 - Erzin, Cetin - Interframe differential coding of line spectrum frequencies.pdf:pdf},\nissn = {10636676},\njournal = {IEEE Transactions on Speech and Audio Processing},\nmonth = {apr},\nnumber = {2},\npages = {350--352},\ntitle = {{Interframe differential coding of line spectrum frequencies}},\nurl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=279286},\nvolume = {2},\nyear = {1994}\n}\n
\n
\n\n\n
\n Line spectrum frequencies (LSF's) uniquely represent the linear predictive coding (LPC) filter of a speech frame. In many vocoders LSF's are used to encode the LPC parameters. In this paper, an inter-frame differential coding scheme is presented for the LSF's. The LSF's of the current speech frame are predicted by using both the LSF's of the previous frame and some of the LSF's of the current frame. Then, the difference resulting from prediction is quantized.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cizgisel spektrum frekanslarina dayali sozcuk sinirlari belirleme yontemi.\n \n \n \n\n\n \n Erzin, E.; and Cetin, A. E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme Uygulamaları Kurultayi, pages 288–292, Gokova, Mugla, 1994. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin1994b,\naddress = {Gokova, Mugla},\nauthor = {Erzin, Engin and Cetin, A. Enis},\nbooktitle = {SIU: Sinyal Isleme Uygulamaları Kurultayi},\nlanguage = {Turkish},\npages = {288--292},\ntitle = {{Cizgisel spektrum frekanslarina dayali sozcuk sinirlari belirleme yontemi}},\nyear = {1994}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Gauss olmayan kararli surecler icin uyarlanir suzgecleme.\n \n \n \n\n\n \n Cetin, A. E.; Arikan, O.; and Erzin, E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme Uygulamaları Kurultayi, pages 370–372, Gokova, Mugla, 1994. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cetin1994,\naddress = {Gokova, Mugla},\nauthor = {Cetin, A. Enis and Arikan, Orhan and Erzin, Engin},\nbooktitle = {SIU: Sinyal Isleme Uygulamaları Kurultayi},\nlanguage = {Turkish},\npages = {370--372},\ntitle = {{Gauss olmayan kararli surecler icin uyarlanir suzgecleme}},\nyear = {1994}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1993\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Cizgisel spektrum frekanslari icin cerceveler arasi fark vektoru kodlama yontemi.\n \n \n \n\n\n \n Erzin, E.; and Cetin, A. E.\n\n\n \n\n\n\n In SIU: Sinyal Isleme Uygulamaları Kurultayi, pages 25–29, Istanbul, 1993. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin1993b,\naddress = {Istanbul},\nauthor = {Erzin, Engin and Cetin, A. Enis},\nbooktitle = {SIU: Sinyal Isleme Uygulamaları Kurultayi},\nlanguage = {Turkish},\npages = {25--29},\ntitle = {{Cizgisel spektrum frekanslari icin cerceveler arasi fark vektoru kodlama yontemi}},\nyear = {1993}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Interframe Differential Vector Coding of Line Spectrum Frequencies.\n \n \n \n\n\n \n Erzin, E.; and Cetin, A. E.\n\n\n \n\n\n\n In IEEE International Conference on Acoustics, Speech and Signal Processing, pages B25–B28, 1993. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin1993,\nauthor = {Erzin, Engin and Cetin, A. Enis},\nbooktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},\nisbn = {0-7803-0946-4},\npages = {B25--B28},\ntitle = {{Interframe Differential Vector Coding of Line Spectrum Frequencies}},\nyear = {1993}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Speaker Independent Isolated Word Recognition System for Turkish.\n \n \n \n\n\n \n Tuzun, B.; Erzin, E.; Demirekler, M.; Memisoglu, T.; Ugur, S.; and Cetin, A. E.\n\n\n \n\n\n\n In NATO-ASI, New Advances and Trends in Speech Recognition and Coding, Bubion (Granada), jun 1993. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Tuzun1993,\naddress = {Bubion (Granada)},\nauthor = {Tuzun, B. and Erzin, Engin and Demirekler, M{\\"{u}}beccel and Memisoglu, T. and Ugur, S. and Cetin, A. Enis},\nbooktitle = {NATO-ASI, New Advances and Trends in Speech Recognition and Coding},\nmonth = {jun},\ntitle = {{A Speaker Independent Isolated Word Recognition System for Turkish}},\nyear = {1993}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n On the Use of Interframe Information of Line Spectral Frequencies in Speech Coding.\n \n \n \n\n\n \n Erzin, E.; and Cetin, A. E.\n\n\n \n\n\n\n In NATO-ASI, New Advances and Trends in Speech Recognition and Coding, Bubion (Granada), jun 1993. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Erzin1993a,\naddress = {Bubion (Granada)},\nauthor = {Erzin, Engin and Cetin, A. Enis},\nbooktitle = {NATO-ASI, New Advances and Trends in Speech Recognition and Coding},\nmonth = {jun},\ntitle = {{On the Use of Interframe Information of Line Spectral Frequencies in Speech Coding}},\nyear = {1993}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Zero-Crossing Based Speech Vocoder at Low Bit Rates.\n \n \n \n\n\n \n Gunduzhan, E.; Erzin, E.; and Cetin, A. E.\n\n\n \n\n\n\n In 4th International Conference on Advances in Communication and Control, Rhodes, 1993. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Gunduzhan1993,\naddress = {Rhodes},\nauthor = {Gunduzhan, E. and Erzin, Engin and Cetin, A. Enis},\nbooktitle = {4th International Conference on Advances in Communication and Control},\ntitle = {{Zero-Crossing Based Speech Vocoder at Low Bit Rates}},\nyear = {1993}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1991\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Design of an Integrated Circuit for Ultrasonic Imaging.\n \n \n \n\n\n \n Karaman, M.; Aydin, C.; Kolagasioglu, E.; Toygar, M.; Baktir, A.; Erzin, E.; Tahboub, R.; Kilic, F.; Asyali, M.; and Atalar, A.\n\n\n \n\n\n\n In BILKON, Ankara, 1991. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Karaman1991,\naddress = {Ankara},\nauthor = {Karaman, M. and Aydin, C. and Kolagasioglu, E. and Toygar, M. and Baktir, A. and Erzin, Engin and Tahboub, R. and Kilic, F. and Asyali, M. and Atalar, A.},\nbooktitle = {BILKON},\ntitle = {{Design of an Integrated Circuit for Ultrasonic Imaging}},\nyear = {1991}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);