2017 (556)
Binaural beamforming using pre-determined relative acoustic transfer functions. Koutrouvelis, A. I.; Hendriks, R. C.; Heusdens, R.; Jensen, J.; and Guo, M. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1-5, Aug 2017.
@InProceedings{8081157,
  author = {A. I. Koutrouvelis and R. C. Hendriks and R. Heusdens and J. Jensen and M. Guo},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Binaural beamforming using pre-determined relative acoustic transfer functions},
  year = {2017},
  pages = {1-5},
  abstract = {Binaural beamformers (BFs) aim to reduce the output noise power while simultaneously preserving the binaural cues of all sources. Typically, the latter is accomplished via constraints relating the output and input interaural transfer functions (ITFs). The ITF is a function of the corresponding relative acoustic transfer function (RATF), which implies that RATF estimates of all sources in the acoustic scene are required. Here, we propose an alternative way to approximately preserve the binaural cues of the entire acoustic scene without estimating RATFs. We propose to preserve the binaural cues of all sources with a set of fixed pre-determined RATFs distributed around the head. Two recently proposed binaural BFs are evaluated in the context of using pre-determined RATFs and compared to the binaural minimum variance distortionless response BF which can only preserve the binaural cues of the target.},
  keywords = {acoustic signal processing;array signal processing;transfer functions;ITF;binaural cues;binaural minimum variance distortionless response;binaural beamforming;binaural beamformers;output noise power;input interaural transfer functions;predetermined relative acoustic transfer functions;binaural BF;RATF;acoustic scene;Microphones;Array signal processing;Transfer functions;Noise measurement;Noise reduction;Signal to noise ratio;Binaural beamforming;interaural transfer function (ITF);relative acoustic transfer function (RATF)},
  doi = {10.23919/EUSIPCO.2017.8081157},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347085.pdf},
}
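Illustrative sketch (not the authors' code): with a noise covariance R and a constraint matrix C whose columns are RATFs (the target plus fixed pre-determined directions), the LCMV beamformer w = R^-1 C (C^H R^-1 C)^-1 f minimizes output noise power while pinning the response at each constrained RATF, which is the cue-preservation mechanism the abstract describes. R, C, and f below are synthetic stand-ins.

import numpy as np

rng = np.random.default_rng(0)
M, S = 6, 4                                  # microphones; constrained directions

# Synthetic stand-ins: a Hermitian noise covariance R and a constraint matrix C
# whose columns are RATFs (target plus fixed pre-determined directions).
A = rng.standard_normal((M, M)) + 1j * rng.standard_normal((M, M))
R = np.eye(M) + 0.1 * (A @ A.conj().T)
C = rng.standard_normal((M, S)) + 1j * rng.standard_normal((M, S))
f = C[0, :].conj()                           # reproduce each RATF at the left reference mic

# LCMV solution w = R^-1 C (C^H R^-1 C)^-1 f: minimum output noise power
# subject to w^H C = f^H, so the constrained binaural cues survive filtering.
Rinv_C = np.linalg.solve(R, C)
w_left = Rinv_C @ np.linalg.solve(C.conj().T @ Rinv_C, f)
print("constraint error:", np.abs(w_left.conj() @ C - f.conj()).max())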
Retrieval of individualized head-related transfer functions for hearing aid applications. Buerger, M.; Meier, S.; Hofmann, C.; Kellermann, W.; Fischer, E.; and Puder, H. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 6-10, Aug 2017.
@InProceedings{8081158,
  author = {M. Buerger and S. Meier and C. Hofmann and W. Kellermann and E. Fischer and H. Puder},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Retrieval of individualized head-related transfer functions for hearing aid applications},
  year = {2017},
  pages = {6-10},
  abstract = {The capability of modern hearing aids to provide hearing-impaired humans with enhanced signals, which ultimately leads to an increased speech intelligibility, may benefit from fitting the device for each subject individually. This ideally also involves the exploitation of Head-Related Impulse Responses (HRIRs). However, HRIRs vary from person to person and thus require tedious measurements for each individual. In this work, we investigate two approaches which aim at speeding up the HRIR acquisition procedure. These are continuous measurements and interpolation, where Dynamic Time Warping (DTW) as well as linear interpolation of the magnitude and phase responses are considered. In contrast to related publications, the continuous HRIR measurements are not performed in anechoic environments here. The quality of the obtained HRIRs is assessed by means of the system mismatch and the proposed error of relative transfer functions. Both measures reveal that continuous HRIR measurements are on average much more capable than the investigated interpolation approaches, and they furthermore provide a more uniform performance for different source directions.},
  keywords = {acoustic signal detection;hearing aids;interpolation;speech intelligibility;transfer functions;transient response;linear interpolation;phase responses;continuous HRIR measurements;relative transfer functions;individualized head-related transfer functions;hearing aid applications;modern hearing aids;HRIR acquisition procedure;continuous measurements;dynamic time warping;interpolation approaches;speech intelligibility;head-related impulse responses;Interpolation;Microphones;Rotation measurement;Hearing aids;Transfer functions;Convolution},
  doi = {10.23919/EUSIPCO.2017.8081158},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346659.pdf},
}
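Illustrative sketch of the linear-interpolation baseline the paper evaluates: interpolate HRTF magnitude and unwrapped phase between two neighbouring measured directions. The HRIRs below are toys; the actual measurement grid and the DTW variant are not reproduced.

import numpy as np

def interpolate_hrtf(h_a, h_b, w):
    # Interpolate separately in magnitude and unwrapped phase, then return
    # to the time domain; w in [0, 1] is the position between the directions.
    H_a, H_b = np.fft.rfft(h_a), np.fft.rfft(h_b)
    mag = (1 - w) * np.abs(H_a) + w * np.abs(H_b)
    pha = (1 - w) * np.unwrap(np.angle(H_a)) + w * np.unwrap(np.angle(H_b))
    return np.fft.irfft(mag * np.exp(1j * pha), n=len(h_a))

# Toy HRIRs for two neighbouring directions: a decaying noise burst and a
# slightly delayed, attenuated copy of it.
rng = np.random.default_rng(1)
n = 256
h0 = rng.standard_normal(n) * np.exp(-np.arange(n) / 30.0)
h1 = 0.9 * np.roll(h0, 3)
h_mid = interpolate_hrtf(h0, h1, 0.5)        # estimate for the direction in between
print(h_mid.shape)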
Scalable source localization with multichannel α-stable distributions. Fontaine, M.; Vanwynsberghe, C.; Liutkus, A.; and Badeau, R. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 11-15, Aug 2017.
@InProceedings{8081159,
  author = {M. Fontaine and C. Vanwynsberghe and A. Liutkus and R. Badeau},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Scalable source localization with multichannel α-stable distributions},
  year = {2017},
  pages = {11-15},
  abstract = {In this paper, we focus on the problem of sound source localization and we propose a technique that exploits the known and arbitrary geometry of the microphone array. While most probabilistic techniques presented in the past rely on Gaussian models, we go further in this direction and detail a method for source localization that is based on the recently proposed α-stable harmonizable processes. They include Cauchy and Gaussian as special cases and their remarkable feature is to allow a simple modeling of impulsive and real world sounds with few parameters. The approach we present builds on the classical convolutive mixing model and has the particularities of requiring going through the data only once, to also work in the underdetermined case of more sources than microphones and to allow massively parallelizable implementations operating in the time-frequency domain. We show that the method yields interesting performance for acoustic imaging in realistic simulations.},
  keywords = {audio signal processing;blind source separation;convolution;geometry;microphone arrays;statistical distributions;time-frequency analysis;Wiener filters;multichannel α-stable distributions;sound source localization;microphone array;probabilistic techniques;Gaussian models;α-stable harmonizable processes;classical convolutive mixing model;microphones;scalable source localization;acoustic imaging;Direction-of-arrival estimation;Time-frequency analysis;Microphone arrays;Acoustics;Computational modeling;source localization;acoustic modeling;α-stable random variables;spectral measure;sketching},
  doi = {10.23919/EUSIPCO.2017.8081159},
  issn = {2076-1465},
  month = {Aug},
}
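Illustrative check of the abstract's modeling point: α-stable laws include Cauchy (α = 1) and Gaussian (α = 2) as special cases, and smaller α gives the heavy tails that suit impulsive sounds. This uses scipy's levy_stable sampler and is not the paper's estimator.

import numpy as np
from scipy.stats import levy_stable

n = 10000
for alpha, name in [(2.0, "Gaussian"), (1.0, "Cauchy"), (1.2, "impulsive")]:
    x = levy_stable.rvs(alpha, beta=0.0, size=n, random_state=0)
    # Heavy tails show up as extreme samples far beyond the typical magnitude.
    print(f"alpha={alpha} ({name}): median |x| = {np.median(np.abs(x)):.2f}, "
          f"max |x| = {np.abs(x).max():.1f}")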
Angle-of-arrival-based gesture recognition using ultrasonic multi-frequency signals. Chen, H.; Ballal, T.; Saad, M.; and Al-Naffouri, T. Y. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 16-20, Aug 2017.
@InProceedings{8081160,
  author = {H. Chen and T. Ballal and M. Saad and T. Y. Al-Naffouri},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Angle-of-arrival-based gesture recognition using ultrasonic multi-frequency signals},
  year = {2017},
  pages = {16-20},
  abstract = {Hand gestures are tools for conveying information, expressing emotion, interacting with electronic devices or even serving disabled people as a second language. A gesture can be recognized by capturing the movement of the hand, in real time, and classifying the collected data. Several commercial products such as Microsoft Kinect, Leap Motion Sensor, Synertial Gloves and HTC Vive have been released and new solutions have been proposed by researchers to handle this task. These systems are mainly based on optical measurements, inertial measurements, ultrasound signals and radio signals. This paper proposes an ultrasonic-based gesture recognition system using AOA (Angle of Arrival) information of ultrasonic signals emitted from a wearable ultrasound transducer. The 2-D angles of the moving hand are estimated using multi-frequency signals captured by a fixed receiver array. A simple redundant dictionary matching classifier is designed to recognize gestures representing the numbers from `0' to `9' and compared with a neural network classifier. Average classification accuracies of 95.5% and 94.4% are obtained, respectively, using the two classification methods.},
  keywords = {acoustic signal processing;gesture recognition;handicapped aids;radio signals;gesture recognition system;ultrasonic signals;wearable ultrasound transducer;moving hand;fixed receiver array;simple redundant dictionary matching classifier;neural network classifier;ultrasonic multifrequency signals;hand gestures;disabled people;Microsoft Kinect;Synertial Gloves;HTC Vive;optical measurements;inertial measurements;ultrasound signals;angle-of-arrival;leap motion sensor;AOA information;Dictionaries;Gesture recognition;Receivers;Acoustics;Estimation;Europe;Signal processing},
  doi = {10.23919/EUSIPCO.2017.8081160},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347782.pdf},
}
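Illustrative stand-in for the dictionary matching step: classify a 2-D angle trajectory by its distance to stored templates after length normalization, where a redundant dictionary may hold several templates per digit. Templates and probe below are toys; the AOA front end and the paper's actual dictionary design are assumptions here.

import numpy as np

def resample(traj, n=50):
    # Resample a (T, 2) angle trajectory to n points so lengths are comparable.
    t = np.linspace(0, 1, len(traj))
    ti = np.linspace(0, 1, n)
    return np.column_stack([np.interp(ti, t, traj[:, d]) for d in range(2)])

def classify(traj, dictionary):
    # Nearest-template matching: several stored trajectories may share a label.
    q = resample(traj)
    dists = [np.linalg.norm(q - resample(tmpl)) for tmpl, _ in dictionary]
    return dictionary[int(np.argmin(dists))][1]

# Toy dictionary: 2-D angle traces for the digits '0' and '1'.
t = np.linspace(0, 2 * np.pi, 80)
dictionary = [
    (np.column_stack([np.cos(t), np.sin(t)]), "0"),          # circular hand motion
    (np.column_stack([np.zeros_like(t), t / np.pi]), "1"),   # vertical stroke
]
probe = np.column_stack([np.cos(t) + 0.05, np.sin(t)])       # noisy circle
print(classify(probe, dictionary))                           # -> '0'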
Study of widely linear multichannel Wiener filter for binaural noise reduction. Leng, X.; Chen, J.; Cohen, I.; and Benesty, J. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 21-25, Aug 2017.
@InProceedings{8081161,
  author = {X. Leng and J. Chen and I. Cohen and J. Benesty},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Study of widely linear multichannel Wiener filter for binaural noise reduction},
  year = {2017},
  pages = {21-25},
  abstract = {In this paper, we study the binaural noise-reduction problem using an array of microphones. The widely linear (WL) framework in the short-time-Fourier-transform (STFT) domain is adopted. In such a framework, the microphone array signals and binaural outputs are first merged into complex signals. These complex signals are subsequently transformed into the STFT domain. The WL estimation theory is then applied in STFT subbands with interband correlation to form the optimal WL Wiener filter, which exploits the noncircular properties of the input complex signals to achieve noise reduction and meanwhile to preserve the sound spatial realism. Finally, the time-domain binaural output is reconstructed from the output of the WL Wiener filter using the inverse STFT. The effectiveness of the developed STFT-domain WL Wiener filter for binaural noise reduction is justified using experiments.},
  keywords = {array signal processing;estimation theory;Fourier transforms;microphone arrays;signal denoising;Wiener filters;widely linear multichannel Wiener filter;binaural noise-reduction problem;widely linear framework;microphone array signals;binaural outputs;WL estimation theory;STFT subbands;optimal WL Wiener filter;input complex signals;time-domain binaural output;inverse STFT;STFT-domain WL Wiener filter;sound spatial realism;noise reduction;interband correlation;short-time-Fourier-transform;Noise reduction;Europe;Signal processing;Microphone arrays;Speech;Noise measurement},
  doi = {10.23919/EUSIPCO.2017.8081161},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340929.pdf},
}
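Illustrative sketch of the widely linear idea: for a noncircular signal (E[s^2] != 0), regressing on the augmented observation [x, conj(x)] beats a strictly linear filter. Least squares serves as the sample Wiener solution here; the paper's STFT-domain, interband-correlated formulation is not reproduced, and all data are synthetic.

import numpy as np

rng = np.random.default_rng(3)
N, M = 20000, 4                     # frames, channels (merged complex signals)

# Synthetic noncircular source: a real signal rotated in the complex plane,
# so E[s^2] != 0, exactly the property a widely linear (WL) filter exploits.
s = rng.standard_normal(N) * (1 + 0.6j)
a = rng.standard_normal(M) + 1j * rng.standard_normal(M)
x = np.outer(s, a) + 0.5 * (rng.standard_normal((N, M)) + 1j * rng.standard_normal((N, M)))

# WL estimate: regress s on the augmented observation [x, conj(x)].
xt = np.hstack([x, x.conj()])
w_wl, *_ = np.linalg.lstsq(xt, s, rcond=None)       # sample WL Wiener filter
w_l, *_ = np.linalg.lstsq(x, s, rcond=None)         # strictly linear baseline

err_wl = np.mean(np.abs(s - xt @ w_wl) ** 2)
err_l = np.mean(np.abs(s - x @ w_l) ** 2)
print(f"residual power  linear: {err_l:.4f}   widely linear: {err_wl:.4f}")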
Flexible fusion of electroencephalography and functional magnetic resonance imaging: Revealing neural-hemodynamic coupling through structured matrix-tensor factorization. Van Eyndhoven, S.; Hunyadi, B.; De Lathauwer, L.; and Van Huffel, S. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 26-30, Aug 2017.
@InProceedings{8081162,
  author = {S. {Van Eyndhoven} and B. Hunyadi and L. {De Lathauwer} and S. {Van Huffel}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Flexible fusion of electroencephalography and functional magnetic resonance imaging: Revealing neural-hemodynamic coupling through structured matrix-tensor factorization},
  year = {2017},
  pages = {26-30},
  abstract = {Simultaneous recording of electroencephalographic (EEG) signals and functional magnetic resonance images (fMRI) has gained wide interest in brain research, thanks to the highly complementary spatiotemporal nature of both modalities. We propose a novel technique to extract sources of neural activity from the multimodal measurements, which relies on a structured form of coupled matrix-tensor factorization (CMTF). In a data-symmetric fashion, we characterize these underlying sources in the spatial, temporal and spectral domain, and estimate how the observations in EEG and fMRI are related through neurovascular coupling. That is, we explicitly account for the intrinsically variable nature of this coupling, allowing more accurate localization of the neural activity in time and space. We illustrate the effectiveness of this approach, which is shown to be robust to noise, by means of a simulation study. Hence, this provides a conceptually simple, yet effective alternative to other data-driven analysis methods in event-related or resting-state EEG-fMRI studies.},
  keywords = {biomedical MRI;blood vessels;electroencephalography;haemodynamics;matrix decomposition;medical image processing;neurophysiology;functional magnetic resonance imaging;neural-hemodynamic coupling;structured matrix-tensor factorization;electroencephalographic signals;brain research;neural activity;multimodal measurements;coupled matrix-tensor factorization;data-symmetric fashion;neurovascular coupling;data-driven analysis method;resting-state EEG-fMRI studies;Electroencephalography;Brain modeling;Couplings;Data models;Europe;Signal processing},
  doi = {10.23919/EUSIPCO.2017.8081162},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347053.pdf},
}
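Illustrative sketch of the coupling mechanism only: a tensor (EEG-like) and a matrix (fMRI-like) factorized jointly with one shared factor, via plain alternating least squares. Shapes, rank, and the unconstrained updates are arbitrary choices; the paper's structural constraints and neurovascular coupling model are not modeled.

import numpy as np

def khatri_rao(A, B):
    # Column-wise Kronecker product; rows ordered with B's index fastest.
    R = A.shape[1]
    return np.einsum('ir,jr->ijr', A, B).reshape(-1, R)

def cmtf(T, M, R, iters=200):
    # Toy coupled matrix-tensor factorization: T ~ sum_r a_r o b_r o c_r and
    # M ~ A @ D.T share the factor A, the core idea behind EEG/fMRI fusion.
    I, J, K = T.shape
    rng = np.random.default_rng(0)
    B, C, D = (rng.standard_normal((n, R)) for n in (J, K, M.shape[1]))
    T0 = T.reshape(I, -1)                        # mode-0 unfolding (last index fastest)
    T1 = np.moveaxis(T, 1, 0).reshape(J, -1)     # mode-1 unfolding
    T2 = np.moveaxis(T, 2, 0).reshape(K, -1)     # mode-2 unfolding
    for _ in range(iters):
        F = np.vstack([khatri_rao(B, C), D])     # A is fit against both data sets
        A = np.linalg.lstsq(F, np.hstack([T0, M]).T, rcond=None)[0].T
        B = np.linalg.lstsq(khatri_rao(A, C), T1.T, rcond=None)[0].T
        C = np.linalg.lstsq(khatri_rao(A, B), T2.T, rcond=None)[0].T
        D = np.linalg.lstsq(A, M, rcond=None)[0].T
    return A, B, C, D

# Synthetic coupled data with a shared rank-3 factor A.
rng = np.random.default_rng(1)
A, B, C, D = (rng.standard_normal((n, 3)) for n in (10, 8, 6, 5))
T = np.einsum('ir,jr,kr->ijk', A, B, C)
M = A @ D.T
Ae, Be, Ce, De = cmtf(T, M, R=3)
print("tensor fit:", np.linalg.norm(T - np.einsum('ir,jr,kr->ijk', Ae, Be, Ce)) / np.linalg.norm(T))
print("matrix fit:", np.linalg.norm(M - Ae @ De.T) / np.linalg.norm(M))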
A robust algorithm for gait cycle segmentation. Jiang, S.; Wang, X.; Kyrarini, M.; and Gräser, A. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 31-35, Aug 2017.
@InProceedings{8081163,
  author = {S. Jiang and X. Wang and M. Kyrarini and A. Gräser},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A robust algorithm for gait cycle segmentation},
  year = {2017},
  pages = {31-35},
  abstract = {In this paper, a robust algorithm for gait cycle segmentation is proposed based on a peak detection approach. The proposed algorithm is less influenced by noise and outliers and is capable of segmenting gait cycles from different types of gait signals recorded using different sensor systems. The presented algorithm has enhanced ability to segment gait cycles by eliminating the false peaks and interpolating the missing peaks. The variance of segmented cycles' lengths is computed as a criterion for evaluating the performance of segmentation. The proposed algorithm is tested on gait signals of patients diagnosed with Parkinson's disease collected from three databases. The segmentation results on three types of gait signals demonstrate the capability of the proposed algorithm to segment gait cycles accurately, and have achieved better performance than the original peak detection methods.},
  keywords = {diseases;gait analysis;medical signal detection;medical signal processing;patient diagnosis;gait cycle segmentation;peak detection approach;gait signals;original peak detection methods;false peaks;Parkinson's disease;Signal processing algorithms;Detection algorithms;Signal processing;Algorithm design and analysis;Europe;Robustness;Foot;Biomedical signal processing;gait analysis;gait cycle segmentation;peak detection},
  doi = {10.23919/EUSIPCO.2017.8081163},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347326.pdf},
}
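Illustrative sketch of the recipe in the abstract: peak detection, false-peak suppression, interpolation of a missing peak, and the variance of cycle lengths as the quality criterion. Thresholds below are guesses for a toy signal, not the paper's settings.

import numpy as np
from scipy.signal import find_peaks

def segment_gait_cycles(sig, fs):
    # Drop false peaks with amplitude and minimum-spacing thresholds, then
    # interpolate a missing peak when a gap is far longer than the median period.
    med = np.median(sig)
    peaks, _ = find_peaks(sig, height=med + 0.5 * sig.std(), distance=int(0.4 * fs))
    gaps = np.diff(peaks)
    period = np.median(gaps)
    repaired = [peaks[0]]
    for p, g in zip(peaks[1:], gaps):
        if g > 1.6 * period:                 # one beat missed: insert the midpoint
            repaired.append(repaired[-1] + g // 2)
        repaired.append(p)
    cycles = np.diff(repaired)
    # Variance of cycle lengths: the paper's criterion for segmentation quality.
    return np.asarray(repaired), cycles.var()

# Toy quasi-periodic "gait" signal at 100 Hz with noise.
fs = 100
t = np.arange(0, 10, 1 / fs)
sig = np.sin(2 * np.pi * 1.0 * t) + 0.2 * np.random.default_rng(4).standard_normal(t.size)
peaks, v = segment_gait_cycles(sig, fs)
print(len(peaks), "cycle boundaries, length variance:", v)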
Volumetric segmentation of human eye blood vessels based on OCT images. Stankiewicz, A.; Marciniak, T.; Dąbrowski, A.; Stopa, M.; and Marciniak, E. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 36-40, Aug 2017.
@InProceedings{8081164,
  author = {A. Stankiewicz and T. Marciniak and A. Dąbrowski and M. Stopa and E. Marciniak},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Volumetric segmentation of human eye blood vessels based on OCT images},
  year = {2017},
  pages = {36-40},
  abstract = {In this paper we present a method for volumetric segmentation of retinal vessels based on 3D OCT images of human macula. The proposed hybrid method comprises two steps: detailed extraction of superficial blood vessels indicators visible in 2D projection of retina layers followed by an axial inspection of inner retina to determine exact depth position of each vessel. The segmentation procedure is improved by application of block-matching and 4D filtering (BM4D) algorithm for noise reduction. The 3D reconstruction of vascular structure was performed for 10 normal subjects examined with Avanti AngioVue OCT device. The automated segmentation results were validated against the manual segmentation performed by an expert giving an accuracy of 95.2%.},
  keywords = {biomedical optical imaging;blood vessels;eye;image denoising;image filtering;image matching;image reconstruction;image segmentation;medical image processing;volumetric segmentation;human eye blood vessels;OCT images;retinal vessels;retina layers;axial inspection;inner retina;4D filtering algorithm;manual segmentation;automated segmentation;human macula images;superficial blood vessels indicator extraction;block-matching algorithm;noise reduction;3D vascular structure reconstruction;avanti angiovue OCT device;Image segmentation;Retina;Three-dimensional displays;Signal processing algorithms;Biomedical imaging;Two dimensional displays;Blood vessels;retina vessels segmentation;fundus reconstruction;optical coherence tomography (OCT);3D visualization},
  doi = {10.23919/EUSIPCO.2017.8081164},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347185.pdf},
}
Automatic prediction of spirometry readings from cough and wheeze for monitoring of asthma severity. Rao, M. V. A.; Kausthubha, N. K.; Yadav, S.; Gope, D.; Krishnaswamy, U. M.; and Ghosh, P. K. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 41-45, Aug 2017.
@InProceedings{8081165,
  author = {M. V. A. Rao and N. K. Kausthubha and S. Yadav and D. Gope and U. M. Krishnaswamy and P. K. Ghosh},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Automatic prediction of spirometry readings from cough and wheeze for monitoring of asthma severity},
  year = {2017},
  pages = {41-45},
  abstract = {We consider the task of automatically predicting spirometry readings from cough and wheeze audio signals for asthma severity monitoring. Spirometry is a pulmonary function test used to measure forced expiratory volume in one second (FEV1) and forced vital capacity (FVC) when a subject exhales in the spirometry sensor after taking a deep breath. FEV1%, FVC% and their ratio are typically used to determine the asthma severity. Accurate prediction of these spirometry readings from cough and wheeze could help patients to non-invasively monitor their asthma severity in the absence of spirometry. We use statistical spectrum description (SSD) as the cue from cough and wheeze signal to predict the spirometry readings using support vector regression (SVR). We perform experiments with cough and wheeze recordings from 16 healthy persons and 12 patients. We find that coughs are a better predictor of spirometry readings than the wheeze signal. FEV1%, FVC% and their ratio are predicted with root mean squared error of 11.06%, 10.3% and 0.08, respectively. We also perform a three-class asthma severity level classification with predicted FEV1% and obtain an accuracy of 77.77%.},
  keywords = {audio signal processing;biomedical measurement;diseases;lung;mean square error methods;medical signal processing;patient diagnosis;patient monitoring;pneumodynamics;regression analysis;support vector machines;spirometry readings;wheeze audio signals;asthma severity monitoring;spirometry sensor;wheeze recordings;cough audio signals;pulmonary function test;forced expiratory volume;forced vital capacity;statistical spectrum description;support vector regression;root mean squared error;three class asthma severity level classification;Monitoring;Mel frequency cepstral coefficient;Lungs;Europe;Signal processing;Feature extraction},
  doi = {10.23919/EUSIPCO.2017.8081165},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346852.pdf},
}
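Illustrative sketch of the pipeline: statistical spectrum description (SSD) features fed to support vector regression. The SSD computed here (per-band spectral statistics) is a crude stand-in for the paper's features, and the recordings and FEV1% labels are synthetic.

import numpy as np
from sklearn.svm import SVR

def ssd_features(audio, bands=8):
    # Crude SSD stand-in: per frequency band, summary statistics of the
    # magnitude spectrum; the paper's exact SSD definition may differ.
    spec = np.abs(np.fft.rfft(audio))
    chunks = np.array_split(spec, bands)
    return np.concatenate([[c.mean(), c.std(), c.max(), np.median(c)] for c in chunks])

# Toy data: random "cough recordings" with a planted spectral cue driving FEV1%.
rng = np.random.default_rng(5)
n = 60
X = np.stack([ssd_features(rng.standard_normal(8000)) for _ in range(n)])
y = 60 + X[:, 0] * 5 + rng.standard_normal(n)        # synthetic FEV1% labels

model = SVR(kernel='rbf', C=10.0).fit(X[:50], y[:50])
pred = model.predict(X[50:])
print("RMSE on held-out recordings:", np.sqrt(np.mean((pred - y[50:]) ** 2)))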
Pseudo-CT generation by conditional inference random forest for MRI-based radiotherapy treatment planning. Largent, A.; Nunes, J.; Saint-Jalmes, H.; Simon, A.; Perichon, N.; Barateau, A.; Hervé, C.; Lafond, C.; Greer, P. B.; Dowling, J. A.; de Crevoisier, R.; and Acosta, O. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 46-50, Aug 2017.
@InProceedings{8081166,
  author = {A. Largent and J. Nunes and H. Saint-Jalmes and A. Simon and N. Perichon and A. Barateau and C. Hervé and C. Lafond and P. B. Greer and J. A. Dowling and R. {de Crevoisier} and O. Acosta},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Pseudo-CT generation by conditional inference random forest for MRI-based radiotherapy treatment planning},
  year = {2017},
  pages = {46-50},
  abstract = {Dose calculation from MRI is a topical issue. New treatment systems combining a linear accelerator with an MRI have recently been developed. MRI has good soft tissue contrast without ionizing radiation exposure. However, unlike CT, MRI does not provide electron density information necessary for dose calculation. We propose in this paper a machine learning method to simulate a CT from a target MRI and co-registered CT-MRI training set. Ten prostate MR and CT images have been considered. Firstly, a reference image was randomly selected in the training set. A common space has been built thanks to affine registrations between the training set and the reference image. Multiscale image descriptors such as spatial information, gradients and texture features were extracted from MRI patches at different levels of a Gaussian pyramid and used as voxel-wise characteristics in the learning scheme. A Conditional Inference Random Forest (CIRF) modelled the relation between MRI descriptors and CT patches. For validation, test images were spatially normalized and the same descriptors were computed to generate a new pCT. Leave-one-out experiments were performed. We obtained a MAE = 45.79 (pCT vs CT). Dose volume histograms inside PTV and organs at risk are in close agreement. The D98% was 0.45% (inside PTV) and the 3D gamma pass rate (1mm, 1%) was 99.2%. Our method gives better results than direct bulk assignment, and the results suggest that the method may be used for dose calculations in an MR-based planning system.},
  keywords = {biological organs;biological tissues;biomedical MRI;cancer;computerised tomography;dosimetry;feature extraction;image registration;image texture;inference mechanisms;learning (artificial intelligence);medical image processing;radiation therapy;random processes;pseudoCT generation;soft tissue contrast;machine learning method;prostate magnetic resonance images;prostate computed tomography images;spatial information extraction;texture feature extraction;dose volume histograms;multiscale image descriptors;co-registered CT-MRI training set;dose calculation;conditional inference random forest;Pseudo-CT;Radiotherapy;Magnetic Resonance Imaging;Treatment planning;Random Forest},
  doi = {10.23919/EUSIPCO.2017.8081166},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347582.pdf},
}
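Illustrative voxel-wise regression in the spirit of the method: descriptors computed from an MR image, a forest mapping them to CT numbers. sklearn's RandomForestRegressor stands in for the paper's conditional inference random forest (CIRF), and both images below are synthetic.

import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(6)

# Toy paired "MR" and "CT" slices: CT is a hidden nonlinear function of MR.
mr = rng.random((64, 64))
ct = 1000 * np.tanh(3 * mr) + 20 * rng.standard_normal(mr.shape)

def voxel_features(img):
    # Per-voxel descriptors: intensity plus finite-difference gradients, a tiny
    # stand-in for the paper's multiscale patch and texture descriptors.
    gy, gx = np.gradient(img)
    return np.column_stack([img.ravel(), gx.ravel(), gy.ravel()])

X, y = voxel_features(mr), ct.ravel()
rf = RandomForestRegressor(n_estimators=50, random_state=0).fit(X[::2], y[::2])
pct = rf.predict(X).reshape(mr.shape)                # predicted pseudo-CT
print("MAE:", np.abs(pct - ct).mean())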
Learning feature extractors for AMD classification in OCT using convolutional neural networks. Ravenscroft, D.; Deng, J.; Xie, X.; Terry, L.; Margrain, T. H.; North, R. V.; and Wood, A. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 51-55, Aug 2017.
@InProceedings{8081167,
  author = {D. Ravenscroft and J. Deng and X. Xie and L. Terry and T. H. Margrain and R. V. North and A. Wood},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Learning feature extractors for AMD classification in OCT using convolutional neural networks},
  year = {2017},
  pages = {51-55},
  abstract = {In this paper, we propose a two-step textural feature extraction method, which utilizes the feature learning ability of Convolutional Neural Networks (CNN) to extract a set of low level primitive filter kernels, and then generalizes the discriminative power by forming a histogram based descriptor. The proposed method is applied to a practical medical diagnosis problem of classifying different stages of Age-Related Macular Degeneration (AMD) using a dataset comprising long-wavelength Optical Coherence Tomography (OCT) images of the choroid. The experimental results show that the proposed method extracts more discriminative features than the features learnt through CNN only. It also suggests the feasibility of classifying different AMD stages using the textural information of the choroid region.},
  keywords = {biomedical optical imaging;eye;feature extraction;feedforward neural nets;filtering theory;image classification;image representation;image texture;learning (artificial intelligence);medical image processing;optical tomography;OCT;long-wavelength optical coherence tomography images;AMD stages;discriminative features;Age-Related Macular Degeneration;practical medical diagnosis problem;histogram based descriptor;discriminative power;low level primitive filter kernels;CNN;feature learning ability;two-step textural feature extraction method;convolutional neural networks;AMD classification;feature extractors;Feature extraction;Kernel;Histograms;Convolution;Neural networks;Shape;Training},
  doi = {10.23919/EUSIPCO.2017.8081167},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343588.pdf},
}
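Illustrative sketch of the second step of the proposed extractor: pooling filter responses into a histogram-based descriptor. Random kernels below stand in for the low-level filters a trained CNN would supply, and the image is a synthetic patch.

import numpy as np
from scipy.signal import convolve2d

def histogram_descriptor(img, kernels, bins=16):
    # For each filter, histogram the response map; concatenating the
    # histograms gives a fixed-length textural descriptor for a classifier.
    feats = []
    for k in kernels:
        resp = convolve2d(img, k, mode='valid')
        hist, _ = np.histogram(resp, bins=bins, range=(-3, 3), density=True)
        feats.append(hist)
    return np.concatenate(feats)

rng = np.random.default_rng(7)
kernels = [rng.standard_normal((5, 5)) * 0.2 for _ in range(4)]
oct_patch = rng.standard_normal((64, 64))        # stand-in for a choroid OCT patch
desc = histogram_descriptor(oct_patch, kernels)
print(desc.shape)                                # (4 * 16,) descriptor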
A new method for breast cancer identification using multi-modal features in quaternionic form. Apostolopoulos, G.; Koutras, A.; Christoyianni, I.; and Dermatas, E. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 56-60, Aug 2017.
@InProceedings{8081168,
  author = {G. Apostolopoulos and A. Koutras and I. Christoyianni and E. Dermatas},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A new method for breast cancer identification using multi-modal features in quaternionic form},
  year = {2017},
  pages = {56-60},
  abstract = {Mammography is still the most effective procedure for early diagnosis of breast cancer. Computer-aided Diagnosis (CAD) systems can be very helpful in this direction for radiologists to recognize abnormal and normal regions of interest in digital mammograms faster than in a traditional screening program. In this work, we propose a new method for breast cancer identification of all types of lesions in digital mammograms using multimodal features in a quaternionic representation. The proposed method consists of two steps: First, a novel feature extraction module utilizes two dimensional discrete transforms based on ART, Shapelets, Zernike moments and Gabor filters to decompose Regions of Suspicion (ROS) into a set of localized basis functions with different shapes. The extracted features are then fused and presented in quaternionic representation to the classification module in the second step. For the classification task, we propose a new type of classifier (Q-classifier) that successfully, accurately, with low computational cost and higher speed of diagnosis, recognizes normal and abnormal ROS from mammograms. The proposed method is evaluated on the Mini-MIAS database. The method's performance is evaluated using the Receiver Operating Characteristics (ROC) curve. The achieved result AUC = 0.934 shows that the proposed method can be quite effective and can be used as a tool for efficiently diagnosing breast cancer compared to similar techniques presented in the literature that use SVM classifiers and unimodal features.},
  keywords = {cancer;diagnostic radiography;discrete transforms;feature extraction;Gabor filters;image classification;image filtering;mammography;medical image processing;localized basis functions;classification module;breast cancer identification;multimodal features;quaternionic form;digital mammograms;computer-aided diagnosis systems;feature extraction module;lesion types;two dimensional discrete transforms;shapelets;Zernike moments;Gabor filters;regions-of-suspicion;Q-classifier;mini-MIAS database;receiver operating characteristics curve;Quaternions;Feature extraction;Subspace constraints;Gabor filters;Breast cancer;Transforms;Shapelets;ART;Zernike-Moments;Gabor-filter banks;Quaternion;Breast cancer;Computer-aided diagnosis (CAD)},
  doi = {10.23919/EUSIPCO.2017.8081168},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347613.pdf},
}
Deep structured features for semantic segmentation. Tschannen, M.; Cavigelli, L.; Mentzer, F.; Wiatowski, T.; and Benini, L. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 61-65, Aug 2017.
@InProceedings{8081169,
  author = {M. Tschannen and L. Cavigelli and F. Mentzer and T. Wiatowski and L. Benini},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Deep structured features for semantic segmentation},
  year = {2017},
  pages = {61-65},
  abstract = {We propose a highly structured neural network architecture for semantic segmentation with an extremely small model size, suitable for low-power embedded and mobile platforms. Specifically, our architecture combines i) a Haar wavelet-based tree-like convolutional neural network (CNN), ii) a random layer realizing a radial basis function kernel approximation, and iii) a linear classifier. While stages i) and ii) are completely pre-specified, only the linear classifier is learned from data. We apply the proposed architecture to outdoor scene and aerial image semantic segmentation and show that the accuracy of our architecture is competitive with conventional pixel classification CNNs. Furthermore, we demonstrate that the proposed architecture is data efficient in the sense of matching the accuracy of pixel classification CNNs when trained on a much smaller data set.},
  keywords = {feedforward neural nets;image classification;image segmentation;learning (artificial intelligence);neural net architecture;radial basis function networks;trees (mathematics);deep structured features;highly structured neural network architecture;Haar wavelet;tree-like convolutional neural network;random layer;radial basis function kernel approximation;linear classifier;aerial image semantic segmentation;conventional pixel classification CNNs;outdoor scene;CNN;Feature extraction;Computer architecture;Semantics;Convolution;Kernel;Image segmentation;Deconvolution},
  doi = {10.23919/EUSIPCO.2017.8081169},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342819.pdf},
}
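Stages ii) and iii) of the architecture correspond to a classical construction: random Fourier features approximating an RBF kernel, followed by the only learned component, a linear classifier. An illustrative sketch with sklearn's RBFSampler on toy features; stage i), the Haar-wavelet CNN, is omitted here.

import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

rng = np.random.default_rng(8)
X = rng.standard_normal((400, 10))                      # toy per-pixel features
y = (np.sin(X[:, 0]) + X[:, 1] ** 2 > 1).astype(int)    # nonlinearly separable labels

# The RBFSampler weights are random and fixed; only the linear model is fit,
# mirroring the paper's "pre-specified layers + learned linear classifier".
clf = make_pipeline(RBFSampler(gamma=0.5, n_components=300, random_state=0),
                    LogisticRegression(max_iter=1000))
clf.fit(X[:300], y[:300])
print("held-out accuracy:", clf.score(X[300:], y[300:]))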
3D point cloud segmentation using a fully connected conditional random field. Lin, X.; Casas, J. R.; and Pardas, M. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 66-70, Aug 2017.
@InProceedings{8081170,
  author = {X. Lin and J. R. Casas and M. Pardas},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {3D point cloud segmentation using a fully connected conditional random field},
  year = {2017},
  pages = {66-70},
  abstract = {Traditional image segmentation methods working with low level image features are usually difficult to adapt to higher level tasks, such as object recognition and scene understanding. Object segmentation emerges as a new challenge in this research field. It aims at obtaining more meaningful segments related to semantic objects in the scene by analyzing a combination of different information. 3D point cloud data obtained from consumer depth sensors has been exploited to tackle many computer vision problems due to its richer information about the geometry of 3D scenes compared to 2D images. Meanwhile, new challenges have also emerged as the depth information is usually noisy, sparse and unorganized. In this paper, we present a novel point cloud segmentation approach for segmenting interacting objects in a stream of point clouds by exploiting spatio-temporal coherence. We pose the problem as an energy minimization task in a fully connected conditional random field with the energy function defined based on both current and previous information. We compare different methods and prove the improved segmentation performance and robustness of the proposed approach in sequences with over 2k frames.},
  keywords = {computer vision;image segmentation;image sequences;minimisation;object recognition;computer vision;image segmentation;scene understanding;image sequences;consumer depth sensors;3D point cloud data;object segmentation;object recognition;low level image features;3D point cloud segmentation;energy minimization task;Three-dimensional displays;Image segmentation;Object segmentation;Image color analysis;Two dimensional displays;Labeling},
  doi = {10.23919/EUSIPCO.2017.8081170},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347494.pdf},
}
Concept detection and face pose estimation using lightweight convolutional neural networks for steering drone video shooting. Passalis, N.; and Tefas, A. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 71-75, Aug 2017.
@InProceedings{8081171,
  author = {N. Passalis and A. Tefas},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Concept detection and face pose estimation using lightweight convolutional neural networks for steering drone video shooting},
  year = {2017},
  pages = {71-75},
  abstract = {Unmanned Aerial Vehicles, also known as drones, are becoming increasingly popular for video shooting tasks since they are capable of capturing spectacular aerial shots. Deep learning techniques, such as Convolutional Neural Networks (CNNs), can be utilized to assist various aspects of the flying and the shooting process allowing one human to operate one or more drones at once. However, using deep learning techniques on drones is not straightforward since computational power and memory constraints exist. In this work, a quantization-based method for learning lightweight convolutional networks is proposed. The ability of the proposed approach to significantly reduce the model size and increase both the feed-forward speed and the accuracy is demonstrated on two different drone-related tasks, i.e., human concept detection and face pose estimation.},
  keywords = {autonomous aerial vehicles;feedforward neural nets;learning (artificial intelligence);mobile robots;neural nets;pose estimation;remotely operated vehicles;video signal processing;different drone-related tasks;human concept detection;lightweight convolutional neural networks;steering drone video shooting;Unmanned Aerial Vehicles;drones;video shooting tasks;deep learning techniques;memory constraints;face pose estimation;aerial shots;CNNs;feedforward speed;Feature extraction;Drones;Convolutional codes;Convolution;Histograms;Machine learning},
  doi = {10.23919/EUSIPCO.2017.8081171},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346788.pdf},
}
\n
\n\n\n
\n Unmanned Aerial Vehicles, also known as drones, are becoming increasingly popular for video shooting tasks since they are capable of capturing spectacular aerial shots. Deep learning techniques, such as Convolutional Neural Networks (CNNs), can be utilized to assist various aspects of the flying and the shooting process allowing one human to operate one or more drones at once. However, using deep learning techniques on drones is not straightforward since computational power and memory constraints exist. In this work, a quantization-based method for learning lightweight convolutional networks is proposed. The ability of the proposed approach to significantly reduce the model size and increase both the feed-forward speed and the accuracy is demonstrated on two different drone-related tasks, i.e., human concept detection and face pose estimation.\n
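A minimal sketch of the general quantization idea (not the authors' specific method): cluster a layer's weights into a small shared codebook so each weight is stored as a compact integer index plus a tiny table of centroids.

```python
import numpy as np

# Hypothetical conv-layer weights; k-means-style codebook quantization.
rng = np.random.default_rng(1)
w = rng.normal(size=(64, 3, 3, 3)).ravel()

def kmeans_quantize(w, k=16, iters=20):
    centroids = np.linspace(w.min(), w.max(), k)
    for _ in range(iters):
        idx = np.abs(w[:, None] - centroids[None, :]).argmin(1)
        for j in range(k):
            if np.any(idx == j):
                centroids[j] = w[idx == j].mean()
    return idx.astype(np.uint8), centroids

idx, codebook = kmeans_quantize(w)
w_hat = codebook[idx]                         # dequantized weights
print("reconstruction MSE:", np.mean((w - w_hat) ** 2))
```

With k = 16 centroids each weight needs only 4 bits of index, which is the sort of model-size reduction the abstract refers to.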
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detecting conversational gaze aversion using unsupervised learning.\n \n \n \n \n\n\n \n Roddy, M.; and Harte, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 76-80, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DetectingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081172,\n  author = {M. Roddy and N. Harte},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Detecting conversational gaze aversion using unsupervised learning},\n  year = {2017},\n  pages = {76-80},\n  abstract = {The aversion of gaze during dyadic conversations is a social signal that contains information relevant to the detection of interest, turn-taking cues, and conversational engagement. The understanding and modeling of such behavior has implications for the design of embodied conversational agents, as well as computational approaches to conversational analysis. Recent approaches to extracting gaze directions from monocular camera footage have achieved accurate results. We investigate ways of processing the extracted gaze signals from videos to perform gaze aversion detection. We present novel approaches that are based on unsupervised classification using spectral clustering as well as optimization methods. Three approaches that vary in their input parameters and their complexity are proposed and evaluated.},\n  keywords = {feature extraction;gaze tracking;pattern clustering;unsupervised learning;unsupervised learning;dyadic conversations;social signal;conversational engagement;embodied conversational agents;computational approaches;conversational analysis;monocular camera footage;extracted gaze signals;unsupervised classification;conversational gaze aversion detection;detection-of-interest;turn-taking cues;spectral clustering;optimization method;Cameras;Feature extraction;Videos;Data mining;Gaze tracking;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081172},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347614.pdf},\n}\n\n
\n
\n\n\n
\n The aversion of gaze during dyadic conversations is a social signal that contains information relevant to the detection of interest, turn-taking cues, and conversational engagement. The understanding and modeling of such behavior has implications for the design of embodied conversational agents, as well as computational approaches to conversational analysis. Recent approaches to extracting gaze directions from monocular camera footage have achieved accurate results. We investigate ways of processing the extracted gaze signals from videos to perform gaze aversion detection. We present novel approaches that are based on unsupervised classification using spectral clustering as well as optimization methods. Three approaches that vary in their input parameters and their complexity are proposed and evaluated.\n
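A minimal sketch, assuming scikit-learn is available, of the unsupervised-classification step: spectral clustering of per-frame gaze-direction features into two states. The synthetic features below are hypothetical stand-ins for gaze signals extracted from video.

```python
import numpy as np
from sklearn.cluster import SpectralClustering

rng = np.random.default_rng(2)
contact = rng.normal([0.0, 0.0], 0.05, size=(200, 2))   # gaze near the partner
aversion = rng.normal([0.4, -0.3], 0.05, size=(80, 2))  # gaze off to the side
X = np.vstack([contact, aversion])

labels = SpectralClustering(n_clusters=2, affinity="rbf",
                            random_state=0).fit_predict(X)
print(np.bincount(labels))   # rough sizes of the two discovered states
```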
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Registration of images to unorganized 3D point clouds using contour cues.\n \n \n \n \n\n\n \n Pujol-Miro, A.; Ruiz-Hidalgo, J.; and Casas, J. R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 81-85, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RegistrationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081173,\n  author = {A. Pujol-Miro and J. Ruiz-Hidalgo and J. R. Casas},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Registration of images to unorganized 3D point clouds using contour cues},\n  year = {2017},\n  pages = {81-85},\n  abstract = {Low resolution commercial 3D sensors contribute to computer vision tasks even better when the analysis is carried out in a combination with higher resolution image data. This requires registration of 2D images to unorganized 3D point clouds. In this paper we present a framework for 2D-3D data fusion to obtain directly the camera pose of a 2D color image in relation to a 3D point cloud. It includes a novel multiscale intensity feature detection algorithm and a modified ICP procedure based on point-to-line distances. The framework is generic for several data types (such as CAD designs or LiDAR data without photometric information), and results show that performance is comparable to the state of the art, while avoiding manual markers or specific patterns on the data.},\n  keywords = {cameras;feature extraction;image colour analysis;image reconstruction;image registration;image resolution;image sensors;object detection;sensor fusion;point-to-line distances;unorganized 3D point clouds;low resolution commercial 3D sensors;higher resolution image data;2D color image;multiscale intensity feature detection algorithm;image registration;computer vision;contour cues;modified ICP procedure;camera pose;Three-dimensional displays;Feature extraction;Signal processing algorithms;Iterative closest point algorithm;Cameras;Sensors;Europe;feature extraction;image registration;iterative closest point algorithm;stereo vision},\n  doi = {10.23919/EUSIPCO.2017.8081173},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341236.pdf},\n}\n\n
\n
\n\n\n
\n Low-resolution commercial 3D sensors contribute even more to computer vision tasks when the analysis is carried out in combination with higher-resolution image data. This requires registration of 2D images to unorganized 3D point clouds. In this paper we present a framework for 2D-3D data fusion to obtain directly the camera pose of a 2D color image in relation to a 3D point cloud. It includes a novel multiscale intensity feature detection algorithm and a modified ICP procedure based on point-to-line distances. The framework is generic for several data types (such as CAD designs or LiDAR data without photometric information), and results show that performance is comparable to the state of the art, while avoiding manual markers or specific patterns on the data.\n
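For orientation, here is a bare-bones ICP loop in the classic point-to-point form (the paper's variant uses point-to-line distances and intensity contour cues, which are omitted here); the SVD step is the standard Kabsch alignment and, for brevity, skips the reflection check.

```python
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(3)
target = rng.normal(size=(300, 3))
ang = 0.1
R_true = np.array([[np.cos(ang), -np.sin(ang), 0],
                   [np.sin(ang),  np.cos(ang), 0],
                   [0, 0, 1]])
source = target @ R_true.T + np.array([0.2, -0.1, 0.05])

src = source.copy()
tree = cKDTree(target)
for _ in range(20):
    _, nn = tree.query(src)                    # closest-point correspondences
    p, q = src - src.mean(0), target[nn] - target[nn].mean(0)
    U, _, Vt = np.linalg.svd(p.T @ q)          # best rotation via SVD (Kabsch)
    R = (U @ Vt).T
    t = target[nn].mean(0) - R @ src.mean(0)
    src = src @ R.T + t

_, nn = tree.query(src)
print("residual RMS:", np.sqrt(((src - target[nn]) ** 2).sum(1)).mean())
```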
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust statistical processing of TDOA estimates for distant speaker diarization.\n \n \n \n \n\n\n \n Parada, P. P.; Sharma, D.; van Waterschoot , T.; and Naylor, P. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 86-90, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081174,\n  author = {P. P. Parada and D. Sharma and T. {van Waterschoot} and P. A. Naylor},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust statistical processing of TDOA estimates for distant speaker diarization},\n  year = {2017},\n  pages = {86-90},\n  abstract = {Speaker diarization systems aim to segment an audio signal into homogeneous sections with only one active speaker and answer the question {"}who spoke when?{"} We present a novel approach to speaker diarization exploiting spatial information through robust statistical modeling of Time Difference of Arrival (TDOA) estimates obtained using pairs of microphones. The TDOAs are modeled with Gaussian Mixture Models (GMM) trained in a robust manner with the expectation-conditional maximization algorithm and minorization-maximization approach. In situations of multiple microphone deployment, our method allows for the selection of the best microphone pair as part of the modeling and supports ad-hoc microphone placement. Such information can be useful for subsequent speech processing algorithms. We show that our method, which uses only spatial information, achieves up to 36.1% relative reduction in speaker error time compared to an open source toolkit using TDOA features and tested on the NIST RT05 multiparty meeting database.},\n  keywords = {expectation-maximisation algorithm;Gaussian processes;microphones;speaker recognition;speech processing;time-of-arrival estimation;robust statistical processing;TDOA estimates;distant speaker diarization;speaker diarization systems;audio signal;active speaker;spatial information;robust statistical modeling;expectation-conditional maximization algorithm;minorization-maximization approach;multiple microphone deployment;microphone pair;ad-hoc microphone placement;speaker error time;TDOA features;time difference of arrival estimates;speech processing algorithms;Gaussian mixture models;Microphones;Feature extraction;Standards;Computational modeling;Robustness;Hidden Markov models;Indexes},\n  doi = {10.23919/EUSIPCO.2017.8081174},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342330.pdf},\n}\n\n
\n
\n\n\n
\n Speaker diarization systems aim to segment an audio signal into homogeneous sections with only one active speaker and answer the question \"who spoke when?\" We present a novel approach to speaker diarization exploiting spatial information through robust statistical modeling of Time Difference of Arrival (TDOA) estimates obtained using pairs of microphones. The TDOAs are modeled with Gaussian Mixture Models (GMM) trained in a robust manner with the expectation-conditional maximization algorithm and minorization-maximization approach. In situations of multiple microphone deployment, our method allows for the selection of the best microphone pair as part of the modeling and supports ad-hoc microphone placement. Such information can be useful for subsequent speech processing algorithms. We show that our method, which uses only spatial information, achieves up to 36.1% relative reduction in speaker error time compared to an open source toolkit using TDOA features and tested on the NIST RT05 multiparty meeting database.\n
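A minimal sketch, assuming scikit-learn: fitting a GMM to synthetic single-microphone-pair TDOA estimates and reading off a per-frame speaker hypothesis. Plain EM stands in for the paper's robust ECM/minorization-maximization training.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(4)
tdoa = np.concatenate([rng.normal(-2e-4, 2e-5, 400),    # "speaker A" frames
                       rng.normal( 3e-4, 2e-5, 600)])   # "speaker B" frames
gmm = GaussianMixture(n_components=2, random_state=0).fit(tdoa[:, None])
turns = gmm.predict(tdoa[:, None])            # per-frame speaker hypothesis
print(np.bincount(turns))
```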
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Variants of mel-frequency cepstral coefficients for improved whispered speech speaker verification in mismatched conditions.\n \n \n \n \n\n\n \n Sarria-Paja, M.; and Falk, T. H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 91-95, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"VariantsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081175,\n  author = {M. Sarria-Paja and T. H. Falk},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Variants of mel-frequency cepstral coefficients for improved whispered speech speaker verification in mismatched conditions},\n  year = {2017},\n  pages = {91-95},\n  abstract = {In this paper, automatic speaker verification using normal and whispered speech is explored. Typically, for speaker verification systems, varying vocal effort inputs during the testing stage significantly degrades system performance. Solutions such as feature mapping or addition of multi-style data during training and enrollment stages have been proposed but do not show similar advantages for the involved speaking styles. Herein, we focus attention on the extraction of invariant speaker-dependent information from normal and whispered speech, thus allowing for improved multi vocal effort speaker verification. We base our search on previously reported perceptual and acoustic insights and propose variants of the mel-frequency cepstral coefficients (MFCC). We show the complementarity of the proposed features via three fusion schemes. Gains as high as 39% and 43% can be achieved for normal and whispered speech, respectively, relative to the existing systems based on conventional MFCC features.},\n  keywords = {cepstral analysis;feature extraction;speaker recognition;mel-frequency cepstral coefficients;automatic speaker verification;feature mapping;invariant speaker-dependent information;whispered speech speaker verification;invariant speaker-dependent information extraction;Speech;Mel frequency cepstral coefficient;Feature extraction;Databases;Data mining;Speech processing;Whispered speech;speaker verification;fusion;i-vector extraction;MFCC},\n  doi = {10.23919/EUSIPCO.2017.8081175},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345615.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, automatic speaker verification using normal and whispered speech is explored. Typically, for speaker verification systems, varying vocal effort inputs during the testing stage significantly degrade system performance. Solutions such as feature mapping or the addition of multi-style data during the training and enrollment stages have been proposed but do not show similar advantages for the involved speaking styles. Herein, we focus attention on the extraction of invariant speaker-dependent information from normal and whispered speech, thus allowing for improved multi-vocal-effort speaker verification. We base our approach on previously reported perceptual and acoustic insights and propose variants of the mel-frequency cepstral coefficients (MFCC). We show the complementarity of the proposed features via three fusion schemes. Gains as high as 39% and 43% can be achieved for normal and whispered speech, respectively, relative to existing systems based on conventional MFCC features.\n
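As context for the baseline features, a short sketch (assuming librosa is installed) of 13-D static MFCC extraction together with a generic weighted score-level fusion rule; the paper's specific MFCC variants and its three fusion schemes are not reproduced here.

```python
import numpy as np
import librosa   # assumption: librosa is available

# Baseline 13-D static MFCCs from a (synthetic) waveform.
y = np.sin(2 * np.pi * 220 * np.arange(16000) / 16000).astype(np.float32)
mfcc = librosa.feature.mfcc(y=y, sr=16000, n_mfcc=13)   # shape (13, frames)
print(mfcc.shape)

def fuse(score_a, score_b, alpha=0.5):
    """Weighted score-level fusion of two verification-system scores."""
    return alpha * score_a + (1 - alpha) * score_b

print(fuse(1.2, -0.4))
```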
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Speaker verification anti-spoofing using linear prediction residual phase features.\n \n \n \n \n\n\n \n Hanilci, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 96-100, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SpeakerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081176,\n  author = {C. Hanilci},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Speaker verification anti-spoofing using linear prediction residual phase features},\n  year = {2017},\n  pages = {96-100},\n  abstract = {The vulnerability of automatic speaker verification (ASV) systems against spoofing attacks is an important security concern about the reliability of ASV technology. Recently, various countermeasures have been developed for spoofing detection. In this paper, we propose to use features derived from linear prediction (LP) residual signal for spoofing detection using simple Gaussian mixture model (GMM) classifier. Experiments conducted on recently released ASVspoof 2015 database show that LP residual phase cepstral coefficients (LPRPC) outperforms standard MFCC features and considerably improves the spoofing detection performance. With the LPRPC features 97% relative improvement is observed over standard MFCC features on known attacks.},\n  keywords = {cepstral analysis;feature extraction;Gaussian processes;speaker recognition;automatic speaker verification systems;spoofing attacks;ASV technology;linear prediction residual signal;residual phase cepstral coefficients;standard MFCC features;spoofing detection performance;LPRPC features;Gaussian mixture model classifier;speaker verification antispoofing;linear prediction residual phase features;LP residual signal;GMM;Feature extraction;Speech;Mel frequency cepstral coefficient;Training;Discrete cosine transforms;Databases;Standards},\n  doi = {10.23919/EUSIPCO.2017.8081176},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341434.pdf},\n}\n\n
\n
\n\n\n
\n The vulnerability of automatic speaker verification (ASV) systems to spoofing attacks is an important security concern regarding the reliability of ASV technology. Recently, various countermeasures have been developed for spoofing detection. In this paper, we propose to use features derived from the linear prediction (LP) residual signal for spoofing detection using a simple Gaussian mixture model (GMM) classifier. Experiments conducted on the recently released ASVspoof 2015 database show that LP residual phase cepstral coefficients (LPRPC) outperform standard MFCC features and considerably improve the spoofing detection performance. With the LPRPC features, a 97% relative improvement is observed over standard MFCC features on known attacks.\n
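A minimal sketch of LP residual extraction by autocorrelation-based linear prediction followed by inverse filtering; the paper's phase-based cepstral features would then be computed on top of this residual.

```python
import numpy as np
from scipy.linalg import solve_toeplitz
from scipy.signal import lfilter

rng = np.random.default_rng(5)
x = lfilter([1.0], [1.0, -0.9], rng.normal(size=2048))   # synthetic "speech"

def lp_residual(x, order=12):
    # Autocorrelation method: solve the Toeplitz normal equations R a = r.
    r = np.correlate(x, x, mode="full")[len(x) - 1:len(x) + order]
    a = solve_toeplitz((r[:order], r[:order]), r[1:order + 1])
    # Inverse filter A(z) = 1 - sum_k a_k z^{-k} yields the LP residual.
    return lfilter(np.concatenate(([1.0], -a)), [1.0], x)

res = lp_residual(x)
print("energy ratio (residual/signal):", res.var() / x.var())
```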
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Spoofing detection employing infinite impulse response — constant Q transform-based feature representations.\n \n \n \n\n\n \n Alam, J.; and Kenny, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 101-105, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081177,\n  author = {J. Alam and P. Kenny},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Spoofing detection employing infinite impulse response — constant Q transform-based feature representations},\n  year = {2017},\n  pages = {101-105},\n  abstract = {Speaker recognition researchers acknowledge that systems which aim to verify speakers automatically based on their pronunciation of an utterance are vulnerable to spoofing attacks using voice conversion and speech synthesis technologies. The first automatic speaker verification spoofing and countermeasures challenge (ASVspoof2015) was designed to stimulate interest in this problem among the speaker recognition communities. In the course of the challenge and subsequently, it became clear that the most effective countermeasures against spoofing attacks are low-level acoustic features (typically extracted at 10 ms intervals) designed to detect artifacts in synthetic or voice converted speech. In this work, we demonstrate the effectiveness of the infinite impulse response - constant Q transform (IIR-CQT) spectrum-based cepstral coefficients (ICQC) as anti-spoofing front-end. The IIR-CQT spectrum is estimated by filtering the multi-resolution fast Fourier transform with an infinite impulse response filter. These features can be used on their own with a standard Gaussian mixture model backend to detect spoofing attacks or they can be used in tandem with bottleneck features which are extracted from a bottleneck layer in a deep neural network designed to discriminate between synthetic and natural speech. We show that the ICQC features are capable of producing very low equal error rates on the individual spoofing attacks in the ASVspoof2015 data set (0.02% on the known attacks, 0.23% on the unknown attacks, and 0.13% on average). Moreover, with a single decision threshold (common to all of the attacks), the ICQC front end yielded an equal error rate of 0.20%.},\n  keywords = {fast Fourier transforms;feature extraction;Gaussian processes;IIR filters;neural nets;speaker recognition;speech synthesis;low-level acoustic features;speaker recognition communities;countermeasures challenge;automatic speaker verification spoofing;voice conversion;speaker recognition researchers;feature representations;unknown attacks;individual spoofing attacks;ICQC features;natural speech;infinite impulse response filter;IIR-CQT spectrum;anti-spoofing front-end;time 10.0 ms;Feature extraction;Cepstral analysis;Time-frequency analysis;Signal resolution;Speech;Fast Fourier transforms;spoofing detection;ASVspoof2015;GMM;bottleneck features;ICQC},\n  doi = {10.23919/EUSIPCO.2017.8081177},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Speaker recognition researchers acknowledge that systems which aim to verify speakers automatically based on their pronunciation of an utterance are vulnerable to spoofing attacks using voice conversion and speech synthesis technologies. The first automatic speaker verification spoofing and countermeasures challenge (ASVspoof2015) was designed to stimulate interest in this problem among the speaker recognition communities. In the course of the challenge and subsequently, it became clear that the most effective countermeasures against spoofing attacks are low-level acoustic features (typically extracted at 10 ms intervals) designed to detect artifacts in synthetic or voice-converted speech. In this work, we demonstrate the effectiveness of the infinite impulse response - constant Q transform (IIR-CQT) spectrum-based cepstral coefficients (ICQC) as an anti-spoofing front-end. The IIR-CQT spectrum is estimated by filtering the multi-resolution fast Fourier transform with an infinite impulse response filter. These features can be used on their own with a standard Gaussian mixture model backend to detect spoofing attacks, or they can be used in tandem with bottleneck features extracted from a bottleneck layer in a deep neural network designed to discriminate between synthetic and natural speech. We show that the ICQC features are capable of producing very low equal error rates on the individual spoofing attacks in the ASVspoof2015 data set (0.02% on the known attacks, 0.23% on the unknown attacks, and 0.13% on average). Moreover, with a single decision threshold (common to all of the attacks), the ICQC front end yielded an equal error rate of 0.20%.\n
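For orientation, the generic cepstral recipe that the ICQC features follow: a log-magnitude spectrum followed by a DCT. A plain FFT spectrum stands in here for the paper's IIR-filtered constant-Q spectrum.

```python
import numpy as np
from scipy.fft import dct, rfft

rng = np.random.default_rng(6)
frame = rng.normal(size=512) * np.hanning(512)      # one windowed frame
log_spec = np.log(np.abs(rfft(frame)) + 1e-10)      # log-magnitude spectrum
ceps = dct(log_spec, type=2, norm="ortho")[:20]     # keep 20 cepstral coeffs
print(ceps.shape)
```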
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Novel energy separation based instantaneous frequency features for spoof speech detection.\n \n \n \n \n\n\n \n Kamble, M. R.; and Patil, H. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 106-110, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NovelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081178,\n  author = {M. R. Kamble and H. A. Patil},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Novel energy separation based instantaneous frequency features for spoof speech detection},\n  year = {2017},\n  pages = {106-110},\n  abstract = {Speech Synthesis (SS) and Voice Conversion (VC) presents a genuine risk of attacks for Automatic Speaker Verification (ASV) technology. In this paper, we evaluate front-end anti-spoofing technique to protect ASV system for SS and VC attack using a standard benchmarking database. In particular, we propose a novel feature set, namely, Energy Separation Algorithm-based Instantaneous Frequency Cosine Coefficients (ESA-IFCC) to detect the genuine and impostor speech. The experiments are carried out on ASV Spoof 2015 Challenge database. On the development set, the score-level fusion of proposed ESA-IFCC feature set with Mel Frequency Cepstral Coefficients (MFCC) gave an EER of 3.45 %, which reduced significantly from MFCC (6.98 %) and ESA-IFCC (5.43 %) with 13-D static features. The EER decreases further to 2.01 % and 1.89 % for Δ and ΔΔ features derived from proposed ESA-IFCC features, respectively. The overall average error rate for known and unknown attacks in evaluation set was 6.79 % for ESA-IFCC and was significantly better than the MFCC (9.15 %) features.},\n  keywords = {feature extraction;speech recognition;speech synthesis;score-level fusion;proposed ESA-IFCC feature;Mel Frequency Cepstral Coefficients;MFCC;13-D static features;evaluation set;novel energy separation;instantaneous frequency features;spoof speech detection;SS;front-end anti-spoofing technique;ASV system;VC attack;standard benchmarking database;genuine speech;impostor speech;ASV Spoof 2015 Challenge database;speech synthesis;voice conversion;automatic speaker verification technology;energy separation algorithm;instantaneous frequency cosine coefficients;VC;Speech;Mel frequency cepstral coefficient;Feature extraction;Signal processing algorithms;Discrete cosine transforms;Narrowband;Databases;Teager Energy Operator;Energy Separation Algorithm;Instantaneous Frequency Cosine Coefficients;SSD;GMM;EER},\n  doi = {10.23919/EUSIPCO.2017.8081178},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347756.pdf},\n}\n\n
\n
\n\n\n
\n Speech Synthesis (SS) and Voice Conversion (VC) present a genuine risk of attacks against Automatic Speaker Verification (ASV) technology. In this paper, we evaluate a front-end anti-spoofing technique to protect an ASV system against SS and VC attacks using a standard benchmarking database. In particular, we propose a novel feature set, namely Energy Separation Algorithm-based Instantaneous Frequency Cosine Coefficients (ESA-IFCC), to distinguish genuine from impostor speech. The experiments are carried out on the ASVspoof 2015 Challenge database. On the development set, the score-level fusion of the proposed ESA-IFCC feature set with Mel Frequency Cepstral Coefficients (MFCC) gave an EER of 3.45%, a significant reduction compared with MFCC (6.98%) and ESA-IFCC (5.43%) alone with 13-D static features. The EER decreases further to 2.01% and 1.89% for the Δ and ΔΔ features derived from the proposed ESA-IFCC features, respectively. The overall average error rate for known and unknown attacks on the evaluation set was 6.79% for ESA-IFCC, significantly better than the MFCC (9.15%) features.\n
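The energy separation machinery rests on the Teager energy operator. Below is a small sketch of the discrete TEO and a DESA-style instantaneous-frequency estimate; parameter choices are illustrative, and the paper's full ESA-IFCC pipeline (filterbanks, cosine transform) is not reproduced.

```python
import numpy as np

def teo(x):
    """Discrete Teager energy: psi[n] = x[n]^2 - x[n-1]*x[n+1]."""
    return x[1:-1] ** 2 - x[:-2] * x[2:]

fs = 8000.0
n = np.arange(1024)
x = np.cos(2 * np.pi * 700 * n / fs)          # single-tone test signal

y = np.diff(x)                                 # backward difference
psi_x, psi_y = teo(x), teo(y)
m = min(len(psi_x), len(psi_y))
# DESA-style estimate: Omega = arccos(1 - psi(y) / (2 psi(x)))
omega = np.arccos(1.0 - psi_y[:m] / (2.0 * psi_x[:m]))
print("estimated frequency (Hz):", np.median(omega) * fs / (2 * np.pi))
```

For the pure tone above the estimate recovers 700 Hz almost exactly, which is the "energy separation" of amplitude and frequency the feature set builds on.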
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Implementation aspects of nonlinear precoding for G.fast — coding and legacy receivers.\n \n \n \n\n\n \n Strobel, R.; Barthelme, A.; and Utschick, W.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 111-115, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081179,\n  author = {R. Strobel and A. Barthelme and W. Utschick},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Implementation aspects of nonlinear precoding for G.fast — coding and legacy receivers},\n  year = {2017},\n  pages = {111-115},\n  abstract = {Hybrid copper/fiber networks bridge the gap between the fiber link at the distribution point and the customer by using copper wires over the last meters. The G.fast technology has been designed to be used in such a fiber to the distribution point (FTTdp) network. Crosstalk management using MIMO precoding is a key to the required performance of FTTdp. With higher frequencies used on copper wires, nonlinear precoding schemes such as Tomlinson Harashima precoding are discussed as an alternative to linear precoding. This paper focuses on the advantages and losses of Tomlinson Harashima precoding used for coded transmission on twisted pair cable bundles. A performance loss model for the Modulo loss in coded transmission is presented. Interoperability between linear and nonlinear precoding is discussed.},\n  keywords = {crosstalk;MIMO communication;optical fibre networks;precoding;subscriber loops;twisted pair cables;crosstalk management;MIMO precoding;nonlinear precoding schemes;Tomlinson Harashima precoding;coded transmission;legacy receivers;G.fast-coding;fiber to the distribution point network;twisted pair cable;Precoding;Receivers;Signal to noise ratio;Quadrature amplitude modulation;Bit error rate;Propagation losses},\n  doi = {10.23919/EUSIPCO.2017.8081179},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Hybrid copper/fiber networks bridge the gap between the fiber link at the distribution point and the customer by using copper wires over the last meters. The G.fast technology has been designed to be used in such a fiber-to-the-distribution-point (FTTdp) network. Crosstalk management using MIMO precoding is key to the required performance of FTTdp. With the higher frequencies used on copper wires, nonlinear precoding schemes such as Tomlinson-Harashima precoding are discussed as an alternative to linear precoding. This paper focuses on the advantages and losses of Tomlinson-Harashima precoding used for coded transmission on twisted pair cable bundles. A performance loss model for the modulo loss in coded transmission is presented. Interoperability between linear and nonlinear precoding is discussed.\n
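The modulo operation responsible for the discussed modulo loss can be stated compactly. A minimal sketch, with the interval bound m as an illustrative parameter:

```python
import numpy as np

def thp_modulo(x, m=4):
    """Fold x (per real dimension) back into the interval [-m, m).

    In Tomlinson-Harashima precoding this keeps the precoder output power
    bounded; the folding at the receiver is what introduces the modulo loss.
    """
    return x - 2 * m * np.floor((x + m) / (2 * m))

x = np.array([-9.5, -4.0, 0.5, 3.9, 4.0, 11.2])
print(thp_modulo(x))   # all values now lie in [-4, 4)
```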
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Collision resolution and interference elimination in multiaccess communication networks.\n \n \n \n \n\n\n \n Akl, N.; and Tewfik, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 116-120, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CollisionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081180,\n  author = {N. Akl and A. Tewfik},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Collision resolution and interference elimination in multiaccess communication networks},\n  year = {2017},\n  pages = {116-120},\n  abstract = {We define a multiaccess communication scheme that effectively eliminates interference and resolves collisions in many-to-one and many-to-many communication scenarios. Each transmitter is uniquely identified by a coding vector. Using these vectors, all signals issued from a specific transmitter will be aligned along a unique dimension at all receivers hearing this transmission. This dimension is characteristic of the transmitter. It also lies within a signal-and-noise subspace that is orthogonal to the noise-only subspace at the receiver. Signals along each dimension of the signal-and-noise subspace can be extracted separately using the properties of the Vandermonde matrix. The decoding algorithm is thus able to asymptotically achieve full network capacity at high signal-to-noise ratio (SNR) compared to 50% and 36.79% asymptotic throughputs for interference alignment and Ethernet respectively. Synchronization is assumed between the transmitters and the receiver(s). The number of transmitters is not necessarily known to each receiver.},\n  keywords = {interference suppression;matrix algebra;MIMO communication;multi-access systems;radio receivers;radio transmitters;collision resolution;multiaccess communication networks;communication scenarios;coding vector;receivers;noise-only subspace;network capacity;high signal-to-noise ratio;interference alignment;transmitter;interference elimination;Vandermonde matrix;signal-and-noise subspace;decoding algorithm;full network capacity;Receivers;Transmitters;Interference;Signal to noise ratio;Multiaccess communication;Encoding;Decoding;collision resolution;interference elimination;interference alignment;Vandermonde matrix;coding vectors},\n  doi = {10.23919/EUSIPCO.2017.8081180},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347575.pdf},\n}\n\n
\n
\n\n\n
\n We define a multiaccess communication scheme that effectively eliminates interference and resolves collisions in many-to-one and many-to-many communication scenarios. Each transmitter is uniquely identified by a coding vector. Using these vectors, all signals issued from a specific transmitter are aligned along a unique dimension at all receivers hearing this transmission. This dimension is characteristic of the transmitter. It also lies within a signal-and-noise subspace that is orthogonal to the noise-only subspace at the receiver. Signals along each dimension of the signal-and-noise subspace can be extracted separately using the properties of the Vandermonde matrix. The decoding algorithm is thus able to asymptotically achieve full network capacity at high signal-to-noise ratio (SNR), compared to 50% and 36.79% asymptotic throughputs for interference alignment and Ethernet, respectively. Synchronization is assumed between the transmitters and the receiver(s). The number of transmitters is not necessarily known to each receiver.\n
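A toy sketch of the separation step, assuming distinct per-transmitter generators so that the coding vectors form an invertible Vandermonde matrix; noise and synchronization details are omitted.

```python
import numpy as np

rng = np.random.default_rng(7)
K = 4
alphas = np.exp(2j * np.pi * np.arange(K) / K)      # distinct generators
V = np.vander(alphas, K, increasing=True)           # K x K Vandermonde, full rank
symbols = rng.choice([-1, 1], size=K) + 1j * rng.choice([-1, 1], size=K)

received = V @ symbols                              # colliding transmissions
decoded = np.linalg.solve(V, received)              # exact separation
print(np.allclose(decoded, symbols))
```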
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance analysis of PLNC using hierarchical modulation.\n \n \n \n \n\n\n \n Awny, S. N.; Tsimenidis, C. C.; Chambers, J.; and Le Goff, S. Y.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 121-125, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081181,\n  author = {S. N. Awny and C. C. Tsimenidis and J. Chambers and S. Y. {Le Goff}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Performance analysis of PLNC using hierarchical modulation},\n  year = {2017},\n  pages = {121-125},\n  abstract = {In this paper, we propose and analyze the performance of a two-way relay network (TWRN) using physical layer network coding (PLNC) with hierarchical modulation (HM). The performance is evaluated for additive white Gaussian noise (AWGN) channels. The TWRN consists of two end nodes and a relay, where each node has a single antenna and operates in a half-duplex mode. Using an analytical approach, we derive the end-to-end symbol error rate (SER) performance expression of the 4/16-QAM HM for the high priority (HP) and the low priority (LP) streams, respectively, as a function of signal-to-noise ratio (SNR). It has been found the analytical and simulation results are in close agreement. The utilization of the proposed HM-PLNC system minimizes the computational complexity of the denoise and forward (DNF) operations at the relay by reducing the number of Euclidean distance computations required to 18 compared to the 49 required in an equivalent 16-QAM based PLNC system.},\n  keywords = {AWGN channels;computational complexity;network coding;quadrature amplitude modulation;relay networks (telecommunication);TWRN;physical layer network coding;additive white Gaussian noise channels;end nodes;single antenna;half-duplex mode;end-to-end symbol error rate performance expression;4/16-QAM HM;two-way relay network;hierarchical modulation;16-QAM based PLNC system;forward operations;HM-PLNC system;signal-to-noise ratio;Relays;Modulation;Error probability;Uplink;Europe;Physical layer;Hierarchical modulation (HM);physical layer network coding (PLNC);denoise and forward (DNF);two-way relay network (TWRN);half-duplex (HD)},\n  doi = {10.23919/EUSIPCO.2017.8081181},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347305.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose and analyze the performance of a two-way relay network (TWRN) using physical layer network coding (PLNC) with hierarchical modulation (HM). The performance is evaluated for additive white Gaussian noise (AWGN) channels. The TWRN consists of two end nodes and a relay, where each node has a single antenna and operates in a half-duplex mode. Using an analytical approach, we derive the end-to-end symbol error rate (SER) performance expression of the 4/16-QAM HM for the high priority (HP) and the low priority (LP) streams, respectively, as a function of signal-to-noise ratio (SNR). It has been found that the analytical and simulation results are in close agreement. The proposed HM-PLNC system minimizes the computational complexity of the denoise-and-forward (DNF) operations at the relay by reducing the number of Euclidean distance computations required to 18, compared to the 49 required in an equivalent 16-QAM based PLNC system.\n
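A small sketch of the 4/16-QAM hierarchical mapping: the HP bits select the quadrant and the LP bits the point inside it. The distance parameter d is a name chosen here for illustration (not from the paper) and sets the HP/LP protection ratio.

```python
import numpy as np

def hm_4_16_qam(hp_bits, lp_bits, d=0.5):
    """hp_bits, lp_bits: arrays of shape (n, 2) with entries in {0, 1}."""
    quad = (1 - 2 * hp_bits) * 1.0          # HP bits: 0 -> +1, 1 -> -1
    fine = (1 - 2 * lp_bits) * d            # LP bits: offset within quadrant
    return (quad[:, 0] + fine[:, 0]) + 1j * (quad[:, 1] + fine[:, 1])

rng = np.random.default_rng(8)
hp = rng.integers(0, 2, size=(8, 2))
lp = rng.integers(0, 2, size=(8, 2))
print(hm_4_16_qam(hp, lp))                   # 16-point constellation samples
```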
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Topology-aware space-time network coding.\n \n \n \n \n\n\n \n Torrea-Duran, R.; Morales-Céspedes, M.; Plata-Chaves, J.; Vandendorpe, L.; and Moonen, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 126-130, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Topology-awarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081182,\n  author = {R. Torrea-Duran and M. Morales-Céspedes and J. Plata-Chaves and L. Vandendorpe and M. Moonen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Topology-aware space-time network coding},\n  year = {2017},\n  pages = {126-130},\n  abstract = {Space-Time Network Coding (STNC) is a time division multiple access (TDMA)-based scheme that combines network coding and space-time coding by allowing relays to combine the information received from different sources during the transmission phase and to forward the combined signal to a destination in the relaying phase. However, STNC schemes require all the relays to overhear the signal transmitted from all the sources in the network and also a large number of time-slots to achieve full diversity in a multipoint-to-multipoint transmission, which is particularly challenging for large cellular networks. In this paper, we exploit a basic knowledge of the network topology, i.e. the knowledge of the base stations overheard by other base stations and users, to reduce drastically the number of time-slots. Our results show that our scheme is able to increase the spectral efficiency with a marginal decrease of the spatial diversity compared to traditional STNC.},\n  keywords = {cellular radio;network coding;relay networks (telecommunication);space-time codes;telecommunication network topology;time division multiple access;topology-aware space-time network coding;time division multiple access;transmission phase;relaying phase;STNC schemes;time-slots;multipoint-to-multipoint transmission;cellular networks;network topology;base stations;TDMA-based scheme;Relays;Interference;Network coding;Network topology;Signal processing;Knowledge engineering;Bit error rate;Space-time network coding;network topology;cooperative communications;relaying},\n  doi = {10.23919/EUSIPCO.2017.8081182},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346941.pdf},\n}\n\n
\n
\n\n\n
\n Space-Time Network Coding (STNC) is a time division multiple access (TDMA)-based scheme that combines network coding and space-time coding by allowing relays to combine the information received from different sources during the transmission phase and to forward the combined signal to a destination in the relaying phase. However, STNC schemes require all the relays to overhear the signal transmitted from all the sources in the network, as well as a large number of time-slots to achieve full diversity in a multipoint-to-multipoint transmission, which is particularly challenging for large cellular networks. In this paper, we exploit a basic knowledge of the network topology, i.e., the knowledge of the base stations overheard by other base stations and users, to drastically reduce the number of time-slots. Our results show that our scheme is able to increase the spectral efficiency with only a marginal decrease of the spatial diversity compared to traditional STNC.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Path planning and localization for mobile anchor based wireless sensor networks.\n \n \n \n \n\n\n \n Erdemir, E.; and Tuncer, T. E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 131-135, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PathPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081183,\n  author = {E. Erdemir and T. E. Tuncer},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Path planning and localization for mobile anchor based wireless sensor networks},\n  year = {2017},\n  pages = {131-135},\n  abstract = {In wireless sensor networks, anchor positions play an important role for accurate localization. For mobile anchor (MA) based scenarios, both the efficiency of the path planning algorithm and the accuracy of the localization mechanism are critical for the best performance. In this work, an adaptive path planning algorithm is proposed based on Gauss-Markov mobility model, while the sensors are localized using alternating minimization approach. Path planning, which combines the velocity adjustment, the perpendicular bisector and the virtual repulsive strategies, is improved by developing virtual attractive force strategy. The surveillance area is divided into grids and a virtual attractive force is applied to the MA in sparsely and densely populated areas. For localization, the non-convex optimization problem is converted into a bi-convex form and solved by alternating minimization algorithm leading to a shorter MA path. The simulation results show that introducing the virtual attractive strategy increases the path planning accuracy and cover more surveillance region using less energy. Furthermore, compared to the linear localization method, the localization accuracy increases when the alternating minimization is used.},\n  keywords = {Gaussian processes;Markov processes;minimisation;mobile radio;path planning;sensor placement;wireless sensor networks;wireless sensor networks;adaptive path planning algorithm;Gauss-Markov mobility model;alternating minimization approach;virtual repulsive strategies;virtual attractive force strategy;nonconvex optimization problem;minimization algorithm;virtual attractive strategy;linear localization method;localization accuracy increases;WSN localization;mobile anchor position;velocity adjustment;perpendicular bisector;virtual repulsive strategy;biconvex optimization;alternating minimization algorithm;shorter mobileanchor path;Sensors;Force;Path planning;Surveillance;Mobile communication;Signal processing algorithms;Wireless sensor networks;Dynamic path planning;mobility model;mobile-anchor;sensor network localization;alternating minimization},\n  doi = {10.23919/EUSIPCO.2017.8081183},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347372.pdf},\n}\n\n
\n
\n\n\n
\n In wireless sensor networks, anchor positions play an important role in accurate localization. For mobile anchor (MA) based scenarios, both the efficiency of the path planning algorithm and the accuracy of the localization mechanism are critical for the best performance. In this work, an adaptive path planning algorithm is proposed based on the Gauss-Markov mobility model, while the sensors are localized using an alternating minimization approach. Path planning, which combines the velocity adjustment, the perpendicular bisector and the virtual repulsive strategies, is improved by developing a virtual attractive force strategy. The surveillance area is divided into grids and a virtual attractive force is applied to the MA in sparsely and densely populated areas. For localization, the non-convex optimization problem is converted into a bi-convex form and solved by an alternating minimization algorithm, leading to a shorter MA path. The simulation results show that introducing the virtual attractive strategy increases the path planning accuracy and covers more of the surveillance region using less energy. Furthermore, compared to the linear localization method, the localization accuracy increases when the alternating minimization is used.\n
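A minimal sketch of the underlying Gauss-Markov mobility update for the anchor (the memory level alpha and the mean speed/direction are illustrative parameters); the paper's attractive and repulsive force corrections are not included.

```python
import numpy as np

rng = np.random.default_rng(9)
alpha, v_bar, th_bar = 0.8, 1.0, 0.0      # memory, mean speed, mean heading
v, th, pos = v_bar, th_bar, np.zeros(2)
track = [pos.copy()]
for _ in range(200):
    # First-order Gauss-Markov updates for speed and heading.
    v = alpha * v + (1 - alpha) * v_bar + np.sqrt(1 - alpha**2) * rng.normal(0, 0.2)
    th = alpha * th + (1 - alpha) * th_bar + np.sqrt(1 - alpha**2) * rng.normal(0, 0.5)
    pos = pos + v * np.array([np.cos(th), np.sin(th)])
    track.append(pos.copy())
print(np.array(track)[-1])   # final anchor position
```

Alpha near 1 yields smooth, momentum-like paths; alpha near 0 degenerates toward a random walk, which is the tunability the model is chosen for.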
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online convex optimization for dynamic network resource allocation.\n \n \n \n \n\n\n \n Chen, T.; Ling, Q.; and Giannakis, G. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 136-140, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081184,\n  author = {T. Chen and Q. Ling and G. B. Giannakis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Online convex optimization for dynamic network resource allocation},\n  year = {2017},\n  pages = {136-140},\n  abstract = {The present paper deals with online convex optimization involving adversarial loss functions and adversarial constraints, where the constraints are revealed after making decisions, and can be tolerable to instantaneous violations but must be satisfied in the long term. Performance of an online algorithm in this setting is assessed by: i) the difference of its losses relative to the best dynamic solution with one-slot-ahead information of the loss function and the constraint (that is here termed dynamic regret); and, ii) the accumulated amount of constraint violations (that is here termed dynamic fit). In this context, a modified online saddle-point (MOSP) scheme is developed, and proved to simultaneously yield sub-linear dynamic regret and fit, provided that the accumulated variations of per-slot minimizers and constraints are sub-linearly growing with time. MOSP is applied to the dynamic network resource allocation task, and shown to outperform the well-known stochastic dual gradient method.},\n  keywords = {computer networks;convex programming;gradient methods;minimisation;resource allocation;stochastic processes;online convex optimization;adversarial loss functions;instantaneous violations;online algorithm;dynamic solution;one-slot-ahead information;loss function;constraint violations;saddle-point scheme;sub-linear dynamic regret;dynamic network resource allocation task;decision making;modified online saddle-point;MOSP;per-slot minimizers;stochastic dual gradient method;Heuristic algorithms;Resource management;Dynamic scheduling;Benchmark testing;Measurement;Signal processing algorithms;Optimization;Online convex optimization;online learning;non-stationary optimization;network resource allocation},\n  doi = {10.23919/EUSIPCO.2017.8081184},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341737.pdf},\n}\n\n
\n
\n\n\n
\n The present paper deals with online convex optimization involving adversarial loss functions and adversarial constraints, where the constraints are revealed after decisions are made, and can tolerate instantaneous violations but must be satisfied in the long term. Performance of an online algorithm in this setting is assessed by: i) the difference of its losses relative to the best dynamic solution with one-slot-ahead information of the loss function and the constraint (here termed dynamic regret); and ii) the accumulated amount of constraint violations (here termed dynamic fit). In this context, a modified online saddle-point (MOSP) scheme is developed, and proved to simultaneously yield sub-linear dynamic regret and fit, provided that the accumulated variations of per-slot minimizers and constraints grow sub-linearly with time. MOSP is applied to the dynamic network resource allocation task, and shown to outperform the well-known stochastic dual gradient method.\n
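In the spirit of the saddle-point scheme (not the exact MOSP recursion), a tiny primal-dual sketch: descend the instantaneous Lagrangian in the primal variable, then ascend the multiplier by the observed constraint violation. The loss and constraint below are hypothetical stand-ins.

```python
import numpy as np

rng = np.random.default_rng(10)
x, lam, mu = np.zeros(2), 0.0, 0.1
for t in range(1000):
    target = rng.normal([1.0, -1.0], 0.1)       # slowly varying adversarial loss
    grad_f = 2 * (x - target)                   # gradient of ||x - target||^2
    g = x.sum() - 1.0                           # long-term constraint g(x) <= 0
    x = x - mu * (grad_f + lam * np.ones(2))    # primal descent on Lagrangian
    lam = max(0.0, lam + mu * g)                # dual ascent on the violation
print(x, lam)
```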
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exact diffusion strategy for optimization by networked agents.\n \n \n \n \n\n\n \n Yuan, K.; Ying, B.; Zhao, X.; and Sayed, A. H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 141-145, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ExactPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081185,\n  author = {K. Yuan and B. Ying and X. Zhao and A. H. Sayed},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Exact diffusion strategy for optimization by networked agents},\n  year = {2017},\n  pages = {141-145},\n  abstract = {This work develops a distributed optimization algorithm with guaranteed exact convergence for a broad class of left-stochastic combination policies. The resulting exact diffusion strategy is shown to have a wider stability range and superior convergence performance than the EXTRA consensus strategy. The exact diffusion solution is also applicable to non-symmetric left-stochastic combination matrices, while most earlier developments on exact consensus implementations are limited to doubly-stochastic matrices or right-stochastic matrices; these latter policies impose stringent constraints on the network topology. Stability and convergence results are noted, along with numerical simulations to illustrate the conclusions.},\n  keywords = {convergence;convergence of numerical methods;distributed algorithms;matrix algebra;optimisation;stochastic processes;networked agents;distributed optimization algorithm;left-stochastic combination policies;EXTRA consensus strategy;exact diffusion solution;left-stochastic combination matrices;doubly-stochastic matrices;right-stochastic matrices;network topology;convergence results;exact diffusion strategy;Convergence;Symmetric matrices;Optimization;Signal processing algorithms;Aggregates;Europe;Signal processing;distributed optimization;diffusion;con-sensus;exact convergence;stochastic matrix;balanced policy},\n  doi = {10.23919/EUSIPCO.2017.8081185},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343671.pdf},\n}\n\n
\n
\n\n\n
\n This work develops a distributed optimization algorithm with guaranteed exact convergence for a broad class of left-stochastic combination policies. The resulting exact diffusion strategy is shown to have a wider stability range and superior convergence performance than the EXTRA consensus strategy. The exact diffusion solution is also applicable to non-symmetric left-stochastic combination matrices, while most earlier developments on exact consensus implementations are limited to doubly-stochastic matrices or right-stochastic matrices; these latter policies impose stringent constraints on the network topology. Stability and convergence results are noted, along with numerical simulations to illustrate the conclusions.\n
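A compact sketch of the exact diffusion recursion (adapt, correct, combine) on a toy distributed least-squares problem. For simplicity the uniform combination matrix used here is doubly stochastic, whereas the paper's point is that general left-stochastic policies also work.

```python
import numpy as np

rng = np.random.default_rng(11)
K, d, mu = 4, 3, 0.05
H = rng.normal(size=(K, 10, d))                    # local data per agent
w_star = rng.normal(size=d)
y = np.einsum("knd,d->kn", H, w_star)              # consistent local targets

A = np.full((K, K), 1.0 / K)                       # combination matrix
w = np.zeros((K, d))
psi_prev = w.copy()
for i in range(300):
    grad = np.einsum("knd,kn->kd", H,
                     np.einsum("knd,kd->kn", H, w) - y) / 10
    psi = w - mu * grad                            # adapt
    phi = psi + w - psi_prev                       # correct
    w = A.T @ phi                                  # combine
    psi_prev = psi
print(np.abs(w - w_star).max())                    # all agents near w_star
```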
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Network topology inference via elastic net structural equation models.\n \n \n \n \n\n\n \n Traganitis, P. A.; Shen, Y.; and Giannakis, G. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 146-150, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NetworkPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081186,\n  author = {P. A. Traganitis and Y. Shen and G. B. Giannakis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Network topology inference via elastic net structural equation models},\n  year = {2017},\n  pages = {146-150},\n  abstract = {Linear structural equation models (SEMs) have been very successful in identifying the topology of complex graphs, such as those representing social and brain networks. In many cases however, the presence of highly correlated nodes hinders performance of the available SEM estimators that rely on the least-absolute shrinkage and selection operator (LASSO). To this end, an elastic net based SEM is put forth, to infer causal relations between nodes belonging to networks, in the presence of highly correlated data. An efficient algorithm based on the alternating direction method of multipliers (ADMM) is developed, and preliminary tests on synthetic as well as real data demonstrate the effectiveness of the proposed approach.},\n  keywords = {mathematical operators;network theory (graphs);statistical analysis;linear structural equation models;complex graphs;network topology inference;elastic net structural equation models;SEM estimators;least-absolute shrinkage and selection operator;LASSO;alternating direction method of multipliers;ADMM;elastic net based SEM;Network topology;Topology;Mathematical model;Numerical analysis;Signal processing;Brain modeling;Europe;Networks;Topology inference;Structural Equation Models;Elastic Net},\n  doi = {10.23919/EUSIPCO.2017.8081186},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347324.pdf},\n}\n\n
\n
\n\n\n
\n Linear structural equation models (SEMs) have been very successful in identifying the topology of complex graphs, such as those representing social and brain networks. In many cases however, the presence of highly correlated nodes hinders performance of the available SEM estimators that rely on the least-absolute shrinkage and selection operator (LASSO). To this end, an elastic net based SEM is put forth, to infer causal relations between nodes belonging to networks, in the presence of highly correlated data. An efficient algorithm based on the alternating direction method of multipliers (ADMM) is developed, and preliminary tests on synthetic as well as real data demonstrate the effectiveness of the proposed approach.\n
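A hedged stand-in, assuming scikit-learn: node-wise elastic-net regressions to recover edges from correlated node signals. The paper instead solves the joint SEM criterion with ADMM; this sketch only illustrates why the elastic net helps where pure LASSO struggles with correlated regressors.

```python
import numpy as np
from sklearn.linear_model import ElasticNet

rng = np.random.default_rng(13)
N, T = 6, 400
A_true = np.zeros((N, N)); A_true[0, 1] = 0.8; A_true[2, 3] = -0.6
E = rng.normal(size=(T, N))
Y = E @ np.linalg.inv(np.eye(N) - A_true).T      # SEM model: y = A y + e

A_hat = np.zeros((N, N))
for i in range(N):
    others = [j for j in range(N) if j != i]
    fit = ElasticNet(alpha=0.02, l1_ratio=0.7).fit(Y[:, others], Y[:, i])
    A_hat[i, others] = fit.coef_
print(np.round(A_hat, 2))                        # nonzeros near the true edges
```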
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A connectedness constraint for learning sparse graphs.\n \n \n \n \n\n\n \n Sundin, M.; Venkitaraman, A.; Jansson, M.; and Chatterjee, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 151-155, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081187,\n  author = {M. Sundin and A. Venkitaraman and M. Jansson and S. Chatterjee},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A connectedness constraint for learning sparse graphs},\n  year = {2017},\n  pages = {151-155},\n  abstract = {Graphs are naturally sparse objects that are used to study many problems involving networks, for example, distributed learning and graph signal processing. In some cases, the graph is not given, but must be learned from the problem and available data. Often it is desirable to learn sparse graphs. However, making a graph highly sparse can split the graph into several disconnected components, leading to several separate networks. The main difficulty is that connectedness is often treated as a combinatorial property, making it hard to enforce in e.g. convex optimization problems. In this article, we show how connectedness of undirected graphs can be formulated as an analytical property and can be enforced as a convex constraint. We especially show how the constraint relates to the distributed consensus problem and graph Laplacian learning. Using simulated and real data, we perform experiments to learn sparse and connected graphs from data.},\n  keywords = {convex programming;graph theory;learning (artificial intelligence);connectedness constraint;naturally sparse objects;distributed learning;convex optimization problems;undirected graphs;convex constraint;distributed consensus problem;graph Laplacian learning;sparse graph learning;graph signal processing;Laplace equations;Signal processing;Symmetric matrices;Sparse matrices;Optimization;Europe;Eigenvalues and eigenfunctions},\n  doi = {10.23919/EUSIPCO.2017.8081187},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345277.pdf},\n}\n\n
\n
\n\n\n
\n Graphs are naturally sparse objects that are used to study many problems involving networks, for example, distributed learning and graph signal processing. In some cases, the graph is not given, but must be learned from the problem and available data. Often it is desirable to learn sparse graphs. However, making a graph highly sparse can split the graph into several disconnected components, leading to several separate networks. The main difficulty is that connectedness is often treated as a combinatorial property, making it hard to enforce in e.g. convex optimization problems. In this article, we show how connectedness of undirected graphs can be formulated as an analytical property and can be enforced as a convex constraint. We especially show how the constraint relates to the distributed consensus problem and graph Laplacian learning. Using simulated and real data, we perform experiments to learn sparse and connected graphs from data.\n
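The analytical property in question is easy to state: an undirected graph is connected if and only if the second-smallest eigenvalue of its Laplacian (the algebraic connectivity) is strictly positive, which is what a convex constraint can bound from below. A quick check:

```python
import numpy as np

def algebraic_connectivity(W):
    """W: symmetric nonnegative adjacency/weight matrix."""
    L = np.diag(W.sum(1)) - W                    # graph Laplacian
    return np.sort(np.linalg.eigvalsh(L))[1]     # second-smallest eigenvalue

path = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], float)    # connected path
split = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]], float)   # isolated node
print(algebraic_connectivity(path) > 0, algebraic_connectivity(split) > 0)
```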
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A global optimization approach for rational sparsity promoting criteria.\n \n \n \n \n\n\n \n Castella, M.; and Pesquet, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 156-160, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081188,\n  author = {M. Castella and J. Pesquet},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A global optimization approach for rational sparsity promoting criteria},\n  year = {2017},\n  pages = {156-160},\n  abstract = {We consider the problem of recovering an unknown signal observed through a nonlinear model and corrupted with additive noise. More precisely, the nonlinear degradation consists of a convolution followed by a nonlinear rational transform. As a prior information, the original signal is assumed to be sparse. We tackle the problem by minimizing a least-squares fit criterion penalized by a Geman-McClure like potential. In order to find a globally optimal solution to this rational minimization problem, we transform it in a generalized moment problem, for which a hierarchy of semidefinite programming relaxations can be used. To overcome computational limitations on the number of involved variables, the structure of the problem is carefully addressed, yielding a sparse relaxation able to deal with up to several hundreds of optimized variables. Our experiments show the good performance of the proposed approach.},\n  keywords = {convex programming;least squares approximations;minimisation;signal processing;rational minimization problem;generalized moment problem;semidefinite programming relaxations;sparse relaxation;optimized variables;global optimization approach;rational sparsity;nonlinear model;additive noise;nonlinear degradation;globally optimal solution;least-squares fit criterion minimization;nonlinear rational transform;unknown signal recovery;Optimization;Convolution;Mathematical model;Minimization;Europe;Transforms},\n  doi = {10.23919/EUSIPCO.2017.8081188},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347120.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of recovering an unknown signal observed through a nonlinear model and corrupted with additive noise. More precisely, the nonlinear degradation consists of a convolution followed by a nonlinear rational transform. As prior information, the original signal is assumed to be sparse. We tackle the problem by minimizing a least-squares fit criterion penalized by a Geman-McClure-like potential. To find a globally optimal solution to this rational minimization problem, we transform it into a generalized moment problem, for which a hierarchy of semidefinite programming relaxations can be used. To overcome computational limitations on the number of involved variables, the structure of the problem is carefully exploited, yielding a sparse relaxation able to deal with up to several hundred optimized variables. Our experiments show the good performance of the proposed approach.\n
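For concreteness, here is a hedged sketch of the kind of rational criterion involved: a least-squares fit through an assumed convolution-plus-nonlinearity degradation, penalized by a Geman-McClure-type potential. The paper's actual contribution, certifying the global minimum of such a criterion via semidefinite relaxations of a generalized moment problem, is not reproduced here, and the transform g(z) below is a placeholder.

# Illustrative sketch (assumed forms, not the paper's exact model).
import numpy as np

def geman_mcclure(x, delta=0.1):
    return x ** 2 / (x ** 2 + delta ** 2)       # bounded, rational penalty

def objective(x, y, h, lam=0.5, delta=0.1):
    z = np.convolve(x, h, mode="same")           # linear (convolutive) stage
    g = z / (1.0 + np.abs(z))                    # placeholder nonlinear transform
    return 0.5 * np.sum((y - g) ** 2) + lam * np.sum(geman_mcclure(x, delta))

Because both the fit term and the penalty are rational, a local optimizer can stall in spurious minima, which is why the paper resorts to a global moment/SDP hierarchy.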
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust distributed multi-speaker voice activity detection using stability selection for sparse non-negative feature extraction.\n \n \n \n \n\n\n \n Hamaidi, L. K.; Muma, M.; and Zoubir, A. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 161-165, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081189,\n  author = {L. K. Hamaidi and M. Muma and A. M. Zoubir},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust distributed multi-speaker voice activity detection using stability selection for sparse non-negative feature extraction},\n  year = {2017},\n  pages = {161-165},\n  abstract = {In this paper, we propose a robust multi-speaker voice activity detection approach for wireless acoustic sensor networks (WASN). Each node of the WASN receives a mixture of sound sources. We propose a non-negative feature extraction using stability selection that exploits the sparsity of the speech energy signals. The strongest right singular vectors serve as source-specific features for the subsequent voice activity detection (VAD). To separate active speech frames from silent frames, we propose a robust Mahalanobis classifier that is based on an M-estimator of the covariance matrix. The proposed approach can also be applied to a distributed setting, where no fusion center is available. Highly accurate VAD results are obtained in a challenging WASN of 20 nodes observing 6 sources in a reverberant environment.},\n  keywords = {acoustic communication (telecommunication);acoustic signal processing;covariance matrices;estimation theory;feature extraction;signal classification;voice activity detection;wireless sensor networks;sound sources;stability selection;strongest right singular vectors;source-specific features;active speech frames;robust Mahalanobis classifier;wireless acoustic sensor networks;WASN;robust distributed multispeaker voice activity detection approach;sparse nonnegative feature extraction;speech energy signal sparsity;VAD;M-estimator;covariance matrix;Robustness;Feature extraction;Speech;Microphones;Stability analysis},\n  doi = {10.23919/EUSIPCO.2017.8081189},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346815.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a robust multi-speaker voice activity detection approach for wireless acoustic sensor networks (WASN). Each node of the WASN receives a mixture of sound sources. We propose a non-negative feature extraction using stability selection that exploits the sparsity of the speech energy signals. The strongest right singular vectors serve as source-specific features for the subsequent voice activity detection (VAD). To separate active speech frames from silent frames, we propose a robust Mahalanobis classifier that is based on an M-estimator of the covariance matrix. The proposed approach can also be applied to a distributed setting, where no fusion center is available. Highly accurate VAD results are obtained in a challenging WASN of 20 nodes observing 6 sources in a reverberant environment.\n
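As a sketch of the classification stage only, the following flags active-speech frames by their robust Mahalanobis distance; scikit-learn's MinCovDet (an MCD estimator) stands in for the paper's M-estimator of the covariance matrix, and fitting on silent frames is an assumption made for illustration.

# Sketch of the VAD classification step with a robust covariance fit.
import numpy as np
from sklearn.covariance import MinCovDet

def robust_vad(features_silent, features_test, threshold=3.0):
    """features_*: (n_frames, n_features) source-specific features."""
    mcd = MinCovDet().fit(features_silent)   # robust fit on silence frames
    d2 = mcd.mahalanobis(features_test)      # squared Mahalanobis distances
    return np.sqrt(d2) > threshold           # True = speech active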
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust distributed sequential hypothesis testing for detecting a random signal in non-Gaussian noise.\n \n \n \n \n\n\n \n Leonard, M. R.; and Zoubir, A. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 166-170, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081190,\n  author = {M. R. Leonard and A. M. Zoubir},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust distributed sequential hypothesis testing for detecting a random signal in non-Gaussian noise},\n  year = {2017},\n  pages = {166-170},\n  abstract = {This paper addresses the problem of sequential binary hypothesis testing in a multi-agent network to detect a random signal in non-Gaussian noise. To this end, the con-sensus+innovations sequential probability ratio test (ciSPRT) is generalized for arbitrary binary hypothesis tests and a robust version is developed. Simulations are performed to validate the performance of the proposed algorithms in terms of the average run length (ARL) and the error probabilities.},\n  keywords = {error statistics;Gaussian noise;multi-agent systems;probability;signal detection;signal detection;robust sequential hypothesis testing;consensus-innovations sequential probability ratio test;sequential binary hypothesis testing;robust version;arbitrary binary hypothesis tests;nonGaussian noise;random signal;multiagent network;Robustness;Signal processing;Europe;Uncertainty;Noise measurement;Histograms},\n  doi = {10.23919/EUSIPCO.2017.8081190},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346551.pdf},\n}\n\n
\n
\n\n\n
\n This paper addresses the problem of sequential binary hypothesis testing in a multi-agent network to detect a random signal in non-Gaussian noise. To this end, the consensus+innovations sequential probability ratio test (ciSPRT) is generalized to arbitrary binary hypothesis tests, and a robust version is developed. Simulations are performed to validate the performance of the proposed algorithms in terms of the average run length (ARL) and the error probabilities.\n
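A hedged sketch of the consensus+innovations structure: each node mixes its neighbors' running test statistics through a weight matrix (consensus) and adds its freshly computed log-likelihood ratio (innovation), then compares against Wald-style thresholds. The robustification proposed in the paper is omitted here.

# Sketch of one consensus+innovations SPRT time step.
import numpy as np

def ci_sprt_step(S, W, llr):
    """S: per-node test statistics; W: doubly-stochastic weights matching
    the network topology; llr: new per-node log-likelihood ratios."""
    return W @ S + llr

# Wald-style thresholds for target error probabilities (i.i.d. approximation)
alpha, beta = 0.01, 0.01
upper = np.log((1 - beta) / alpha)   # decide H1 when a statistic crosses above
lower = np.log(beta / (1 - alpha))   # decide H0 when it crosses below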
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Recycling Gibbs sampling.\n \n \n \n \n\n\n \n Martino, L.; Elvira, V.; and Camps-Valls, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 171-175, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RecyclingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081191,\n  author = {L. Martino and V. Elvira and G. Camps-Valls},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Recycling Gibbs sampling},\n  year = {2017},\n  pages = {171-175},\n  abstract = {Gibbs sampling is a well-known Markov chain Monte Carlo (MCMC) algorithm, extensively used in signal processing, machine learning and statistics. The key point for the successful application of the Gibbs sampler is the ability to draw samples from the full-conditional probability density functions efficiently. In the general case this is not possible, so in order to speed up the convergence of the chain, it is required to generate auxiliary samples. However, such intermediate information is finally disregarded. In this work, we show that these auxiliary samples can be recycled within the Gibbs estimators, improving their efficiency with no extra cost. Theoretical and exhaustive numerical comparisons show the validity of the approach.},\n  keywords = {Bayes methods;Markov processes;Monte Carlo methods;Gibbs sampling;Markov chain Monte Carlo algorithm;signal processing;machine learning;statistics;Gibbs sampler;full-conditional probability density functions;auxiliary samples;Gibbs estimators;Signal processing algorithms;Monte Carlo methods;Standards;Signal processing;Recycling;Markov processes;Machine learning algorithms;Bayesian inference;Markov Chain Monte Carlo (MCMC);Gibbs sampling;Gaussian Processes (GP)},\n  doi = {10.23919/EUSIPCO.2017.8081191},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347055.pdf},\n}\n\n
\n
\n\n\n
\n Gibbs sampling is a well-known Markov chain Monte Carlo (MCMC) algorithm, extensively used in signal processing, machine learning and statistics. The key to the successful application of the Gibbs sampler is the ability to draw samples efficiently from the full-conditional probability density functions. In the general case this is not possible, so auxiliary samples must be generated to speed up the convergence of the chain. However, this intermediate information is ultimately discarded. In this work, we show that these auxiliary samples can be recycled within the Gibbs estimators, improving their efficiency at no extra cost. Theoretical and exhaustive numerical comparisons show the validity of the approach.\n
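A minimal sketch of the recycling idea on a toy bivariate Gaussian: a standard Gibbs estimator keeps only the end-of-sweep states, while the recycling estimator also averages the intermediate states produced after every single-coordinate update, at no extra sampling cost.

# Toy example: recycling intermediate Gibbs states into the estimator.
import numpy as np

rng = np.random.default_rng(0)
rho = 0.8
x = np.zeros(2)
standard, recycled = [], []
for _ in range(5000):
    # full conditionals of a zero-mean bivariate Gaussian, correlation rho
    x[0] = rho * x[1] + np.sqrt(1 - rho ** 2) * rng.standard_normal()
    recycled.append(x.copy())          # intermediate state: recycled
    x[1] = rho * x[0] + np.sqrt(1 - rho ** 2) * rng.standard_normal()
    recycled.append(x.copy())
    standard.append(x.copy())          # end-of-sweep state only
print(np.mean(standard, axis=0), np.mean(recycled, axis=0))

Both averages estimate the same mean, but the recycled one uses twice as many (correlated) points per sweep for free.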
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n H∞ estimator for gearbox diagnosis in variable speed conditions.\n \n \n \n\n\n \n Assoumane, A.; Ravier, P.; Capdessus, C.; and Sekko, E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 176-180, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081192,\n  author = {A. Assoumane and P. Ravier and C. Capdessus and E. Sekko},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {H∞ estimator for gearbox diagnosis in variable speed conditions},\n  year = {2017},\n  pages = {176-180},\n  abstract = {It is well known that a faulty gearbox vibration signal exhibits an amplitude modulation (AM) as well as a phase modulation (PM). These modulation carry out a lot of useful information about health condition. This paper presents two approaches for modeling amplitude and phase modulation in gearbox vibration signal. These last are used to describe the vibration signal by a state space model. Then, the H∞ estimator is designed to estimate the modulation appearing in the vibration signal. This estimator is obtained by minimizing the worst possible amplification effects of disturbances (measurement and modeling noises) on the estimation error. Such an estimator does not require any assumption on the statistic properties of the noises. Since additive noises in gearbox vibration signal are non Gaussian and non white, this estimator is more suitable in practical gearbox diagnosis. To evaluate the performance of the two approaches, we use a synthetic and an experimental gearbox vibration signal.},\n  keywords = {amplitude modulation;condition monitoring;fault diagnosis;Gaussian noise;gears;vibrational signal processing;vibrations;variable speed conditions;faulty gearbox vibration signal;amplitude modulation;phase modulation;health condition;state space model;estimation error;practical gearbox diagnosis;experimental gearbox vibration signal;H∞ estimator;statistic properties;additive noises;Vibrations;Modulation;Gears;Signal to noise ratio;Estimation error;Time-frequency analysis;Gearbox diagnosis;variable speed conditions;Htc estimator;Legendre polynomials},\n  doi = {10.23919/EUSIPCO.2017.8081192},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n It is well known that a faulty gearbox vibration signal exhibits amplitude modulation (AM) as well as phase modulation (PM). These modulations carry a great deal of useful information about the gearbox's health condition. This paper presents two approaches for modeling the amplitude and phase modulation in a gearbox vibration signal. These models are used to describe the vibration signal with a state-space model. The H∞ estimator is then designed to estimate the modulation appearing in the vibration signal. This estimator is obtained by minimizing the worst possible amplification effect of the disturbances (measurement and modeling noises) on the estimation error. Such an estimator does not require any assumption on the statistical properties of the noises. Since the additive noise in gearbox vibration signals is non-Gaussian and non-white, this estimator is well suited to practical gearbox diagnosis. To evaluate the performance of the two approaches, we use a synthetic and an experimental gearbox vibration signal.\n
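For reference, a hedged sketch of a discrete-time H∞ (a priori) filter recursion in the common textbook form (see, e.g., D. Simon, Optimal State Estimation); the paper applies such an estimator to an AM/PM state-space model of the vibration signal, whereas the matrices below are generic. As gamma grows without bound, the recursion reduces to the Kalman filter.

# Hedged sketch of one a priori H-infinity filter recursion (textbook form).
import numpy as np

def hinf_step(xhat, P, y, A, C, Q, R, gamma):
    """gamma: disturbance attenuation level; gamma -> inf gives Kalman."""
    n = P.shape[0]
    Ri = np.linalg.inv(R)
    # modified Riccati term: the -gamma^-2 contribution is the H-infinity part
    M = np.linalg.inv(np.eye(n) - P / gamma ** 2 + C.T @ Ri @ C @ P)
    K = A @ P @ M @ C.T @ Ri                  # H-infinity gain
    xhat_new = A @ xhat + K @ (y - C @ xhat)  # state update
    P_new = A @ P @ M @ A.T + Q
    return xhat_new, P_new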
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the number of iterations for the matching pursuit algorithm.\n \n \n \n \n\n\n \n Li, F.; Triggs, C. M.; Dumitrescu, B.; and Giurcăneanu, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 181-185, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081193,\n  author = {F. Li and C. M. Triggs and B. Dumitrescu and C. Giurcăneanu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On the number of iterations for the matching pursuit algorithm},\n  year = {2017},\n  pages = {181-185},\n  abstract = {We address the problem of selecting, from a given dictionary, a subset of predictors whose linear combination provides the best description for the vector of measurements. To this end, we apply the well-known matching pursuit algorithm (MPA). Even if there are theoretical results on the performance of MPA, there is no widely accepted rule for stopping the algorithm. In this work, we focus on stopping rules based on information theoretic criteria (ITC). The key point is to evaluate the degrees of freedom (df) for the model produced at each iteration. This is traditionally done by computing the trace of the hat matrix which maps the data vector to its estimate. We prove some theoretical results concerning the hat matrix. One of them provides an upper bound on the increase of df from the m-th to the (m + 1)-th iteration. Based on the properties of the hat matrix, we propose novel ITC for selecting the number of iterations. All of them are obtained by modifying criteria designed for variable selection in the classical linear model. For assessing the performance of the novel criteria, we conduct a simulation study.},\n  keywords = {information theory;iterative methods;matrix algebra;MPA;stopping rules;information theoretic criteria;ITC;iteration;hat matrix;data vector;variable selection;classical linear model;matching pursuit algorithm;linear combination;Matching pursuit algorithms;Signal processing algorithms;Prediction algorithms;Signal processing;Computational modeling;Europe;Upper bound;Matching pursuit algorithm;hat matrix;projector;information theoretic criteria;number of iterations},\n  doi = {10.23919/EUSIPCO.2017.8081193},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346793.pdf},\n}\n\n
\n
\n\n\n
\n We address the problem of selecting, from a given dictionary, a subset of predictors whose linear combination provides the best description of the vector of measurements. To this end, we apply the well-known matching pursuit algorithm (MPA). Although there are theoretical results on the performance of the MPA, there is no widely accepted rule for stopping the algorithm. In this work, we focus on stopping rules based on information theoretic criteria (ITC). The key point is to evaluate the degrees of freedom (df) of the model produced at each iteration. This is traditionally done by computing the trace of the hat matrix, which maps the data vector to its estimate. We prove some theoretical results concerning the hat matrix. One of them provides an upper bound on the increase of df from the m-th to the (m + 1)-th iteration. Based on the properties of the hat matrix, we propose novel ITC for selecting the number of iterations. All of them are obtained by modifying criteria designed for variable selection in the classical linear model. To assess the performance of the novel criteria, we conduct a simulation study.\n
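A sketch of the mechanics: in matching pursuit, the map from the data vector y to its estimate at iteration m is linear, with hat matrix H_m updated by one rank-one term per iteration, so df_m = trace(H_m) can feed a stopping criterion. A BIC-like penalty is used below for illustration; the paper's novel ITC differ.

# Sketch: matching pursuit with an explicit hat matrix and an
# information-criterion stopping rule (BIC-like penalty, for illustration).
import numpy as np

def mp_with_itc(A, y, max_iter=50):
    """A: (n, p) dictionary with unit-norm columns."""
    n = A.shape[0]
    H = np.zeros((n, n))
    yhat = np.zeros(n)
    best = (np.inf, 0)
    for m in range(1, max_iter + 1):
        r = y - yhat
        j = np.argmax(np.abs(A.T @ r))        # best-matching atom
        a = A[:, j:j + 1]
        H = H + a @ (a.T @ (np.eye(n) - H))   # rank-one hat-matrix update
        yhat = H @ y
        df = np.trace(H)                      # degrees of freedom
        itc = n * np.log(np.sum((y - yhat) ** 2) / n) + df * np.log(n)
        if itc < best[0]:
            best = (itc, m)
    return best[1]                            # selected number of iterations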
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance bounds for cooperative RSS emitter tracking using diffusion particle filters.\n \n \n \n \n\n\n \n Dias, S. S.; and Bruno, M. G. S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 186-190, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081194,\n  author = {S. S. Dias and M. G. S. Bruno},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Performance bounds for cooperative RSS emitter tracking using diffusion particle filters},\n  year = {2017},\n  pages = {186-190},\n  abstract = {This paper introduces a methodology for numerical computation of the Posterior Cramér-Rao Lower Bound (PCRLB) for the position estimate mean-square error when a moving emitter is tracked by a network of received-signal-strength (RSS) sensors using a distributed, random exchange diffusion filter. The square root of the PCRLB is compared to the empirical root-mean-square error curve for a particle filter implementation of the diffusion filter, referred to as RndEx-PF, and to the square root of the PCRLB for the optimal centralized filter that assimilates all network measurements at each time instant. In addition, we also compare the proposed RndEx-PF algorithm to three alternative distributed trackers based on Kullback-Leibler fusion using both iterative consensus and non-iterative diffusion strategies.},\n  keywords = {mean square error methods;particle filtering (numerical methods);RSSI;PCRLB;empirical root-mean-square error curve;optimal centralized filter;RndEx-PF algorithm;iterative consensus;diffusion particle filters;Posterior Cramér-Rao Lower Bound;received-signal-strength sensors;distributed exchange diffusion filter;random exchange diffusion filter;cooperative RSS emitter tracking;position estimate mean-square error;noniterative diffusion strategies;Kullback-Leibler fusion;Sensors;Protocols;Atmospheric measurements;Particle measurements;Europe;Particle filters;Posterior Cramér-Rao Lower Bound;Particle Filters;Diffusion;RSS Sensors;Emitter Tracking},\n  doi = {10.23919/EUSIPCO.2017.8081194},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346588.pdf},\n}\n\n
\n
\n\n\n
\n This paper introduces a methodology for numerical computation of the Posterior Cramér-Rao Lower Bound (PCRLB) for the position estimate mean-square error when a moving emitter is tracked by a network of received-signal-strength (RSS) sensors using a distributed, random exchange diffusion filter. The square root of the PCRLB is compared to the empirical root-mean-square error curve for a particle filter implementation of the diffusion filter, referred to as RndEx-PF, and to the square root of the PCRLB for the optimal centralized filter that assimilates all network measurements at each time instant. In addition, we also compare the proposed RndEx-PF algorithm to three alternative distributed trackers based on Kullback-Leibler fusion using both iterative consensus and non-iterative diffusion strategies.\n
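The bound itself follows the standard Tichavsky-Muravchik-Nehorai recursion on the Bayesian information matrix; a sketch for a linear-Gaussian state model is below, where the measurement term E[H^T R^-1 H] would be approximated, e.g., by Monte Carlo over emitter trajectories for the RSS model considered in the paper.

# Sketch of the PCRLB information-matrix recursion for linear-Gaussian
# dynamics x_{k+1} = A x_k + w_k, w_k ~ N(0, Q).
import numpy as np

def pcrlb_step(J, A, Q, EJz):
    """J: current Fisher information; EJz: E[H^T R^-1 H] at time k+1."""
    Qi = np.linalg.inv(Q)
    D11 = A.T @ Qi @ A
    D12 = -A.T @ Qi
    D22 = Qi + EJz
    return D22 - D12.T @ np.linalg.inv(J + D11) @ D12

# The position-error bound is the square root of the relevant diagonal
# entries of inv(J).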
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deconvolution-segmentation for textured images.\n \n \n \n \n\n\n \n Giovannelli, J.; and Vacar, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 191-195, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Deconvolution-segmentationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081195,\n  author = {J. Giovannelli and C. Vacar},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Deconvolution-segmentation for textured images},\n  year = {2017},\n  pages = {191-195},\n  abstract = {The paper tackles the problem of joint deconvolution and segmentation specifically for textured images. The images are composed of patches of textures that belong to a set of K possible classes. Each class of image is described by a Gaussian random field and the classes are modelled by a Potts field. The method relies on a hierarchical model and a Bayesian strategy to jointly estimate the labels, the textured images as well as the hyperparameters. An important point is that the parameter of the Potts field is also estimated. The estimators are designed in an optimal manner (marginal posterior maximizer for the labels and posterior mean for the other unknowns). They are computed based on a convergent procedure, from samples of the posterior obtained through an MCMC algorithm (Gibbs sampler including Perturbation-Optimization). A first numerical evaluation provides encouraging results despite the strong difficulty of the problem.},\n  keywords = {Bayes methods;deconvolution;estimation theory;image segmentation;image texture;Markov processes;Monte Carlo methods;optimisation;Gaussian random field;Potts field;textured images;image deconvolution;image segmentation;joint deconvolution-segmentation;marginal posterior maximizer;labels maximizers;MCMC algorithm;Gibbs sampling;K-possible class;Perturbation Optimization;Image segmentation;Bayes methods;Europe;Signal processing;Signal processing algorithms;Markov processes;Optimization;Deconvolution;segmentation;texture;Bayes;Potts;sampling;optimization},\n  doi = {10.23919/EUSIPCO.2017.8081195},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343213.pdf},\n}\n\n
\n
\n\n\n
\n The paper tackles the problem of joint deconvolution and segmentation specifically for textured images. The images are composed of patches of textures that belong to a set of K possible classes. Each class of image is described by a Gaussian random field and the classes are modelled by a Potts field. The method relies on a hierarchical model and a Bayesian strategy to jointly estimate the labels, the textured images as well as the hyperparameters. An important point is that the parameter of the Potts field is also estimated. The estimators are designed in an optimal manner (marginal posterior maximizer for the labels and posterior mean for the other unknowns). They are computed based on a convergent procedure, from samples of the posterior obtained through an MCMC algorithm (Gibbs sampler including Perturbation-Optimization). A first numerical evaluation provides encouraging results despite the strong difficulty of the problem.\n
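As a toy sketch of one ingredient, a single Gibbs sweep over the Potts labels combines a per-class data log-likelihood with the Potts smoothness term over the 4-neighborhood; the paper's full sampler additionally draws the textured images, the hyperparameters, and the Potts parameter itself (via Perturbation-Optimization), none of which is shown here.

# Toy sketch: one Gibbs sweep over Potts labels given per-class likelihoods.
import numpy as np

def gibbs_labels_sweep(labels, loglik, beta, rng):
    """labels: (H, W) ints in [0, K); loglik: (H, W, K) data log-likelihoods;
    beta: Potts interaction parameter; rng: np.random.Generator."""
    Hh, Ww, K = loglik.shape
    for i in range(Hh):
        for j in range(Ww):
            logp = loglik[i, j].copy()
            for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < Hh and 0 <= nj < Ww:
                    logp += beta * (np.arange(K) == labels[ni, nj])
            p = np.exp(logp - logp.max())
            p /= p.sum()
            labels[i, j] = rng.choice(K, p=p)   # draw from the conditional
    return labels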
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wheels coating process monitoring in the presence of nuisance parameters using sequential change-point detection method.\n \n \n \n \n\n\n \n Tout, K.; Retraint, F.; and Cogranne, R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 196-200, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"WheelsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081196,\n  author = {K. Tout and F. Retraint and R. Cogranne},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Wheels coating process monitoring in the presence of nuisance parameters using sequential change-point detection method},\n  year = {2017},\n  pages = {196-200},\n  abstract = {The paper addresses the problem of monitoring a non-stationary online process to detect an abrupt failure. The process studied in this paper is the one of wheels coating, but the proposed method can be extended to a broad range of processes. Using a camera, a picture of every wheel is captured for traceability. This image is used, in our problem, to measure the coating intensity via pixels mean value. In our operational context, it is wished to control the false alarm probability over a long period (typically a day) as well as to keep the detection delay under a given number of observations, which corresponds to a small number of wheels with defective coating. The problem of abrupt coating problem detection is addressed using a sequential method that takes into account those two requirements while it is also able to adapt to the non-stationnarity of the process. Numerical results on a large set of wheels images show the efficiency of the proposed approach.},\n  keywords = {cameras;coatings;condition monitoring;failure (mechanical);object detection;probability;process monitoring;production engineering computing;statistical analysis;wheels;wheels coating process monitoring;nuisance parameters;sequential change-point detection method;nonstationary online process;abrupt failure;wheel;coating intensity;false alarm probability;detection delay;defective coating;abrupt coating problem detection;wheels images;Wheels;Paints;Monitoring;Surface treatment;Delays;Parametric statistics;Industrial monitoring systems;Hypothesis testing theory;Sequential detection;Parametric model},\n  doi = {10.23919/EUSIPCO.2017.8081196},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347824.pdf},\n}\n\n
\n
\n\n\n
\n The paper addresses the problem of monitoring a non-stationary online process to detect an abrupt failure. The process studied in this paper is wheel coating, but the proposed method can be extended to a broad range of processes. Using a camera, a picture of every wheel is captured for traceability. In our problem, this image is used to measure the coating intensity via the pixels' mean value. In our operational context, the aim is to control the false alarm probability over a long period (typically a day) as well as to keep the detection delay under a given number of observations, which corresponds to a small number of wheels with defective coating. The abrupt coating-defect detection problem is addressed using a sequential method that takes these two requirements into account while also adapting to the non-stationarity of the process. Numerical results on a large set of wheel images show the efficiency of the proposed approach.\n
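As a simplified illustration of the sequential flavor (not the paper's procedure, which additionally handles nuisance parameters and non-stationarity), a one-sided CUSUM statistic on the per-wheel coating intensity raises an alarm once a drift of assumed size delta accumulates past a threshold h chosen to control the false alarm rate:

# Illustrative CUSUM-type change detector on per-wheel intensities.
# mu0 (pre-change mean) and delta (shift size) are assumed known here.
import numpy as np

def cusum(intensities, mu0, delta, h):
    """Returns the index of the first alarm, or None."""
    g = 0.0
    for k, x in enumerate(intensities):
        g = max(0.0, g + (x - mu0 - delta / 2.0))   # one-sided drift test
        if g > h:                                   # h trades detection delay
            return k                                # against false alarms
    return None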
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Group metropolis sampling.\n \n \n \n \n\n\n \n Martino, L.; Elvira, V.; and Camps-Valls, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 201-205, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"GroupPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081197,\n  author = {L. Martino and V. Elvira and G. Camps-Valls},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Group metropolis sampling},\n  year = {2017},\n  pages = {201-205},\n  abstract = {Monte Carlo (MC) methods are widely used for Bayesian inference and optimization in statistics, signal processing and machine learning. Two well-known class of MC methods are the Importance Sampling (IS) techniques and the Markov Chain Monte Carlo (MCMC) algorithms. In this work, we introduce the Group Importance Sampling (GIS) framework where different sets of weighted samples are properly summarized with one summary particle and one summary weight. GIS facilitates the design of novel efficient MC techniques. For instance, we present the Group Metropolis Sampling (GMS) algorithm which produces a Markov chain of sets of weighted samples. GMS in general outperforms other multiple try schemes as shown by means of numerical simulations.},\n  keywords = {Bayes methods;importance sampling;learning (artificial intelligence);Markov processes;medical signal processing;Monte Carlo methods;optimization;signal processing;machine learning;weighted samples;summary particle;summary weight;Markov chain Monte Carlo algorithms;group metropolis sampling algorithm;Bayesian inference;numerical simulations;Monte Carlo methods;Signal processing algorithms;Markov processes;Algorithm design and analysis;Signal processing;Probability density function;Europe;Bayesian inference;Importance Sampling;Markov Chain Monte Carlo (MCMC);Gaussian Processes (GP)},\n  doi = {10.23919/EUSIPCO.2017.8081197},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347879.pdf},\n}\n\n
\n
\n\n\n
\n Monte Carlo (MC) methods are widely used for Bayesian inference and optimization in statistics, signal processing and machine learning. Two well-known classes of MC methods are the Importance Sampling (IS) techniques and the Markov Chain Monte Carlo (MCMC) algorithms. In this work, we introduce the Group Importance Sampling (GIS) framework, where different sets of weighted samples are properly summarized with one summary particle and one summary weight. GIS facilitates the design of novel efficient MC techniques. For instance, we present the Group Metropolis Sampling (GMS) algorithm, which produces a Markov chain of sets of weighted samples. GMS generally outperforms other multiple-try schemes, as shown by means of numerical simulations.\n
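A hedged toy sketch of the group-acceptance idea: each iteration proposes a whole set of importance-weighted samples, its summary weight (here, the plain average of the IS weights) competes with the current set's in a Metropolis test, and on rejection the previous set is repeated in the chain. The target, proposal, and summary choices below are illustrative, not the paper's exact construction.

# Toy sketch: a Markov chain over sets of weighted samples.
import numpy as np

rng = np.random.default_rng(1)

def logtarget(x):
    return -0.5 * (x - 2.0) ** 2                       # unnormalized N(2, 1)

def q_pdf(x):
    return np.exp(-x ** 2 / 18.0) / np.sqrt(18.0 * np.pi)   # N(0, 9) proposal

N, iters = 10, 2000
samples = rng.normal(0.0, 3.0, N)
w = np.exp(logtarget(samples)) / q_pdf(samples)        # importance weights
chain = []
for _ in range(iters):
    s_new = rng.normal(0.0, 3.0, N)
    w_new = np.exp(logtarget(s_new)) / q_pdf(s_new)
    if rng.uniform() < min(1.0, w_new.mean() / w.mean()):  # group MH test
        samples, w = s_new, w_new
    chain.append((samples, w))                         # rejection repeats set
mean_est = np.mean([np.average(s, weights=ww) for s, ww in chain])
print(mean_est)                                        # approx 2.0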
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Global error control procedure for spatially structured targets.\n \n \n \n \n\n\n \n Bacher, R.; Chatelain, F.; and Michel, O.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 206-210, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"GlobalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081198,\n  author = {R. Bacher and F. Chatelain and O. Michel},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Global error control procedure for spatially structured targets},\n  year = {2017},\n  pages = {206-210},\n  abstract = {In this paper, a target detection procedure with global error control is proposed. The novelty of this approach consists in taking into account spatial structures of the target while ensuring proper error control over pixelwise errors. A generic framework is discussed and a method based on this framework is implemented. Results on simulated data show conclusive gains in detection power for a nominal control level. The method is also applied on real data produced by the astronomical instrument MUSE.},\n  keywords = {astronomical instruments;object detection;signal detection;global error control procedure;spatially structured targets;target detection procedure;proper error control;pixelwise errors;generic framework;detection power;nominal control level;spatial target structures;conclusive gains;astronomical instrument MUSE;Error correction;Hyperspectral imaging;Europe;Testing;Robustness;Correlation},\n  doi = {10.23919/EUSIPCO.2017.8081198},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342767.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a target detection procedure with global error control is proposed. The novelty of this approach consists in taking into account spatial structures of the target while ensuring proper error control over pixelwise errors. A generic framework is discussed and a method based on this framework is implemented. Results on simulated data show conclusive gains in detection power for a nominal control level. The method is also applied on real data produced by the astronomical instrument MUSE.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Event-based particle filtering with point and set-valued measurements.\n \n \n \n \n\n\n \n Davar, S.; and Mohammadi, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 211-215, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Event-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081199,\n  author = {S. Davar and A. Mohammadi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Event-based particle filtering with point and set-valued measurements},\n  year = {2017},\n  pages = {211-215},\n  abstract = {The paper is motivated by recent and rapid growth of Cyber-Physical Systems (CPS) and the critical necessity for preserving restricted communication resources in their application domains. In this context, a distributed state estimation architecture is considered where a remote sensor communicates its measurements to the fusion centre (FC) in an event-based fashion. We propose a systematic and intuitively pleasing distributed state estimation algorithm which jointly incorporates point and set-valued measurements within the particle filtering framework. Referred to as the event-based particle filter (EBPF), point-valued measurements are incorporated in the estimation recursion via a conventional particle filter formulation, while set-valued measurements are incorporated by developing an observation update step similar in nature to quantized particle filtering approach. More specifically, in the absence of an observation (i.e., having a set-valued measurement), the proposed EBPF evaluates the probability that the unknown observation belongs to the event-triggering set based on its particles which is then used to update the corresponding particle weights. The simulation results show that the proposed EBPF outperforms its counterparts specifically in low communication rates, and confirms the effectiveness of the proposed hybrid estimation algorithm.},\n  keywords = {cyber-physical systems;particle filtering (numerical methods);state estimation;set-valued measurements;cyberphysical systems;event-based particle filtering;event-based particle filter;event-triggering set;quantized particle filtering approach;point-valued measurements;distributed state estimation algorithm;Atmospheric measurements;Particle measurements;Current measurement;State estimation;Weight measurement;Extraterrestrial measurements;Cyber-physical systems;Event triggering;Event-based estimation;Particle filtering;Set-valued measurements;Non-Gaussian state estimation},\n  doi = {10.23919/EUSIPCO.2017.8081199},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346295.pdf},\n}\n\n
\n
\n\n\n
\n The paper is motivated by the recent and rapid growth of Cyber-Physical Systems (CPS) and the critical necessity of preserving restricted communication resources in their application domains. In this context, a distributed state estimation architecture is considered where a remote sensor communicates its measurements to the fusion centre (FC) in an event-based fashion. We propose a systematic and intuitively pleasing distributed state estimation algorithm which jointly incorporates point and set-valued measurements within the particle filtering framework. In the proposed event-based particle filter (EBPF), point-valued measurements are incorporated in the estimation recursion via a conventional particle filter formulation, while set-valued measurements are incorporated through an observation update step similar in nature to the quantized particle filtering approach. More specifically, in the absence of an observation (i.e., given a set-valued measurement), the proposed EBPF evaluates the probability that the unknown observation belongs to the event-triggering set based on its particles, which is then used to update the corresponding particle weights. The simulation results show that the proposed EBPF outperforms its counterparts, especially at low communication rates, and confirm the effectiveness of the proposed hybrid estimation algorithm.\n
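A sketch of the two update branches under assumed Gaussian measurement noise: on transmission, the usual likelihood weighting applies; otherwise, each particle's weight is multiplied by the probability that the unobserved measurement fell inside the triggering set, modeled here as a band of half-width tau around a reference prediction. The triggering geometry is an assumption made for illustration.

# Sketch of point-valued vs. set-valued particle weight updates.
import numpy as np
from scipy.stats import norm

def ebpf_update(particles, weights, h, sigma, y=None, ypred=None, tau=1.0):
    """h: vectorized measurement function; sigma: noise std; y: transmitted
    measurement (None if the sensor stayed silent); ypred: reference value
    defining the triggering band."""
    if y is not None:                      # point-valued: standard PF update
        weights = weights * norm.pdf(y - h(particles), scale=sigma)
    else:                                  # set-valued: no transmission
        z = h(particles)
        weights = weights * (norm.cdf((ypred + tau - z) / sigma)
                             - norm.cdf((ypred - tau - z) / sigma))
    return weights / weights.sum()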
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-intrusive intelligibility prediction using a codebook-based approach.\n \n \n \n \n\n\n \n Sørensen, C.; Kavalekalam, M. S.; Xenaki, A.; Boldt, J. B.; and Christensen, M. G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 216-220, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Non-intrusivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081200,\n  author = {C. Sørensen and M. S. Kavalekalam and A. Xenaki and J. B. Boldt and M. G. Christensen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Non-intrusive intelligibility prediction using a codebook-based approach},\n  year = {2017},\n  pages = {216-220},\n  abstract = {It could be beneficial for users of hearing aids if these were able to automatically adjust the processing according to the speech intelligibility in the specific acoustic environment. Most speech intelligibility metrics are intrusive, i.e., they require a clean reference signal, which is rarely available in real-life applications. This paper proposes a method, which allows using an intrusive short-time objective intelligibility (STOI) metric without requiring access to a clean signal. The clean speech reference signal is replaced by the clean speech envelope spectrum estimated from the noisy signal. The spectral envelope has been shown to be an important cue for speech intelligibility and is used as the reference signal inside STOI. The spectral envelopes are estimated as a combination of predefined dictionaries, i.e., code-books, that best fits the noisy speech signal. The simulations show a high correlation between the proposed non-intrusive codebook-based STOI (NIC-STOI) and the intrusive STOI indicating that NIC-STOI is a suitable metric for automatic classification of speech signals.},\n  keywords = {acoustic signal processing;hearing aids;signal classification;speech intelligibility;speech processing;nonintrusive intelligibility prediction;codebook-based approach;hearing aids;speech intelligibility metrics;clean speech reference signal;clean speech envelope spectrum;intrusive STOI;acoustic environment;intrusive short-time objective intelligibility;nonintrusive codebook-based STOI;speech signals automatic classification;Speech;Noise measurement;Speech coding;Time-frequency analysis;Frequency estimation;Speech processing},\n  doi = {10.23919/EUSIPCO.2017.8081200},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343356.pdf},\n}\n\n
\n
\n\n\n
\n It could be beneficial for users of hearing aids if these could automatically adjust their processing according to the speech intelligibility in the specific acoustic environment. Most speech intelligibility metrics are intrusive, i.e., they require a clean reference signal, which is rarely available in real-life applications. This paper proposes a method that allows using an intrusive short-time objective intelligibility (STOI) metric without requiring access to a clean signal. The clean speech reference signal is replaced by the clean speech envelope spectrum estimated from the noisy signal. The spectral envelope has been shown to be an important cue for speech intelligibility and is used as the reference signal inside STOI. The spectral envelopes are estimated as a combination of predefined dictionaries, i.e., codebooks, that best fits the noisy speech signal. The simulations show a high correlation between the proposed non-intrusive codebook-based STOI (NIC-STOI) and the intrusive STOI, indicating that NIC-STOI is a suitable metric for automatic classification of speech signals.\n
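A minimal sketch of the codebook step: the envelope spectrum is approximated as a nonnegative combination of predefined speech and noise codebook entries fitted to the noisy spectrum, and the speech part replaces the clean reference inside STOI. The codebook contents and the least-squares fitting criterion below are assumptions.

# Sketch: nonnegative codebook fit of a clean-speech envelope spectrum.
import numpy as np
from scipy.optimize import nnls

def estimate_envelope(noisy_spec, speech_cb, noise_cb):
    """noisy_spec: (F,) noisy spectrum; *_cb: (F, K) spectral codebooks."""
    D = np.hstack([speech_cb, noise_cb])
    coef, _ = nnls(D, noisy_spec)                  # nonnegative fit
    return speech_cb @ coef[:speech_cb.shape[1]]   # speech part only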
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n New insights into the role of the head radius in model-based binaural speaker localization.\n \n \n \n \n\n\n \n Zohourian, M.; Martin, R.; and Madhu, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 221-225, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NewPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081201,\n  author = {M. Zohourian and R. Martin and N. Madhu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {New insights into the role of the head radius in model-based binaural speaker localization},\n  year = {2017},\n  pages = {221-225},\n  abstract = {In this work we evaluate the effects of the head radius on binaural localization algorithms. We employ a spherical head model and the null-steering beamforming localization method. The model characterizes the binaural cues in the form of HRTFs. One of the main parameters in this model is the head radius. We propose to optimize jointly for both the source location and the head radius. In contrast to the free-field configuration where it is difficult to estimate the source location and microphone distance simultaneously, the binaural algorithm yields a unique solution to the head radius. Moreover, for real recordings we show that the commonly-assumed size of the head achieves a fairly reliable performance. For applications with non-typical size of the head, e.g., hearing-impaired children the adaptation of the head radius using the proposed algorithm would improve the accuracy of the binaural localization algorithm.},\n  keywords = {array signal processing;hearing;microphones;speaker recognition;speech processing;head radius;binaural localization algorithm;binaural speaker localization;spherical head model;null-steering beamforming localization method;Mathematical model;Direction-of-arrival estimation;Microphones;Signal processing algorithms;Magnetic heads;Cost function;Europe;Binaural speaker localization;beamforming;hearing aid;DOA},\n  doi = {10.23919/EUSIPCO.2017.8081201},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347295.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we evaluate the effects of the head radius on binaural localization algorithms. We employ a spherical head model and the null-steering beamforming localization method. The model characterizes the binaural cues in the form of HRTFs. One of the main parameters in this model is the head radius. We propose to optimize jointly for both the source location and the head radius. In contrast to the free-field configuration, where it is difficult to estimate the source location and microphone distance simultaneously, the binaural algorithm yields a unique solution for the head radius. Moreover, for real recordings we show that the commonly assumed head size achieves fairly reliable performance. For applications with a non-typical head size, e.g., hearing-impaired children, adapting the head radius using the proposed algorithm improves the accuracy of the binaural localization algorithm.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparsity promoting LMS for adaptive feedback cancellation.\n \n \n \n \n\n\n \n Lee, C.; Rao, B. D.; and Garudadri, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 226-230, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SparsityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081202,\n  author = {C. Lee and B. D. Rao and H. Garudadri},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Sparsity promoting LMS for adaptive feedback cancellation},\n  year = {2017},\n  pages = {226-230},\n  abstract = {In hearing aids (HAs), the acoustic coupling between the microphone and the receiver results in the system becoming unstable under certain conditions and causes artifacts commonly referred to as whistling or howling. The least mean square (LMS) class of algorithms is commonly used to mitigate this by providing adaptive feedback cancellation (AFC). The speech quality after AFC and the amount of added stable gain (ASG) with AFC are used to assess these algorithms. In this paper, we introduce a variant of the LMS that promotes sparsity in estimating the acoustic feedback path. By using the lp norm as a diversity measure, the approach does not enforce, but takes advantage of sparsity when it exists. The performance in terms of speech quality, misalignment, and ASG of the proposed algorithm is compared with other proportionate-type LMS algorithms which also leverage sparsity in the feedback path. We demonstrate faster convergence compared with those algorithms, quality improvement of about 0.25 (on a 0-1 objective scale of the hearing-aid speech quality index (HASQI)), and about 5 dB ASG improvement compared with the normalized LMS (NLMS).},\n  keywords = {acoustic signal processing;adaptive filters;hearing aids;least mean squares methods;microphones;speech processing;ASG improvement;least mean square class;microphone;HASQI;NLMS;acoustic coupling;hearing aids;normalized LMS;hearing-aid speech quality index;leverage sparsity;proportionate-type LMS algorithms;added stable gain;adaptive feedback cancellation;mean square class;Signal processing algorithms;Acoustics;Hearing aids;Convergence;Europe;Signal processing;Microphones;Adaptive feedback cancellation;LMS;proportionate adaptation;sparsity;hearing aids},\n  doi = {10.23919/EUSIPCO.2017.8081202},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347785.pdf},\n}\n\n
\n
\n\n\n
\n In hearing aids (HAs), the acoustic coupling between the microphone and the receiver results in the system becoming unstable under certain conditions and causes artifacts commonly referred to as whistling or howling. The least mean square (LMS) class of algorithms is commonly used to mitigate this by providing adaptive feedback cancellation (AFC). The speech quality after AFC and the amount of added stable gain (ASG) with AFC are used to assess these algorithms. In this paper, we introduce a variant of the LMS that promotes sparsity in estimating the acoustic feedback path. By using the lp norm as a diversity measure, the approach does not enforce, but takes advantage of sparsity when it exists. The performance in terms of speech quality, misalignment, and ASG of the proposed algorithm is compared with other proportionate-type LMS algorithms which also leverage sparsity in the feedback path. We demonstrate faster convergence compared with those algorithms, quality improvement of about 0.25 (on a 0-1 objective scale of the hearing-aid speech quality index (HASQI)), and about 5 dB ASG improvement compared with the normalized LMS (NLMS).\n
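A hedged sketch of a proportionate NLMS step whose per-tap gains derive from an lp-norm diversity measure (FOCUSS-style, with gains proportional to |w_i|^(2-p)); the regularization and gain normalization below follow common practice rather than the paper's exact rules.

# Sketch: one lp-diversity proportionate NLMS update for feedback-path
# estimation; p = 1 emphasizes sparse paths, p = 2 recovers plain NLMS.
import numpy as np

def sparse_pnlms_step(w, x, d, mu=0.5, p=1.0, eps=1e-6):
    """w: filter taps; x: input frame (len(w)); d: desired sample."""
    e = d - w @ x                               # a priori error
    g = (np.abs(w) + eps) ** (2.0 - p)          # lp diversity gains
    g /= g.sum()
    w = w + mu * g * x * e / (x @ (g * x) + eps)
    return w, e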
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Combining null-steering and adaptive filtering for acoustic feedback cancellation in a multi-microphone earpiece.\n \n \n \n \n\n\n \n Schepker, H.; Tran, L. T. T.; Nordholm, S.; and Doclo, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 231-235, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CombiningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081203,\n  author = {H. Schepker and L. T. T. Tran and S. Nordholm and S. Doclo},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Combining null-steering and adaptive filtering for acoustic feedback cancellation in a multi-microphone earpiece},\n  year = {2017},\n  pages = {231-235},\n  abstract = {Commonly adaptive filters are used to reduce the acoustic feedback in hearing aids. While theoretically allowing for perfect cancellation of the feedback signal, in practice the adaptive filter solution is typically biased due to the closed-loop hearing aid system. In contrast to conventional behind-the-ear hearing aids, in this paper we consider an earpiece with multiple integrated microphones. For such an earpiece it has previously been proposed to use a fixed null-steering beamformer to reduce the acoustic feedback in the microphones. In this paper we propose to combine the fixed null-steering beamformer with an additional adaptive filter to cancel the residual feedback component in the beamformer output. We compare the combination of the fixed null-steering beamformer and different adaptive filtering algorithms including subband adaptive filtering and the prediction-error-method based fullband adaptive filtering with using either of the two approaches alone. Experimental results using measured acoustic feedback show the benefit of using the combined approach compared to using either of the two approaches to cancel the acoustic feedback.},\n  keywords = {acoustic signal processing;adaptive filters;array signal processing;hearing aids;microphone arrays;feedback signal cancellation;fixed null-steering beamformer;prediction-error-method;fullband adaptive filtering;multiple integrated microphones;behind-the-ear hearing aids;closed-loop hearing aid system;multimicrophone earpiece;acoustic feedback cancellation;measured acoustic feedback;subband adaptive filtering;beamformer output;residual feedback component;Acoustics;Microphones;Hearing aids;Loudspeakers;Signal processing algorithms;Acoustic measurements;Adaptive systems},\n  doi = {10.23919/EUSIPCO.2017.8081203},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346967.pdf},\n}\n\n
\n
\n\n\n
\n Adaptive filters are commonly used to reduce acoustic feedback in hearing aids. While theoretically allowing perfect cancellation of the feedback signal, in practice the adaptive filter solution is typically biased due to the closed-loop hearing aid system. In contrast to conventional behind-the-ear hearing aids, in this paper we consider an earpiece with multiple integrated microphones. For such an earpiece, it has previously been proposed to use a fixed null-steering beamformer to reduce the acoustic feedback in the microphones. In this paper, we propose to combine the fixed null-steering beamformer with an additional adaptive filter that cancels the residual feedback component in the beamformer output. We compare the combination of the fixed null-steering beamformer and different adaptive filtering algorithms, including subband adaptive filtering and prediction-error-method-based fullband adaptive filtering, with using either of the two approaches alone. Experimental results using measured acoustic feedback show the benefit of the combined approach compared to using either approach alone to cancel the acoustic feedback.\n
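A toy sketch of the cascade for a single sample: a fixed null-steering beamformer combines the earpiece microphones, and an NLMS adaptive filter then cancels the residual feedback using the known loudspeaker signal. The beamformer weights, buffer handling, and step size are placeholders; the paper also evaluates subband and prediction-error-method variants.

# Toy per-sample sketch of beamformer + NLMS feedback cancellation.
import numpy as np

def afc_sample(w_bf, mics, w_af, ls_buf, mu=0.1, eps=1e-8):
    """w_bf: fixed beamformer weights; mics: current mic samples;
    w_af: adaptive filter taps; ls_buf: recent loudspeaker samples."""
    y = w_bf @ mics                      # null-steering beamformer output
    fb_hat = w_af @ ls_buf               # estimated residual feedback
    e = y - fb_hat                       # feedback-compensated signal
    w_af = w_af + mu * e * ls_buf / (ls_buf @ ls_buf + eps)   # NLMS update
    return e, w_af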
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A sparsity-aware proportionate normalized maximum correntropy criterion algorithm for sparse system identification in non-Gaussian environment.\n \n \n \n \n\n\n \n Wang, Y.; Li, Y.; and Yang, R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 236-240, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081204,\n  author = {Y. Wang and Y. Li and R. Yang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A sparsity-aware proportionate normalized maximum correntropy criterion algorithm for sparse system identification in non-Gaussian environment},\n  year = {2017},\n  pages = {236-240},\n  abstract = {A sparsity-aware proportionate normalized maximum correntropy criterion (PNMCC) algorithm with lp-norm penalty, which is named as lp-norm constraint PNMCC (LP-PNMCC), is proposed and its crucial parameters, convergence speed rate and steady-state performance are discussed via estimating a typical sparse multipath channel and an typical echo channel. The LP-PNMCC algorithm is realized by integrating a lp-norm into the PNMCC's cost function to create an expected zero attraction term in the iterations of the presented LP-PNMCC algorithm, which aims to further exploit the sparsity property of the sparse channels. The presented LP-PNMCC algorithm has been derived and analyzed in detail. Experimental results obtained from sparse channel estimations demonstrate that the proposed LP-PNMCC algorithm is superior to the PNMCC, PNLMS, RZA-MCC, ZA-MCC, NMCC and MCC algorithms according to the convergence speed rate and steady-state mean square deviation.},\n  keywords = {channel estimation;least mean squares methods;multipath channels;PNMCC cost function;LP-PNMCC algorithm;sparse multipath channel;proportionate normalized maximum correntropy criterion;sparsity-aware PNMCC algorithm;echo channel;sparsity property;nonGaussian environment;sparse system identification;convergence speed rate;sparse channel estimations;Signal processing algorithms;Channel estimation;Convergence;Steady-state;Cost function;Mathematical model;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081204},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342386.pdf},\n}\n\n
\n
\n\n\n
\n A sparsity-aware proportionate normalized maximum correntropy criterion (PNMCC) algorithm with an lp-norm penalty, named the lp-norm-constrained PNMCC (LP-PNMCC), is proposed; its crucial parameters, convergence rate, and steady-state performance are studied by estimating a typical sparse multipath channel and a typical echo channel. The LP-PNMCC algorithm is realized by integrating an lp-norm into the PNMCC's cost function to create a zero-attraction term in the iterations, which further exploits the sparsity of the channels. The algorithm is derived and analyzed in detail. Experimental results obtained from sparse channel estimation demonstrate that the proposed LP-PNMCC algorithm is superior to the PNMCC, PNLMS, RZA-MCC, ZA-MCC, NMCC and MCC algorithms in terms of convergence rate and steady-state mean square deviation.\n
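A rough sketch combining the maximum-correntropy update (a Gaussian kernel on the error that de-emphasizes impulsive noise) with an lp-norm zero-attraction term; the attractor form, step sizes, and kernel width below are illustrative stand-ins, and the proportionate per-tap gains of the actual LP-PNMCC are omitted.

# Sketch: zero-attraction MCC adaptive filter step (illustrative form).
import numpy as np

def lp_mcc_step(w, x, d, mu=0.05, sigma=1.0, rho=1e-4, p=0.5, eps=1e-3):
    """w: filter taps; x: input frame; d: desired sample."""
    e = d - w @ x
    kernel = np.exp(-e ** 2 / (2 * sigma ** 2))        # correntropy weighting
    attractor = p * np.sign(w) / (np.abs(w) ** (1 - p) + eps)
    w = w + mu * kernel * e * x - rho * attractor      # zero attraction
    return w, e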
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An unsupervised Bayesian approach for the joint reconstruction and classification of cutaneous reflectance confocal microscopy images.\n \n \n \n \n\n\n \n Halimi, A.; Batatia, H.; Le Digabel, J.; Josse, G.; and Tourneret, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 241-245, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081205,\n  author = {A. Halimi and H. Batatia and J. {Le Digabel} and G. Josse and J. Tourneret},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An unsupervised Bayesian approach for the joint reconstruction and classification of cutaneous reflectance confocal microscopy images},\n  year = {2017},\n  pages = {241-245},\n  abstract = {This paper studies a new Bayesian algorithm for the joint reconstruction and classification of reflectance confocal microscopy (RCM) images, with application to the identification of human skin lentigo. The proposed Bayesian approach takes advantage of the distribution of the multiplicative speckle noise affecting the true reflectivity of these images and of appropriate priors for the unknown model parameters. A Markov chain Monte Carlo (MCMC) algorithm is proposed to jointly estimate the model parameters and the image of true reflectivity while classifying images according to the distribution of their reflectivity. Specifically, a Metropolis-within-Gibbs sampler is investigated to sample the posterior distribution of the Bayesian model associated with RCM images and to build estimators of its parameters, including labels indicating the class of each RCM image. The resulting algorithm is applied to synthetic data and to real images from a clinical study containing healthy and lentigo patients.},\n  keywords = {Bayes methods;biomedical optical imaging;image classification;Markov processes;medical image processing;Monte Carlo methods;skin;speckle;human skin;multiplicative speckle noise;Markov chain Monte Carlo algorithm;Bayesian model;RCM image;unsupervised Bayesian approach;cutaneous reflectance confocal microscopy images;posterior distribution;image classification;image reconstruction;Bayesian algorithm;Bayes methods;Skin;Gaussian distribution;Signal processing algorithms;Proposals;Microscopy;Computational modeling;Reflectance confocal microscopy;Bayesian algorithm;Classification;Metropolis-within-Gibbs sampler},\n  doi = {10.23919/EUSIPCO.2017.8081205},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347087.pdf},\n}\n\n
\n
\n\n\n
\n This paper studies a new Bayesian algorithm for the joint reconstruction and classification of reflectance confocal microscopy (RCM) images, with application to the identification of human skin lentigo. The proposed Bayesian approach takes advantage of the distribution of the multiplicative speckle noise affecting the true reflectivity of these images and of appropriate priors for the unknown model parameters. A Markov chain Monte Carlo (MCMC) algorithm is proposed to jointly estimate the model parameters and the image of true reflectivity while classifying images according to the distribution of their reflectivity. Specifically, a Metropolis-within-Gibbs sampler is investigated to sample the posterior distribution of the Bayesian model associated with RCM images and to build estimators of its parameters, including labels indicating the class of each RCM image. The resulting algorithm is applied to synthetic data and to real images from a clinical study containing healthy and lentigo patients.\n
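The sampler named in the abstract has a generic skeleton that is easy to state. Below is a minimal Metropolis-within-Gibbs loop for an arbitrary joint log-posterior; the paper's specific target (speckle likelihood, priors, and class labels for RCM images) is not reproduced here, and log_post and step are placeholders:

```python
import numpy as np

rng = np.random.default_rng(0)

def metropolis_within_gibbs(log_post, theta0, n_iter=5000, step=0.1):
    """Update one coordinate at a time with a random-walk Metropolis step."""
    theta = np.asarray(theta0, dtype=float).copy()
    lp = log_post(theta)
    chain = np.empty((n_iter, theta.size))
    for it in range(n_iter):
        for j in range(theta.size):
            prop = theta.copy()
            prop[j] += step * rng.standard_normal()    # coordinate-wise proposal
            lp_prop = log_post(prop)
            if np.log(rng.uniform()) < lp_prop - lp:   # Metropolis accept/reject
                theta, lp = prop, lp_prop
        chain[it] = theta
    return chain

# e.g. sampling a standard bivariate normal:
# chain = metropolis_within_gibbs(lambda th: -0.5 * np.sum(th**2), np.zeros(2))
```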
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hyperparameter estimation in maximum a posteriori regression using group sparsity with an application to brain imaging.\n \n \n \n \n\n\n \n Bekhti, Y.; Badeau, R.; and Gramfort, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 246-250, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HyperparameterPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081206,\n  author = {Y. Bekhti and R. Badeau and A. Gramfort},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Hyperparameter estimation in maximum a posteriori regression using group sparsity with an application to brain imaging},\n  year = {2017},\n  pages = {246-250},\n  abstract = {Hyperparameter estimation is a recurrent problem in the signal and statistics literature. Popular strategies are cross-validation or Bayesian inference, yet it remains an active topic of research in order to offer better or faster algorithms. The models considered here are sparse regression models with convex or non-convex group-Lasso-like penalties. Following the recent work of Pereyra et al. [1] we study the fixed point iteration algorithm they propose and show that, while it may be suitable for an analysis prior, it suffers from limitations when using high-dimensional sparse synthesis models. The first contribution of this paper is to show how to overcome this issue. Secondly, we demonstrate how one can extend the model to estimate a vector of regularization parameters. We illustrate this on models with group sparsity reporting improved support recovery and reduced amplitude bias on the estimated coefficients. This approach is compared with an alternative method that uses a single parameter but a non-convex penalty. Results are presented on simulations and an inverse problem relevant for neuroscience which is the localization of brain activations using magneto/electroencephalography.},\n  keywords = {Bayes methods;brain;convex programming;electroencephalography;inverse problems;iterative methods;maximum likelihood estimation;medical image processing;regression analysis;hyperparameter estimation;brain imaging;recurrent problem;faster algorithms;sparse regression models;high-dimensional sparse synthesis models;nonconvex penalty;inverse problem;brain activations;posteriori regression;Bayesian inference;Brain modeling;Estimation;Bayes methods;Signal processing;Inverse problems;Sensors;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081206},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347015.pdf},\n}\n\n
\n
\n\n\n
\n Hyperparameter estimation is a recurrent problem in the signal and statistics literature. Popular strategies are cross-validation or Bayesian inference, yet it remains an active topic of research in order to offer better or faster algorithms. The models considered here are sparse regression models with convex or non-convex group-Lasso-like penalties. Following the recent work of Pereyra et al. [1] we study the fixed point iteration algorithm they propose and show that, while it may be suitable for an analysis prior, it suffers from limitations when using high-dimensional sparse synthesis models. The first contribution of this paper is to show how to overcome this issue. Secondly, we demonstrate how one can extend the model to estimate a vector of regularization parameters. We illustrate this on models with group sparsity reporting improved support recovery and reduced amplitude bias on the estimated coefficients. This approach is compared with an alternative method that uses a single parameter but a non-convex penalty. Results are presented on simulations and an inverse problem relevant for neuroscience which is the localization of brain activations using magneto/electroencephalography.\n
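As background, the baseline fixed-point scheme of Pereyra et al. for a 1-homogeneous penalty such as the l1 norm alternates a MAP solve with the update lambda <- dim(x)/||x||_1. A minimal sketch for the lasso case follows; the inner solver, iteration counts and eps are illustrative, and the paper's fix for synthesis models and its extension to a vector of regularization parameters are not reproduced:

```python
import numpy as np

def soft(x, t):                                   # prox of t * ||.||_1
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

def fista_lasso(A, b, lam, iters=200):
    """FISTA for 0.5*||Ax - b||^2 + lam*||x||_1 (the inner MAP solve)."""
    Lip = np.linalg.norm(A, 2) ** 2               # Lipschitz constant of the gradient
    x = np.zeros(A.shape[1]); z = x.copy(); t = 1.0
    for _ in range(iters):
        x_new = soft(z - A.T @ (A @ z - b) / Lip, lam / Lip)
        t_new = (1 + np.sqrt(1 + 4 * t * t)) / 2
        z = x_new + (t - 1) / t_new * (x_new - x)
        x, t = x_new, t_new
    return x

def fixed_point_lambda(A, b, n_outer=10, eps=1e-8):
    lam = 1.0
    for _ in range(n_outer):
        x = fista_lasso(A, b, lam)
        lam = A.shape[1] / (np.abs(x).sum() + eps)  # lambda <- dim(x) / ||x||_1
    return lam, x
```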
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic atlas-guided constrained random Walker algorithm for 3D segmentation of muscles on water magnetic resonance images.\n \n \n \n \n\n\n \n Fallah, F.; Yang, B.; and Bamberg, F.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 251-255, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081207,\n  author = {F. Fallah and B. Yang and F. Bamberg},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic atlas-guided constrained random Walker algorithm for 3D segmentation of muscles on water magnetic resonance images},\n  year = {2017},\n  pages = {251-255},\n  abstract = {Automatic segmentation of distinct muscles is a crucial step for quantitative analysis of muscle's tissue properties. Magnetic resonance (MR) imaging provides a superior soft tissue contrast and noninvasive means for assessing muscular characteristics. However, automatic segmentation of muscles using common morphological MR imaging is very challenging as the intensities and textures of adjacent muscles are similar and the boundaries between them are mostly invisible or discontinuous. In this paper, we propose a novel fully automatic framework for 3D segmentation of muscles on water MR images. This framework generates the 3D average and probabilistic atlases of the targeted muscle to automatically define the labeled seeds, the edges weights, and the constraints of a constrained Random Walker algorithm. Also, the low-pass filtered atlas-derived muscle probability map is used to augment the intensities prior to the graph-based segmentation. This enables automatic localization of the targeted muscle and enforces dissimilarities between its intensities and the intensities of adjacent lean tissues. The proposed algorithm outperforms the original random Walker algorithm and the conventional multi-atlas registration for muscle segmentation and is less sensitive to errors in the manually segmented muscle masks used for training (atlas computation).},\n  keywords = {biomedical MRI;image filtering;image registration;image segmentation;image texture;low-pass filters;medical image processing;muscle;probability;muscular characteristics;adjacent muscles;probabilistic atlases;conventional multiatlas registration;muscle masks;atlas computation;water magnetic resonance images;superior soft tissue contrast;morphological MR imaging;automatic atlas-guided constrained random Walker algorithm;3D muscle segmentation;low-pass filtered atlas-derived muscle probability map;graph-based segmentation;lean tissues;Muscles;Image segmentation;Three-dimensional displays;Signal processing algorithms;Image edge detection;Spatial resolution;Probabilistic logic},\n  doi = {10.23919/EUSIPCO.2017.8081207},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570339605.pdf},\n}\n\n
\n
\n\n\n
\n Automatic segmentation of distinct muscles is a crucial step for the quantitative analysis of muscle tissue properties. Magnetic resonance (MR) imaging provides superior soft-tissue contrast and a noninvasive means for assessing muscular characteristics. However, automatic segmentation of muscles using common morphological MR imaging is very challenging, as the intensities and textures of adjacent muscles are similar and the boundaries between them are mostly invisible or discontinuous. In this paper, we propose a novel fully automatic framework for the 3D segmentation of muscles on water MR images. This framework generates the 3D average and probabilistic atlases of the targeted muscle to automatically define the labeled seeds, the edge weights, and the constraints of a constrained random walker algorithm. Also, the low-pass filtered atlas-derived muscle probability map is used to augment the intensities prior to the graph-based segmentation. This enables automatic localization of the targeted muscle and enforces dissimilarities between its intensities and the intensities of adjacent lean tissues. The proposed algorithm outperforms the original random walker algorithm and conventional multi-atlas registration for muscle segmentation and is less sensitive to errors in the manually segmented muscle masks used for training (atlas computation).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n RSS-based respiratory rate monitoring using periodic Gaussian processes and Kalman filtering.\n \n \n \n \n\n\n \n Hostettler, R.; Kaltiokallio, O.; Yiğitler, H.; Särkkä, S.; and Jäntti, R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 256-260, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RSS-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081208,\n  author = {R. Hostettler and O. Kaltiokallio and H. Yiğitler and S. Särkkä and R. Jäntti},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {RSS-based respiratory rate monitoring using periodic Gaussian processes and Kalman filtering},\n  year = {2017},\n  pages = {256-260},\n  abstract = {In this paper, we propose a method for respiratory rate estimation based on the received signal strength of narrowband radio frequency transceivers. We employ a state-space formulation of periodic Gaussian processes to model the observed variations in the signal strength. This is then used in a Rao-Blackwellized unscented Kalman filter which exploits the linear substructure of the proposed model and thereby greatly improves computational efficiency. The proposed method is evaluated on measurement data from commercially available off the shelf transceivers. It is found that the proposed method accurately estimates the respiratory rate and provides a systematic way of fusing the measurements of asynchronous frequency channels.},\n  keywords = {Gaussian processes;Kalman filters;medical signal processing;nonlinear filters;patient monitoring;pneumodynamics;radio transceivers;RSSI;RSS-based respiratory rate monitoring;Rao-Blackwellized unscented Kalman filter;state-space formulation;narrowband radio frequency transceivers;received signal strength;respiratory rate estimation;Kalman filtering;periodic Gaussian processes;Kalman filters;Monitoring;Gaussian processes;Frequency measurement;Europe;Estimation},\n  doi = {10.23919/EUSIPCO.2017.8081208},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347506.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a method for respiratory rate estimation based on the received signal strength of narrowband radio frequency transceivers. We employ a state-space formulation of periodic Gaussian processes to model the observed variations in the signal strength. This is then used in a Rao-Blackwellized unscented Kalman filter, which exploits the linear substructure of the proposed model and thereby greatly improves computational efficiency. The proposed method is evaluated on measurement data from commercially available off-the-shelf transceivers. It is found that the proposed method accurately estimates the respiratory rate and provides a systematic way of fusing the measurements of asynchronous frequency channels.\n
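The state-space idea can be illustrated with a linear-Gaussian simplification: a periodic Gaussian process admits an approximate state-space form as a bank of harmonic resonators, which an ordinary Kalman filter can track. The sketch below uses this plain Kalman filter; the paper's Rao-Blackwellized unscented filter, which also handles the nonlinear parts of the model, is more elaborate, and all noise levels here are assumptions:

```python
import numpy as np

def periodic_ss_kalman(y, f0, fs, J=3, q=1e-4, r=1e-2):
    """Track a (quasi-)periodic signal y sampled at fs with J harmonics of f0."""
    F = np.zeros((2 * J, 2 * J))
    for j in range(J):                             # one rotation block per harmonic
        w = 2 * np.pi * (j + 1) * f0 / fs
        F[2*j:2*j+2, 2*j:2*j+2] = [[np.cos(w), -np.sin(w)],
                                   [np.sin(w),  np.cos(w)]]
    H = np.zeros((1, 2 * J)); H[0, ::2] = 1.0      # observe the sum of cosine states
    Q, R = q * np.eye(2 * J), np.array([[r]])
    m, P = np.zeros(2 * J), np.eye(2 * J)
    est = np.empty(len(y))
    for n, yn in enumerate(y):
        m, P = F @ m, F @ P @ F.T + Q              # predict
        S = H @ P @ H.T + R
        K = P @ H.T / S                            # gain (S is 1x1)
        m = m + (K * (yn - H @ m)).ravel()         # update
        P = P - K @ H @ P
        est[n] = (H @ m).item()
    return est
```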
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Recovery of missing samples in fetal heart rate recordings with Gaussian processes.\n \n \n \n \n\n\n \n Feng, G.; Quirk, J. G.; and Djurić, P. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 261-265, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RecoveryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081209,\n  author = {G. Feng and J. G. Quirk and P. M. Djurić},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Recovery of missing samples in fetal heart rate recordings with Gaussian processes},\n  year = {2017},\n  pages = {261-265},\n  abstract = {Missing samples are very common in fetal heart rate (FHR) recordings due to various reasons including fetal or maternal movements and misplaced electrodes. They introduce distortions and cause difficulties in their analysis. In this paper, we propose a Gaussian process-based method that can utilize other intrapartum signals (e.g., uterine activity and maternal heart rate) to recover the missing samples in FHR recordings. The proposed approach was tested on a short real FHR recording segment and its performance was compared with that of cubic spline interpolation which is widely used in pre-processing of FHR recordings. Our results show that the proposed approach, with utilization of UA signals, achieves 2.35 dB to 14.85 dB better recovery performance. Furthermore, even when the percentage of missing samples is more than 50%, the mean square error of this approach is still below one beat per minute.},\n  keywords = {cardiology;Gaussian processes;interpolation;mean square error methods;medical signal processing;patient monitoring;splines (mathematics);fetal heart rate recordings;distortions;Gaussian process;maternal heart rate;missing samples;FHR recordings;intrapartum signals;cubic spline interpolation;mean square error;Fetal heart rate;Gaussian processes;Monitoring;Gaussian distribution;Splines (mathematics);Interpolation;Inspection},\n  doi = {10.23919/EUSIPCO.2017.8081209},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347673.pdf},\n}\n\n
\n
\n\n\n
\n Missing samples are very common in fetal heart rate (FHR) recordings due to various reasons including fetal or maternal movements and misplaced electrodes. They introduce distortions and cause difficulties in their analysis. In this paper, we propose a Gaussian process-based method that can utilize other intrapartum signals (e.g., uterine activity and maternal heart rate) to recover the missing samples in FHR recordings. The proposed approach was tested on a short real FHR recording segment and its performance was compared with that of cubic spline interpolation which is widely used in pre-processing of FHR recordings. Our results show that the proposed approach, with utilization of UA signals, achieves 2.35 dB to 14.85 dB better recovery performance. Furthermore, even when the percentage of missing samples is more than 50%, the mean square error of this approach is still below one beat per minute.\n
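The core imputation step can be illustrated with plain single-signal GP regression: condition on the observed samples and read off the posterior mean at the missing time instants. The paper additionally conditions on auxiliary intrapartum signals such as uterine activity; the kernel choice and hyperparameters below are illustrative:

```python
import numpy as np

def rbf(a, b, ell=10.0, var=1.0):
    """Squared-exponential covariance between time vectors a and b."""
    d = a[:, None] - b[None, :]
    return var * np.exp(-0.5 * (d / ell) ** 2)

def gp_impute(t_obs, y_obs, t_mis, ell=10.0, var=1.0, noise=0.5):
    """GP posterior mean at the missing sample times t_mis."""
    K = rbf(t_obs, t_obs, ell, var) + noise * np.eye(len(t_obs))
    Ks = rbf(t_mis, t_obs, ell, var)
    return Ks @ np.linalg.solve(K, y_obs)
```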
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Malicious users discrimination in organized attacks using structured sparsity.\n \n \n \n\n\n \n Yamaç, M.; Sankur, B.; and Cemgil, A. T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 266-270, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081210,\n  author = {M. Yamaç and B. Sankur and A. T. Cemgil},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Malicious users discrimination in organized attacks using structured sparsity},\n  year = {2017},\n  pages = {266-270},\n  abstract = {Communication networks can be the targets of organized and distributed attacks, such as flooding-type DDoS attacks, in which malicious users aim to cripple a network server or a network domain. For the attack to have a major effect on the network, the malicious users must act in a coordinated and time-correlated manner. For instance, the members of a flooding attack increase their message transmission rates rapidly but also synchronously. Even though the detection and prevention of flooding attacks are well studied at the network and transport layers, the emergence and wide deployment of new systems such as VoIP (Voice over IP) have turned flooding attacks at the session layer into a new defense challenge. In this study, a structured-sparsity-based group anomaly detection system is proposed that can not only detect synchronized attacks but also separate the malicious groups from normal users by jointly estimating their members, structure, and starting and end points. Although we mainly focus on the security of SIP (Session Initiation Protocol) servers/proxies, which are widely used for signaling in VoIP systems, the proposed scheme can easily be adapted to any type of communication network system at any layer.},\n  keywords = {computer network security;Internet telephony;signalling protocols;malicious users discrimination;organized attacks;distributed attacks;flooding-type DDOS attack;network server;network domain;message transmission rate;flooding attack detection;flooding attack prevention;transport layer;Voice over IP;structured sparsity based group anomaly detection system;synchronized attack detection;malicious groups;security;Session Initiation Protocol;SIP servers;SIP proxies;signaling;VoIP systems;communication network system;Signal processing algorithms;Computer crime;Internet telephony;Synchronization;Indexes;Servers;Current measurement;Compressive Sensing;Network Security;DDOS;Voice over IP},\n  doi = {10.23919/EUSIPCO.2017.8081210},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Communication networks can be the targets of organized and distributed attacks, such as flooding-type DDoS attacks, in which malicious users aim to cripple a network server or a network domain. For the attack to have a major effect on the network, the malicious users must act in a coordinated and time-correlated manner. For instance, the members of a flooding attack increase their message transmission rates rapidly but also synchronously. Even though the detection and prevention of flooding attacks are well studied at the network and transport layers, the emergence and wide deployment of new systems such as VoIP (Voice over IP) have turned flooding attacks at the session layer into a new defense challenge. In this study, a structured-sparsity-based group anomaly detection system is proposed that can not only detect synchronized attacks but also separate the malicious groups from normal users by jointly estimating their members, structure, and starting and end points. Although we mainly focus on the security of SIP (Session Initiation Protocol) servers/proxies, which are widely used for signaling in VoIP systems, the proposed scheme can easily be adapted to any type of communication network system at any layer.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Log-likelihood score level fusion for improved cross-sensor smartphone periocular recognition.\n \n \n \n \n\n\n \n Alonso-Fernandez, F.; Raja, K. B.; Busch, C.; and Bigun, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 271-275, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Log-likelihoodPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081211,\n  author = {F. Alonso-Fernandez and K. B. Raja and C. Busch and J. Bigun},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Log-likelihood score level fusion for improved cross-sensor smartphone periocular recognition},\n  year = {2017},\n  pages = {271-275},\n  abstract = {The proliferation of cameras and personal devices results in a wide variability of imaging conditions, producing large intra-class variations and a significant performance drop when images from heterogeneous environments are compared. However, many applications require to deal with data from different sources regularly, thus needing to overcome these interoperability problems. Here, we employ fusion of several comparators to improve periocular performance when images from different smartphones are compared. We use a probabilistic fusion framework based on linear logistic regression, in which fused scores tend to be log-likelihood ratios, obtaining a reduction in cross-sensor EER of up to 40% due to the fusion. Our framework also provides an elegant and simple solution to handle signals from different devices, since same-sensor and cross-sensor score distributions are aligned and mapped to a common probabilistic domain. This allows the use of Bayes thresholds for optimal decision making, eliminating the need of sensor-specific thresholds, which is essential in operational conditions because the threshold setting critically determines the accuracy of the authentication process in many applications.},\n  keywords = {Bayes methods;biometrics (access control);decision making;feature extraction;image fusion;image recognition;open systems;probability;regression analysis;smart phones;intraclass variations;cross-sensor smartphone periocular recognition;heterogeneous environments;improved cross-sensor smartphone periocular recognition;log-likelihood score level fusion;sensor-specific thresholds;common probabilistic domain;cross-sensor score distributions;cross-sensor EER;log-likelihood ratios;linear logistic regression;probabilistic fusion framework;different smartphones;periocular performance;interoperability problems;Training;Databases;Performance evaluation;Europe;Signal processing;Probabilistic logic;Logistics},\n  doi = {10.23919/EUSIPCO.2017.8081211},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341874.pdf},\n}\n\n
\n
\n\n\n
\n The proliferation of cameras and personal devices results in a wide variability of imaging conditions, producing large intra-class variations and a significant performance drop when images from heterogeneous environments are compared. However, many applications must regularly deal with data from different sources and thus need to overcome these interoperability problems. Here, we employ fusion of several comparators to improve periocular performance when images from different smartphones are compared. We use a probabilistic fusion framework based on linear logistic regression, in which fused scores tend to be log-likelihood ratios, obtaining a reduction in cross-sensor EER of up to 40% due to the fusion. Our framework also provides an elegant and simple solution to handle signals from different devices, since same-sensor and cross-sensor score distributions are aligned and mapped to a common probabilistic domain. This allows the use of Bayes thresholds for optimal decision making, eliminating the need for sensor-specific thresholds, which is essential in operational conditions, because the threshold setting critically determines the accuracy of the authentication process in many applications.\n
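The calibration logic is compact enough to sketch. Fitting a linear logistic regression on stacked comparator scores yields log-posterior-odds under the training class balance; subtracting the training prior log-odds gives a score that behaves like a log-likelihood ratio, to which a single Bayes threshold can be applied for any application prior. This uses scikit-learn and is a generic sketch, not the paper's trained models:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def fuse_to_llr(S_train, y_train, S_test):
    """S_*: (n_samples, n_comparators) score matrices; y_train: 0/1 labels."""
    clf = LogisticRegression().fit(S_train, y_train)
    pi = y_train.mean()                              # empirical target prior
    return clf.decision_function(S_test) - np.log(pi / (1 - pi))

# Bayes decision for application prior p: accept if llr >= -np.log(p / (1 - p))
```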
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint learning of local fingerprint and content modulation.\n \n \n \n \n\n\n \n Kostadinov, D.; Voloshynovskiy, S.; and Ferdowsi, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 276-280, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081212,\n  author = {D. Kostadinov and S. Voloshynovskiy and S. Ferdowsi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint learning of local fingerprint and content modulation},\n  year = {2017},\n  pages = {276-280},\n  abstract = {This paper proposes learning a linear map with local content modulation for robust content fingerprinting. The goal is to estimate a data-adapted linear map that provides bounded modulation distortion and features with targeted properties. A novel problem formulation is presented that jointly addresses the fingerprint learning and the content modulation. A solution based on an iterative alternating algorithm is proposed. The algorithm alternates between a linear map update step and a linear modulation estimation step. Globally optimal solutions for the respective iterative steps are derived, resulting in a convergent algorithm with a locally optimal solution. A computer simulation using local image patches extracted from a publicly available data set is provided. The advantages under additive white Gaussian noise (AWGN), lossy JPEG compression, and projective geometric transform distortions are demonstrated.},\n  keywords = {AWGN;AWGN channels;data compression;feature extraction;fingerprint identification;image coding;iterative methods;learning (artificial intelligence);optimisation;robust content fingerprinting;computer simulation;additive white Gaussian noise;AWGN;lossy JPEG compression;projective geometrical transform distortions;local content modulation;local fingerprint;local image patches;locally optimal solution;convergent algorithm;respective iterative steps;global optimal solutions;iterative alternating algorithm;fingerprint learning;modulation distortion;Modulation;Distortion;Signal processing algorithms;Feature extraction;Quantization (signal);Computer simulation;active content fingerprint;modulation;feature map learning;robustness},\n  doi = {10.23919/EUSIPCO.2017.8081212},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347514.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes learning a linear map with local content modulation for robust content fingerprinting. The goal is to estimate a data-adapted linear map that provides bounded modulation distortion and features with targeted properties. A novel problem formulation is presented that jointly addresses the fingerprint learning and the content modulation. A solution based on an iterative alternating algorithm is proposed. The algorithm alternates between a linear map update step and a linear modulation estimation step. Globally optimal solutions for the respective iterative steps are derived, resulting in a convergent algorithm with a locally optimal solution. A computer simulation using local image patches extracted from a publicly available data set is provided. The advantages under additive white Gaussian noise (AWGN), lossy JPEG compression, and projective geometric transform distortions are demonstrated.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Higher-order, adversary-aware, double JPEG-detection via selected training on attacked samples.\n \n \n \n \n\n\n \n Barni, M.; Nowroozi, E.; and Tondi, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 281-285, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Higher-order,Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081213,\n  author = {M. Barni and E. Nowroozi and B. Tondi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Higher-order, adversary-aware, double JPEG-detection via selected training on attacked samples},\n  year = {2017},\n  pages = {281-285},\n  abstract = {In this paper we present an adversary-aware double JPEG detector which is capable of detecting the presence of two JPEG compression steps even in the presence of heterogeneous processing and counter-forensic (C-F) attacks. The detector is based on an SVM classifier fed with a large number of features and trained to recognise the traces left by double JPEG compression in the presence of attacks. Since it is not possible to train the SVM on all possible kinds of processing and C-F attacks, a selected set of images, manipulated with a limited number of attacks, is added to the training set. The processing tools used for training are chosen among those that proved to be most effective in disabling double JPEG detection. Experimental results show that training on such a selection of the most powerful attacks allows good detection in the presence of a much wider variety of attacks and processing. Good performance is retained over a wide range of compression quality factors.},\n  keywords = {data compression;image coding;security of data;support vector machines;adversary-aware double JPEG detector;JPEG compression steps;heterogeneous processing;SVM classifier;double JPEG detection;counter-forensic attacks;C-F attacks;Image coding;Detectors;Transform coding;Training;Support vector machines;Discrete cosine transforms;Feature extraction},\n  doi = {10.23919/EUSIPCO.2017.8081213},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347426.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we present an adversary-aware double JPEG detector which is capable of detecting the presence of two JPEG compression steps even in the presence of heterogeneous processing and counter-forensic (C-F) attacks. The detector is based on an SVM classifier fed with a large number of features and trained to recognise the traces left by double JPEG compression in the presence of attacks. Since it is not possible to train the SVM on all possible kinds of processing and C-F attacks, a selected set of images, manipulated with a limited number of attacks, is added to the training set. The processing tools used for training are chosen among those that proved to be most effective in disabling double JPEG detection. Experimental results show that training on such a selection of the most powerful attacks allows good detection in the presence of a much wider variety of attacks and processing. Good performance is retained over a wide range of compression quality factors.\n
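The training recipe described here amounts to augmenting the positive class with attacked examples before fitting the classifier. A minimal scikit-learn sketch, assuming feature vectors (e.g., DCT-histogram features) have already been extracted and with hyperparameters chosen arbitrarily:

```python
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

def train_adversary_aware(X_single, X_double, X_double_attacked):
    """Augment the double-JPEG class with selected attacked samples."""
    X = np.vstack([X_single, X_double, X_double_attacked])
    y = np.concatenate([np.zeros(len(X_single)),
                        np.ones(len(X_double) + len(X_double_attacked))])
    clf = make_pipeline(StandardScaler(), SVC(kernel="rbf", C=10, gamma="scale"))
    return clf.fit(X, y)
```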
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Electric network frequency estimation based on linear canonical transform for audio signal authentication.\n \n \n \n \n\n\n \n Zhong, W.; You, X.; Kong, X.; and Wang, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 286-290, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ElectricPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081214,\n  author = {W. Zhong and X. You and X. Kong and B. Wang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Electric network frequency estimation based on linear canonical transform for audio signal authentication},\n  year = {2017},\n  pages = {286-290},\n  abstract = {As electric network frequency is sometimes embedded in audio signals when the recording is carried out with the equipment connected to an electrical outlet, electric network frequency estimation is an important task in audio authenticity. After the theoretical analysis, a novel electric network frequency estimation method based on linear canonical transform is proposed from anti-multipath interference point of view. The experimental results demonstrate that this model performs well with high precision in complex noisy environment.},\n  keywords = {audio recording;audio signal processing;interference (signal);security of data;transforms;audio signal authentication;electrical outlet;audio authenticity;novel electric network frequency estimation method;linear canonical transform;antimultipath interference;Frequency estimation;Signal processing algorithms;Discrete Fourier transforms;Estimation;Noise measurement;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081214},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347245.pdf},\n}\n\n
\n
\n\n\n
\n As the electric network frequency is sometimes embedded in audio signals when the recording is carried out with equipment connected to an electrical outlet, electric network frequency estimation is an important task in audio authenticity. After a theoretical analysis, a novel electric network frequency estimation method based on the linear canonical transform is proposed from an anti-multipath-interference point of view. The experimental results demonstrate that this model performs well, with high precision, in complex noisy environments.\n
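For orientation, the classical reference method that this approach improves upon is a windowed spectral peak search around the nominal mains frequency. The sketch below implements that DFT baseline (window lengths and the search band are assumptions); the paper replaces the DFT with a linear canonical transform for robustness to multipath:

```python
import numpy as np

def enf_track(x, fs, f_nominal=50.0, win_s=2.0, hop_s=1.0, band=1.0):
    """Per-frame ENF estimate: peak of the zero-padded DFT near f_nominal."""
    win, hop = int(win_s * fs), int(hop_s * fs)
    nfft = 16 * win                                   # zero-pad for a finer grid
    freqs = np.fft.rfftfreq(nfft, 1.0 / fs)
    mask = (freqs > f_nominal - band) & (freqs < f_nominal + band)
    est = []
    for start in range(0, len(x) - win, hop):
        seg = x[start:start + win] * np.hanning(win)
        mag = np.abs(np.fft.rfft(seg, nfft))
        est.append(freqs[mask][np.argmax(mag[mask])])
    return np.array(est)
```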
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A two-stage subspace trust region approach for deep neural network training.\n \n \n \n \n\n\n \n Dudar, V.; Chierchia, G.; Chouzenoux, E.; Pesquet, J.; and Semenov, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 291-295, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081215,\n  author = {V. Dudar and G. Chierchia and E. Chouzenoux and J. Pesquet and V. Semenov},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A two-stage subspace trust region approach for deep neural network training},\n  year = {2017},\n  pages = {291-295},\n  abstract = {In this paper, we develop a novel second-order method for training feed-forward neural nets. At each iteration, we construct a quadratic approximation to the cost function in a low-dimensional subspace. We minimize this approximation inside a trust region through a two-stage procedure: first inside the embedded positive curvature subspace, followed by a gradient descent step. This approach leads to a fast objective function decay, prevents convergence to saddle points, and alleviates the need for manually tuning parameters. We show the good performance of the proposed algorithm on benchmark datasets.},\n  keywords = {approximation theory;convergence of numerical methods;feedforward neural nets;gradient methods;learning (artificial intelligence);Newton method;embedded positive curvature subspace;gradient descent step;fast objective function decay;two-stage subspace trust region approach;deep neural network training;second-order method;feed-forward neural nets;quadratic approximation;cost function;low-dimensional subspace;Training;Neural networks;Signal processing algorithms;Optimization;Eigenvalues and eigenfunctions;Europe;Signal processing;Deep learning;second-order approach;noncon-vex optimization;trust region;subspace method},\n  doi = {10.23919/EUSIPCO.2017.8081215},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347632.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we develop a novel second-order method for training feed-forward neural nets. At each iteration, we construct a quadratic approximation to the cost function in a low-dimensional subspace. We minimize this approximation inside a trust region through a two-stage procedure: first inside the embedded positive curvature subspace, followed by a gradient descent step. This approach leads to a fast objective function decay, prevents convergence to saddle points, and alleviates the need for manually tuning parameters. We show the good performance of the proposed algorithm on benchmark datasets.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Active learning with cross-dataset validation in event-based non-intrusive load monitoring.\n \n \n \n \n\n\n \n Liebgott, F.; and Yang, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 296-300, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ActivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081216,\n  author = {F. Liebgott and B. Yang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Active learning with cross-dataset validation in event-based non-intrusive load monitoring},\n  year = {2017},\n  pages = {296-300},\n  abstract = {Supervised event-based NILM systems usually require a large set of labeled training data to achieve high classification accuracies. To minimize the cost of labeling a sufficient amount of events, active learning can be employed. By using only a small set of labeled samples for initial training followed by selecting only the most informative samples to be labeled, the total number of labeled training samples can be reduced significantly. The performance of an active learning system strongly depends on the choice of the initial training set and the used query strategy. We thus investigated the impact of different methods to select the dataset for initial training as well as various query strategies on the resulting classification accuracy in an event-based NILM framework. For evaluation we used two datasets, BLUED and ISS kitchen, on which we were able to achieve high classification accuracies with significantly less training samples compared to conventional training without active learning.},\n  keywords = {computerised monitoring;learning (artificial intelligence);pattern classification;power engineering computing;power system measurement;cross-dataset validation;nonintrusive load monitoring;supervised event;NILM systems;labeled training data;high classification accuracies;informative samples;labeled training samples;active learning system;initial training set;query strategy;classification accuracy;BLUED;ISS kitchen;Training;Labeling;Learning systems;Uncertainty;Training data;Voltage measurement;Feature extraction},\n  doi = {10.23919/EUSIPCO.2017.8081216},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347572.pdf},\n}\n\n
\n
\n\n\n
\n Supervised event-based NILM systems usually require a large set of labeled training data to achieve high classification accuracies. To minimize the cost of labeling a sufficient number of events, active learning can be employed. By using only a small set of labeled samples for initial training, followed by selecting only the most informative samples to be labeled, the total number of labeled training samples can be reduced significantly. The performance of an active learning system strongly depends on the choice of the initial training set and the query strategy used. We thus investigated the impact of different methods to select the dataset for initial training, as well as of various query strategies, on the resulting classification accuracy in an event-based NILM framework. For evaluation we used two datasets, BLUED and ISS kitchen, on which we were able to achieve high classification accuracies with significantly fewer training samples compared to conventional training without active learning.\n
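One common query strategy in such comparisons, least-confidence uncertainty sampling, is easy to state in code. The classifier choice and query budget below are illustrative assumptions, not the paper's setup:

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def uncertainty_sampling(X_pool, y_pool, X_init, y_init, budget=50):
    """Iteratively query the pool sample the classifier is least confident about."""
    X_lab, y_lab = X_init.copy(), y_init.copy()
    pool_idx = np.arange(len(X_pool))
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    for _ in range(budget):
        clf.fit(X_lab, y_lab)
        proba = clf.predict_proba(X_pool[pool_idx])
        q = pool_idx[np.argmin(proba.max(axis=1))]   # least confident sample
        X_lab = np.vstack([X_lab, X_pool[q][None]])
        y_lab = np.append(y_lab, y_pool[q])          # oracle provides the label
        pool_idx = pool_idx[pool_idx != q]
    return clf.fit(X_lab, y_lab)
```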
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Time-series classification using neural Bag-of-Features.\n \n \n \n \n\n\n \n Passalis, N.; Tsantekidis, A.; Tefas, A.; Kanniainen, J.; Gabbouj, M.; and Iosifidis, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 301-305, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Time-seriesPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081217,\n  author = {N. Passalis and A. Tsantekidis and A. Tefas and J. Kanniainen and M. Gabbouj and A. Iosifidis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Time-series classification using neural Bag-of-Features},\n  year = {2017},\n  pages = {301-305},\n  abstract = {Classification of time-series data is a challenging problem with many real-world applications, ranging from identifying medical conditions from electroencephalography (EEG) measurements to forecasting the stock market. The well known Bag-of-Features (BoF) model was recently adapted towards time-series representation. In this work, a neural generalization of the BoF model, composed of an RBF layer and an accumulation layer, is proposed as a neural layer that receives the features extracted from a time-series and gradually builds its representation. The proposed method can be combined with any other layer or classifier, such as fully connected layers or feature transformation layers, to form deep neural networks for time-series classification. The resulting networks are end-to-end differentiable and they can be trained using regular back-propagation. It is demonstrated, using two time-series datasets, including a large-scale financial dataset, that the proposed approach can significantly increase the classification metrics over other baseline and state-of-the-art techniques.},\n  keywords = {feature extraction;generalisation (artificial intelligence);learning (artificial intelligence);pattern classification;radial basis function networks;time series;time-series classification;Bag-of-Features model;time-series representation;neural generalization;BoF model;RBF layer;accumulation layer;neural layer;fully connected layers;feature transformation layers;deep neural networks;time-series datasets;classification metrics;backpropagation;Feature extraction;Brain modeling;Neurons;Electroencephalography;Histograms;Hidden Markov models},\n  doi = {10.23919/EUSIPCO.2017.8081217},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346787.pdf},\n}\n\n
\n
\n\n\n
\n Classification of time-series data is a challenging problem with many real-world applications, ranging from identifying medical conditions from electroencephalography (EEG) measurements to forecasting the stock market. The well known Bag-of-Features (BoF) model was recently adapted towards time-series representation. In this work, a neural generalization of the BoF model, composed of an RBF layer and an accumulation layer, is proposed as a neural layer that receives the features extracted from a time-series and gradually builds its representation. The proposed method can be combined with any other layer or classifier, such as fully connected layers or feature transformation layers, to form deep neural networks for time-series classification. The resulting networks are end-to-end differentiable and they can be trained using regular back-propagation. It is demonstrated, using two time-series datasets, including a large-scale financial dataset, that the proposed approach can significantly increase the classification metrics over other baseline and state-of-the-art techniques.\n
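The forward pass of such a Bag-of-Features block is short: an RBF layer soft-assigns each per-step feature vector to K codewords, and an accumulation layer averages the assignments into one K-dimensional histogram. In the paper the codewords and widths are trained end-to-end by back-propagation; the NumPy sketch below keeps them fixed for illustration:

```python
import numpy as np

def neural_bof_forward(X, C, sigma=1.0):
    """X: (T, D) features of one time series; C: (K, D) codewords."""
    d2 = ((X[:, None, :] - C[None, :, :]) ** 2).sum(-1)   # (T, K) squared distances
    phi = np.exp(-d2 / (2 * sigma ** 2))                  # RBF memberships
    phi /= phi.sum(axis=1, keepdims=True) + 1e-12         # normalize per time step
    return phi.mean(axis=0)                               # accumulate into a histogram
```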
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Qualitative assessment of recurrent human motion.\n \n \n \n \n\n\n \n Ebert, A.; Beck, M. T.; Mattausch, A.; Belzner, L.; and Linnhoff-Popien, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 306-310, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"QualitativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081218,\n  author = {A. Ebert and M. T. Beck and A. Mattausch and L. Belzner and C. Linnhoff-Popien},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Qualitative assessment of recurrent human motion},\n  year = {2017},\n  pages = {306-310},\n  abstract = {Smartphone applications designed to track human motion in combination with wearable sensors, e.g., during physical exercising, raised huge attention recently. Commonly, they provide quantitative services, such as personalized training instructions or the counting of distances. But qualitative monitoring and assessment is still missing, e.g., to detect malpositions, to prevent injuries, or to optimize training success. We address this issue by presenting a concept for qualitative as well as generic assessment of recurrent human motion by processing multi-dimensional, continuous time series tracked with motion sensors. Therefore, our segmentation procedure extracts individual events of specific length and we propose expressive features to accomplish a qualitative motion assessment by supervised classification. We verified our approach within a comprehensive study encompassing 27 athletes undertaking different body weight exercises. We are able to recognize six different exercise types with a success rate of 100% and to assess them qualitatively with an average success rate of 99.3%.},\n  keywords = {biomechanics;feature extraction;learning (artificial intelligence);medical computing;pattern classification;smart phones;sport;time series;recurrent human motion;smartphone applications;physical exercising;quantitative services;personalized training instructions;qualitative monitoring;training success;continuous time series;motion sensors;qualitative motion assessment;generic assessment;body weight exercises;exercise types;Motion segmentation;Feature extraction;Sensors;Tracking;Training;Europe;Signal processing;Motion assessment;Activity recognition;Physical exercises;Segmentation},\n  doi = {10.23919/EUSIPCO.2017.8081218},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346528.pdf},\n}\n\n
\n
\n\n\n
\n Smartphone applications that track human motion in combination with wearable sensors, e.g., during physical exercise, have recently attracted considerable attention. Commonly, they provide quantitative services, such as personalized training instructions or the counting of distances. But qualitative monitoring and assessment are still missing, e.g., to detect malpositions, to prevent injuries, or to optimize training success. We address this issue by presenting a concept for the qualitative as well as generic assessment of recurrent human motion by processing multi-dimensional, continuous time series tracked with motion sensors. To this end, our segmentation procedure extracts individual events of specific length, and we propose expressive features to accomplish a qualitative motion assessment by supervised classification. We verified our approach within a comprehensive study encompassing 27 athletes undertaking different body weight exercises. We are able to recognize six different exercise types with a success rate of 100% and to assess them qualitatively with an average success rate of 99.3%.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Visualization of feature evolution during convolutional neural network training.\n \n \n \n \n\n\n \n Punjabi, A.; and Katsaggelos, A. K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 311-315, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"VisualizationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081219,\n  author = {A. Punjabi and A. K. Katsaggelos},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Visualization of feature evolution during convolutional neural network training},\n  year = {2017},\n  pages = {311-315},\n  abstract = {Convolutional neural networks (CNNs) are a staple in the fields of computer vision and image processing. These networks perform visual tasks with state-of-the-art accuracy; yet, the understanding behind the success of these algorithms is still lacking. In particular, the process by which CNNs learn effective task-specific features is still unclear. This work elucidates such phenomena by applying recent deep visualization techniques during different stages of the training process. Additionally, this investigation provides visual justification to the benefits of transfer learning. The results are in line with previously discussed notions of feature specificity, and show a new facet of a particularly vexing machine learning pitfall: overfitting.},\n  keywords = {computer vision;learning (artificial intelligence);neural nets;convolutional neural network training;computer vision;image processing;visual tasks;visual justification;feature specificity;CNN;deep visualization techniques;vexing machine learning pitfall;Visualization;Training;Signal processing algorithms;Neural networks;Convolution;Europe;deep learning;convolutional neural network;feature visualization;transfer learning},\n  doi = {10.23919/EUSIPCO.2017.8081219},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347731.pdf},\n}\n\n
\n
\n\n\n
\n Convolutional neural networks (CNNs) are a staple in the fields of computer vision and image processing. These networks perform visual tasks with state-of-the-art accuracy; yet, the understanding behind the success of these algorithms is still lacking. In particular, the process by which CNNs learn effective task-specific features is still unclear. This work elucidates such phenomena by applying recent deep visualization techniques during different stages of the training process. Additionally, this investigation provides visual justification to the benefits of transfer learning. The results are in line with previously discussed notions of feature specificity, and show a new facet of a particularly vexing machine learning pitfall: overfitting.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An elliptical-shaped density-based classification algorithm for detection of entangled clusters.\n \n \n \n \n\n\n \n Smith, S.; Pischella, M.; and Terré, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 316-320, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081220,\n  author = {S. Smith and M. Pischella and M. Terré},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An elliptical-shaped density-based classification algorithm for detection of entangled clusters},\n  year = {2017},\n  pages = {316-320},\n  abstract = {We present a density-based clustering method producing a covering of the dataset by ellipsoidal structures in order to detect possibly entangled clusters. We first introduce an unconstrained version of the algorithm which does not require any assumption on the number of clusters. Then a constrained version using a priori knowledge to improve the bare clustering is discussed. We evaluate the performance of our algorithm and several other well-known clustering methods using existing cluster validity techniques on randomly-generated bi-dimensional gaussian mixtures. Our simulation results show that both versions of our algorithm compare well with the reference algorithms according to the used metrics, foreseeing future improvements of our method.},\n  keywords = {Gaussian processes;pattern classification;pattern clustering;ellipsoidal structures;unconstrained version;constrained version;clustering methods;existing cluster validity techniques;elliptical-shaped density-based classification algorithm;bi-dimensional Gaussian mixtures;Clustering algorithms;Signal processing algorithms;Clustering methods;Algorithm design and analysis;Density functional theory;Estimation;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081220},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347541.pdf},\n}\n\n
\n
\n\n\n
\n We present a density-based clustering method producing a covering of the dataset by ellipsoidal structures in order to detect possibly entangled clusters. We first introduce an unconstrained version of the algorithm which does not require any assumption on the number of clusters. Then a constrained version using a priori knowledge to improve the bare clustering is discussed. We evaluate the performance of our algorithm and several other well-known clustering methods using existing cluster validity techniques on randomly generated two-dimensional Gaussian mixtures. Our simulation results show that both versions of our algorithm compare well with the reference algorithms according to the metrics used, pointing to future improvements of our method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A post-nonlinear mixture model approach to binary matrix factorization.\n \n \n \n \n\n\n \n Diop, M.; Larue, A.; Miron, S.; and Brie, D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 321-325, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081221,\n  author = {M. Diop and A. Larue and S. Miron and D. Brie},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A post-nonlinear mixture model approach to binary matrix factorization},\n  year = {2017},\n  pages = {321-325},\n  abstract = {In this paper, we address the Binary Matrix Factorization (BMF) problem, which is the restriction of nonnegative matrix factorization (NMF) to the binary matrix case. A necessary and sufficient condition for the identifiability of the BMF model is given. We propose to approach the BMF problem as an NMF problem using a nonlinear function which guarantees the binarity of the reconstructed data. Two new algorithms are introduced and compared in simulations with state-of-the-art BMF algorithms.},\n  keywords = {matrix decomposition;mixture models;post-nonlinear mixture model approach;Binary Matrix Factorization problem;nonnegative matrix factorization;necessary condition;sufficient condition;BMF model;NMF problem;nonlinear function;Matrix decomposition;Inverse problems;Signal processing algorithms;Mixture models;Cost function;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081221},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346081.pdf},\n}\n\n
\n
\n\n\n
In this paper, we address the Binary Matrix Factorization (BMF) problem, which is the restriction of the nonnegative matrix factorization (NMF) to the binary matrix case. A necessary and sufficient condition for the identifiability of the BMF model is given. We propose to approach the BMF problem by the NMF problem using a nonlinear function which guarantees the binarity of the reconstructed data. Two new algorithms are introduced and compared in simulations with the state-of-the-art BMF algorithms.
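As a rough sketch of the post-nonlinear idea — factor X ≈ σ(WH), with a sigmoid pushing reconstructions towards {0, 1} — the toy gradient-descent routine below is illustrative only; the paper's two dedicated algorithms and its identifiability analysis are not reproduced:

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def pnl_bmf(X, r, n_iter=2000, lr=0.5, seed=0):
    """Toy post-nonlinear BMF: minimize ||X - sigmoid(W @ H)||^2 by plain
    gradient descent, then threshold. Illustrative, not the paper's method."""
    rng = np.random.default_rng(seed)
    m, n = X.shape
    W = rng.standard_normal((m, r))
    H = rng.standard_normal((r, n))
    for _ in range(n_iter):
        S = sigmoid(W @ H)
        E = (S - X) * S * (1.0 - S)      # chain rule through the sigmoid
        W -= lr * (E @ H.T) / n
        H -= lr * (W.T @ E) / m
    return (sigmoid(W @ H) > 0.5).astype(int)

# Binary test matrix that admits an exact rank-2 threshold representation.
rng = np.random.default_rng(1)
W0 = rng.standard_normal((20, 2)); H0 = rng.standard_normal((2, 30))
X = (W0 @ H0 > 0).astype(float)
print(np.mean(pnl_bmf(X, 2) == X))       # fraction of correct entries, typically high
```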
Classification of one-dimensional non-stationary signals using the Wigner-Ville distribution in convolutional neural networks.
Brynolfsson, J.; and Sandsten, M.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 326-330, Aug 2017.
@InProceedings{8081222,
  author = {J. Brynolfsson and M. Sandsten},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Classification of one-dimensional non-stationary signals using the Wigner-Ville distribution in convolutional neural networks},
  year = {2017},
  pages = {326-330},
  abstract = {In this paper we argue that the Wigner-Ville distribution (WVD), instead of the spectrogram, should be used as the basic input to convolutional neural network (CNN) based classification schemes. The WVD has superior resolution and localization as compared to other time-frequency representations. We present a method where a large-size kernel may be learned from the data, to enhance features important for classification. We back up our claims with theory, as well as applications to simulated examples, and show superior performance as compared to the commonly used spectrogram.},
  keywords = {convolution;feature extraction;learning (artificial intelligence);neural nets;signal classification;Wigner distribution;Wigner-Ville distribution;convolutional neural networks;WVD;nonstationary signals classification;CNN;kernel learning;features enhancement;Kernel;Spectrogram;Convolution;Time-frequency analysis;Neural networks;Smoothing methods;Chirp},
  doi = {10.23919/EUSIPCO.2017.8081222},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343828.pdf},
}
In this paper we argue that the Wigner-Ville distribution (WVD), instead of the spectrogram, should be used as the basic input to convolutional neural network (CNN) based classification schemes. The WVD has superior resolution and localization as compared to other time-frequency representations. We present a method where a large-size kernel may be learned from the data, to enhance features important for classification. We back up our claims with theory, as well as applications to simulated examples, and show superior performance as compared to the commonly used spectrogram.
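A minimal discrete (pseudo) Wigner-Ville implementation makes the claim concrete; this is the textbook construction, not the paper's learned-kernel network. For an analytic linear chirp the energy concentrates sharply along the instantaneous frequency, which is what makes the WVD attractive as a CNN input:

```python
import numpy as np

def wigner_ville(x):
    """Discrete pseudo Wigner-Ville distribution (sketch, assumes analytic x):
    W[k, n] = FFT over lag m of x[n+m] * conj(x[n-m])."""
    N = len(x)
    W = np.zeros((N, N))
    for n in range(N):
        mmax = min(n, N - 1 - n)              # lags that stay inside the signal
        m = np.arange(-mmax, mmax + 1)
        acf = np.zeros(N, dtype=complex)
        acf[m % N] = x[n + m] * np.conj(x[n - m])   # instantaneous autocorrelation
        W[:, n] = np.fft.fft(acf).real        # Hermitian in m, so the FFT is real
    return W

# Analytic linear chirp: instantaneous frequency grows linearly with time.
N = 256
t = np.arange(N)
x = np.exp(1j * np.pi * 0.4 * t**2 / N)
W = wigner_ville(x)
print(W[:, N // 2].argmax())                  # peak bin near 0.8 * n (about 102 here)
```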
Spectral detection and localization of radio events with learned convolutional neural features.
O'Shea, T. J.; Roy, T.; and Erpek, T.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 331-335, Aug 2017.
@InProceedings{8081223,
  author = {T. J. O'Shea and T. Roy and T. Erpek},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Spectral detection and localization of radio events with learned convolutional neural features},
  year = {2017},
  pages = {331-335},
  abstract = {We introduce a method for detecting, localizing and identifying radio transmissions within wide-band time-frequency power spectrograms using feature learning with convolutional neural networks on their 2D image representation. By doing so we build a foundation for higher-level contextual radio spectrum event understanding, labeling, and reasoning in complex shared-spectrum and many-user environments, by developing tools which can rapidly understand and label sequences of events based on experience and labeled data rather than signal-specific detection algorithms such as matched filters.},
  keywords = {feature extraction;image recognition;image representation;learning (artificial intelligence);neural nets;object detection;time-frequency analysis;higher level contextual radio spectrum event;radio transmission identification;radio transmission localization;radio transmission detection;radio event localization;signal-specific detection algorithms;complex shared spectrum;2D image representation;convolutional neural networks;feature learning;wide-band time-frequency power spectrograms;learned convolutional neural features;Convolution;Spectrogram;Radio frequency;Training;Feature extraction;Neural networks},
  doi = {10.23919/EUSIPCO.2017.8081223},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347300.pdf},
}
We introduce a method for detecting, localizing and identifying radio transmissions within wide-band time-frequency power spectrograms using feature learning with convolutional neural networks on their 2D image representation. By doing so we build a foundation for higher-level contextual radio spectrum event understanding, labeling, and reasoning in complex shared-spectrum and many-user environments, by developing tools which can rapidly understand and label sequences of events based on experience and labeled data rather than signal-specific detection algorithms such as matched filters.
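The paper does not spell out its architecture here; the hypothetical PyTorch fragment below only illustrates the input/output contract such a detector has, with a 2D power spectrogram going in and a per-pixel event map coming out (layer sizes are invented for illustration):

```python
import torch
import torch.nn as nn

# Hedged sketch: a tiny fully convolutional network over a 1 x F x T power
# spectrogram producing a same-size "event present" heatmap. The real system
# would be trained on labeled spectrogram data; this only shows the shapes.
net = nn.Sequential(
    nn.Conv2d(1, 16, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 16, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 1, kernel_size=1), nn.Sigmoid(),
)
spec = torch.randn(1, 1, 128, 256)   # batch x channel x frequency x time
heatmap = net(spec)                  # per-pixel detection probabilities
print(heatmap.shape)                 # torch.Size([1, 1, 128, 256])
```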
Low-rank and nonlinear model approach to image inpainting.
Sasaki, R.; Konishi, K.; Takahashi, T.; and Furukawa, T.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 336-340, Aug 2017.
@InProceedings{8081224,
  author = {R. Sasaki and K. Konishi and T. Takahashi and T. Furukawa},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Low-rank and nonlinear model approach to image inpainting},
  year = {2017},
  pages = {336-340},
  abstract = {This paper proposes a new image inpainting algorithm based on matrix rank minimization with a nonlinear mapping function. Assuming that each intensity value of a nonlinearly mapped image can be modeled by the autoregressive (AR) model, the image inpainting problem is formulated as a kind of matrix rank minimization problem. This paper modifies the iterative partial matrix shrinkage (IPMS) algorithm and provides an inpainting algorithm which estimates a nonlinear mapping function and the missing pixels simultaneously. Numerical examples show that the proposed algorithm recovers missing pixels efficiently.},
  keywords = {autoregressive processes;image reconstruction;image resolution;iterative methods;matrix algebra;minimisation;nonlinear model approach;image inpainting algorithm;nonlinear mapping function;intensity value;nonlinear mapped image;autoregressive model;image inpainting problem;matrix rank minimization problem;iterative partial matrix shrinkage algorithm;IPMS algorithm;low-rank approach;missing pixels recovery;Signal processing algorithms;Minimization;Numerical models;Subspace constraints;Signal processing;Approximation algorithms;Europe;image inpainting;matrix rank minimization;AR modeling;matrix recovery;manifold learning},
  doi = {10.23919/EUSIPCO.2017.8081224},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347729.pdf},
}
This paper proposes a new image inpainting algorithm based on matrix rank minimization with a nonlinear mapping function. Assuming that each intensity value of a nonlinearly mapped image can be modeled by the autoregressive (AR) model, the image inpainting problem is formulated as a kind of matrix rank minimization problem. This paper modifies the iterative partial matrix shrinkage (IPMS) algorithm and provides an inpainting algorithm which estimates a nonlinear mapping function and the missing pixels simultaneously. Numerical examples show that the proposed algorithm recovers missing pixels efficiently.
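For orientation, a linear low-rank baseline (SoftImpute-style singular-value shrinkage, assuming the image is approximately low-rank) shows the matrix-completion core of such methods; the paper's actual contribution, the jointly estimated nonlinear mapping and AR structure, is omitted here:

```python
import numpy as np

def lowrank_inpaint(img, mask, tau=0.5, n_iter=300):
    """Hedged sketch: inpainting by iterative singular-value shrinkage
    (SoftImpute-style), keeping observed pixels fixed at every step."""
    Y = img * mask
    for _ in range(n_iter):
        U, s, Vt = np.linalg.svd(Y, full_matrices=False)
        Z = (U * np.maximum(s - tau, 0.0)) @ Vt     # shrink singular values
        Y = img * mask + Z * (1 - mask)             # re-impose observed pixels
    return Y

rng = np.random.default_rng(0)
u = rng.random((64, 3)); v = rng.random((3, 64))
img = u @ v                                         # exactly rank-3 "image"
mask = (rng.random(img.shape) < 0.6).astype(float)  # keep 60% of the pixels
rec = lowrank_inpaint(img, mask)
print(np.abs((rec - img) * (1 - mask)).mean())      # small error on missing pixels
```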
Autonomous state space models for recursive signal estimation beyond least squares.
Zalmai, N.; Wildhaber, R. A.; and Loeliger, H.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 341-345, Aug 2017.
@InProceedings{8081225,
  author = {N. Zalmai and R. A. Wildhaber and H. Loeliger},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Autonomous state space models for recursive signal estimation beyond least squares},
  year = {2017},
  pages = {341-345},
  abstract = {The paper addresses the problem of fitting, at any given time, a parameterized signal generated by an autonomous linear state space model (LSSM) to discrete-time observations. When the cost function is the squared error, the fitting can be accomplished based on efficient recursions. In this paper, the squared error cost is generalized to more advanced cost functions while preserving recursive computations: first, the standard sample-wise squared error is augmented with a sample-dependent polynomial error; second, the sample-wise errors are localized by a window function that is itself described by an autonomous LSSM. It is further demonstrated how such a signal estimation can be extended to handle unknown additive and/or multiplicative interference. All these results rely on two facts: first, the correlation function between a given discrete-time signal and an LSSM signal can be computed by efficient recursions; second, the set of LSSM signals is a ring.},
  keywords = {discrete time systems;least squares approximations;polynomials;signal processing;state-space methods;parameterized signal;recursive signal estimation;autonomous state space models;LSSM signal;correlation function;autonomous LSSM;window function;sample-dependent polynomial error;standard sample-wise squared error;advanced cost functions;discrete-time observations;autonomous linear state space model;Estimation;Cost function;Interference;Computational modeling;Additives;Kalman filters},
  doi = {10.23919/EUSIPCO.2017.8081225},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341592.pdf},
}
The paper addresses the problem of fitting, at any given time, a parameterized signal generated by an autonomous linear state space model (LSSM) to discrete-time observations. When the cost function is the squared error, the fitting can be accomplished based on efficient recursions. In this paper, the squared error cost is generalized to more advanced cost functions while preserving recursive computations: first, the standard sample-wise squared error is augmented with a sample-dependent polynomial error; second, the sample-wise errors are localized by a window function that is itself described by an autonomous LSSM. It is further demonstrated how such a signal estimation can be extended to handle unknown additive and/or multiplicative interference. All these results rely on two facts: first, the correlation function between a given discrete-time signal and an LSSM signal can be computed by efficient recursions; second, the set of LSSM signals is a ring.
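The first of the two facts can be checked in a few lines. For an LSSM signal s_k = c A^k x_0, tracking v_k = c A^k (one matrix-vector product per step) and accumulating u_k = u_{k-1} + y_k v_k gives the correlation sum_k y_k s_k = u_K x_0 without ever forming s. A small numerical sketch under assumed dimensions (all names illustrative):

```python
import numpy as np

# Hedged sketch: recursive computation of the correlation <y, s> with an
# LSSM signal s_k = c @ A^k @ x0, without materializing s.
rng = np.random.default_rng(1)
n = 3
A = 0.5 * rng.standard_normal((n, n))   # assumed LSSM transition matrix
c = rng.standard_normal(n)              # output vector
x0 = rng.standard_normal(n)             # initial state
y = rng.standard_normal(100)            # observed discrete-time signal

u, v = np.zeros(n), c.copy()
for yk in y:
    v = v @ A                # v_k = c A^k, updated recursively
    u = u + yk * v           # accumulates sum_k y_k c A^k

direct = sum(yk * (c @ np.linalg.matrix_power(A, k + 1) @ x0)
             for k, yk in enumerate(y))
print(np.allclose(u @ x0, direct))      # True: recursion matches the direct sum
```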
Characterization of discrete linear shift-invariant systems.
Clausen, M.; and Kurth, F.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 346-350, Aug 2017.
@InProceedings{8081226,
  author = {M. Clausen and F. Kurth},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Characterization of discrete linear shift-invariant systems},
  year = {2017},
  pages = {346-350},
  abstract = {Linear time-invariant (LTI) systems are of fundamental importance in classical digital signal processing. LTI systems are linear operators commuting with the time-shift operator. For N-periodic discrete time series the time-shift operator is a circulant N × N permutation matrix. Sandryhaila and Moura developed a linear discrete signal processing framework and corresponding tools for datasets arising from social, biological, and physical networks. In their framework, the circulant permutation matrix is replaced by a network-specific N × N matrix A, called a shift matrix, and the linear shift-invariant (LSI) systems are all N × N matrices H over C commuting with the shift matrix: HA = AH. Sandryhaila and Moura described all those H for the non-degenerate case, in which all eigenspaces of A are one-dimensional. Then the authors reduced the degenerate case to the non-degenerate one. As we show in this paper this reduction does, however, not generally hold, leaving open one gap in the proposed argument. In this paper we are able to close this gap and propose a complete characterization of all (i.e., degenerate and non-degenerate) LSI systems. Finally, we describe the corresponding spectral decompositions.},
  keywords = {discrete time systems;linear systems;matrix algebra;signal processing;time series;discrete linear shift-invariant systems;linear time-invariant systems;classical digital signal processing;LTI systems;linear operators;time-shift operator;N-periodic discrete time series;social networks;physical networks;circulant permutation matrix;shift matrix;nondegenerate case;biological networks;linear discrete signal processing framework;Eigenvalues and eigenfunctions;Algebra;Signal processing;Large scale integration;Tools;Interpolation;Europe},
  doi = {10.23919/EUSIPCO.2017.8081226},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340919.pdf},
}
Linear time-invariant (LTI) systems are of fundamental importance in classical digital signal processing. LTI systems are linear operators commuting with the time-shift operator. For N-periodic discrete time series the time-shift operator is a circulant N × N permutation matrix. Sandryhaila and Moura developed a linear discrete signal processing framework and corresponding tools for datasets arising from social, biological, and physical networks. In their framework, the circulant permutation matrix is replaced by a network-specific N × N matrix A, called a shift matrix, and the linear shift-invariant (LSI) systems are all N × N matrices H over C commuting with the shift matrix: HA = AH. Sandryhaila and Moura described all those H for the non-degenerate case, in which all eigenspaces of A are one-dimensional. Then the authors reduced the degenerate case to the non-degenerate one. As we show in this paper this reduction does, however, not generally hold, leaving open one gap in the proposed argument. In this paper we are able to close this gap and propose a complete characterization of all (i.e., degenerate and non-degenerate) LSI systems. Finally, we describe the corresponding spectral decompositions.
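Both the classical case and the degenerate gap are easy to see numerically. For the circulant shift (distinct eigenvalues), every polynomial in A commutes with A; for A = I (a single N-dimensional eigenspace), every matrix commutes with A, yet only scalar matrices are polynomials in it. A toy illustration of that contrast, not the paper's characterization:

```python
import numpy as np

N = 8
# Non-degenerate case: the circulant cyclic-shift matrix has N distinct
# eigenvalues, and any polynomial in it (an FIR filter) commutes with it.
A = np.roll(np.eye(N), 1, axis=0)                  # classical time-shift
H = 0.5 * np.eye(N) + 0.3 * A + 0.2 * (A @ A)      # filter with taps [0.5, 0.3, 0.2]
print(np.allclose(H @ A, A @ H))                   # True: H is shift-invariant

# Degenerate case: A = I has one N-dimensional eigenspace, so its commutant
# is ALL of C^{N x N} -- far more than the polynomials in A, which are just
# scalar multiples of I. This is the kind of gap the paper addresses.
A_deg = np.eye(N)
H_any = np.random.default_rng(0).standard_normal((N, N))
print(np.allclose(H_any @ A_deg, A_deg @ H_any))   # True, yet H_any is no polynomial in I
```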
Topology design to increase network lifetime in WSN for graph filtering in consensus processes.
Ben Saad, L.; Weerasinghe, T.; and Beferull-Lozano, B.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 351-355, Aug 2017.
@InProceedings{8081227,
  author = {L. {Ben Saad} and T. Weerasinghe and B. Beferull-Lozano},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Topology design to increase network lifetime in WSN for graph filtering in consensus processes},
  year = {2017},
  pages = {351-355},
  abstract = {Graph filters, which are considered as the workhorses of graph signal analysis in the emerging field of signal processing on graphs, are useful for many applications such as distributed estimation in wireless sensor networks. Many of these tasks are based on basic distributed operators such as consensus, which are carried out by sensor devices under limited energy supply. To cope with the energy constraints, this paper focuses on designing the network topology in order to maximize the network lifetime and reduce the energy consumption when applying graph filters. The problem is a complex combinatorial problem and in this work, we propose two efficient heuristic algorithms for solving it. We show by simulations that they provide good performance in terms of the network lifetime and the total energy consumption of the filtering process.},
  keywords = {energy consumption;filtering theory;graph theory;telecommunication network topology;telecommunication power management;wireless sensor networks;network lifetime;total energy consumption;filtering process;topology design;graph filtering;consensus processes;graph signal analysis;signal processing;distributed estimation;wireless sensor networks;basic distributed operators;sensor devices;energy supply;energy constraints;network topology;graph filters;Network topology;Topology;Signal processing algorithms;Wireless sensor networks;Genetic algorithms;Heuristic algorithms;Algorithm design and analysis},
  doi = {10.23919/EUSIPCO.2017.8081227},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346811.pdf},
}
Graph filters, which are considered as the workhorses of graph signal analysis in the emerging field of signal processing on graphs, are useful for many applications such as distributed estimation in wireless sensor networks. Many of these tasks are based on basic distributed operators such as consensus, which are carried out by sensor devices under limited energy supply. To cope with the energy constraints, this paper focuses on designing the network topology in order to maximize the network lifetime and reduce the energy consumption when applying graph filters. The problem is a complex combinatorial problem and in this work, we propose two efficient heuristic algorithms for solving it. We show by simulations that they provide good performance in terms of the network lifetime and the total energy consumption of the filtering process.
Fractional graph-based semi-supervised learning.
de Nigris, S.; Bautista, E.; Abry, P.; Avrachenkov, K.; and Goncalves, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 356-360, Aug 2017.
@InProceedings{8081228,
  author = {S. {de Nigris} and E. Bautista and P. Abry and K. Avrachenkov and P. Goncalves},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Fractional graph-based semi-supervised learning},
  year = {2017},
  pages = {356-360},
  abstract = {Graph-based semi-supervised learning for classification admits a nice interpretation in terms of diffusive random walks, where the regularisation factor in the original optimisation formulation plays the role of a restarting probability. Recently, a new type of biased random walk for characterising certain dynamics on networks has been defined; it relies on the γ-th power of the standard Laplacian matrix, L^γ, with γ > 0. In particular, these processes embed long-range transitions, the Lévy flights, that are capable of one-step jumps between far-distant states (nodes) of the graph. The present contribution builds upon these volatile random walks to propose a new version of graph-based semi-supervised learning algorithms whose classification outcome can benefit from the dynamics induced by the fractional transition matrix.},
  keywords = {graph theory;learning (artificial intelligence);matrix algebra;probability;random processes;regularisation factor;original optimisation formulation;far-distant states;volatile random walks;fractional transition matrix;standard Laplacian matrix;fractional graph-based semisupervised learning;Laplace equations;Standards;Semisupervised learning;Europe;Signal processing;Supervised learning;Probabilistic logic},
  doi = {10.23919/EUSIPCO.2017.8081228},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347271.pdf},
}
Graph-based semi-supervised learning for classification admits a nice interpretation in terms of diffusive random walks, where the regularisation factor in the original optimisation formulation plays the role of a restarting probability. Recently, a new type of biased random walk for characterising certain dynamics on networks has been defined; it relies on the γ-th power of the standard Laplacian matrix, L^γ, with γ > 0. In particular, these processes embed long-range transitions, the Lévy flights, that are capable of one-step jumps between far-distant states (nodes) of the graph. The present contribution builds upon these volatile random walks to propose a new version of graph-based semi-supervised learning algorithms whose classification outcome can benefit from the dynamics induced by the fractional transition matrix.
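One common Tikhonov-style formulation of this idea (illustrative parameter names, not necessarily the authors' exact objective) computes L^γ by eigendecomposition and solves a regularized least-squares problem:

```python
import numpy as np

def fractional_ssl(W, y, labeled, gamma=0.5, mu=0.1):
    """Hedged sketch of L^gamma-regularized semi-supervised learning:
    f = argmin sum_labeled (f_i - y_i)^2 + mu * f' L^gamma f."""
    L = np.diag(W.sum(1)) - W                        # combinatorial Laplacian
    lam, U = np.linalg.eigh(L)                       # symmetric PSD
    Lg = (U * np.clip(lam, 0, None)**gamma) @ U.T    # fractional power L^gamma
    S = np.diag(labeled.astype(float))
    return np.linalg.solve(S + mu * Lg, S @ y)

# Two 5-node cliques joined by one edge; one label per clique.
n = 10
W = np.zeros((n, n))
W[:5, :5] = 1; W[5:, 5:] = 1; W[4, 5] = W[5, 4] = 1
np.fill_diagonal(W, 0)
y = np.zeros(n); y[0], y[9] = 1, -1
labeled = np.zeros(n, bool); labeled[[0, 9]] = True
f = fractional_ssl(W, y, labeled, gamma=0.5)
print((f > 0).astype(int))    # first clique labeled 1, second clique 0
```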
Distributed greedy sparse learning over doubly stochastic networks.
Zaki, A.; Venkitaraman, A.; Chatterjee, S.; and Rasmussen, L. K.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 361-364, Aug 2017.
@InProceedings{8081229,
  author = {A. Zaki and A. Venkitaraman and S. Chatterjee and L. K. Rasmussen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Distributed greedy sparse learning over doubly stochastic networks},
  year = {2017},
  pages = {361-364},
  abstract = {In this paper, we develop a greedy algorithm for sparse learning over a doubly stochastic network. In the proposed algorithm, nodes of the network perform sparse learning by exchanging their individual intermediate variables. The algorithm is iterative in nature. We provide a restricted isometry property (RIP)-based theoretical guarantee both on the performance of the algorithm and the number of iterations required for convergence. Using simulations, we show that the proposed algorithm provides good performance.},
  keywords = {greedy algorithms;iterative methods;learning (artificial intelligence);stochastic processes;restricted isometry property;distributed greedy sparse learning;greedy algorithm;individual intermediate variables;doubly stochastic networks;Signal processing algorithms;Algorithm design and analysis;Greedy algorithms;Sparse matrices;Convergence;Signal processing;Europe},
  doi = {10.23919/EUSIPCO.2017.8081229},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347398.pdf},
}
In this paper, we develop a greedy algorithm for sparse learning over a doubly stochastic network. In the proposed algorithm, nodes of the network perform sparse learning by exchanging their individual intermediate variables. The algorithm is iterative in nature. We provide a restricted isometry property (RIP)-based theoretical guarantee both on the performance of the algorithm and the number of iterations required for convergence. Using simulations, we show that the proposed algorithm provides good performance.
Signal processing on kernel-based random graphs.
Morency, M. W.; and Leus, G.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 365-369, Aug 2017.
@InProceedings{8081230,
  author = {M. W. Morency and G. Leus},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Signal processing on kernel-based random graphs},
  year = {2017},
  pages = {365-369},
  abstract = {We present the theory of sequences of random graphs and their convergence to limit objects. Sequences of random dense graphs are shown to converge to their limit objects in both their structural properties and their spectra. The limit objects are bounded symmetric functions on [0,1]². The kernel functions define an equivalence class and thus identify collections of large random graphs that are spectrally and structurally equivalent. As the spectrum of the graph shift operator defines the graph Fourier transform (GFT), the behavior of the spectrum of the underlying graph has a great impact on the design and implementation of graph signal processing operators such as filters. The spectra of several graph limits are derived analytically and verified with numerical examples.},
  keywords = {equivalence classes;filtering theory;Fourier transforms;graph theory;random processes;signal processing;graph shift operator;graph signal processing operators;graph limits;graph Fourier transform;equivalence class;kernel functions;bounded symmetric functions;graph spectra;structural properties;random dense graph sequences;kernel-based random graphs;Signal processing;Eigenvalues and eigenfunctions;Kernel;Mathematical model;Symmetric matrices;Europe;Convergence;Graph signal processing;random graphs;graph limits;graphon},
  doi = {10.23919/EUSIPCO.2017.8081230},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347446.pdf},
}
We present the theory of sequences of random graphs and their convergence to limit objects. Sequences of random dense graphs are shown to converge to their limit objects in both their structural properties and their spectra. The limit objects are bounded symmetric functions on [0,1]². The kernel functions define an equivalence class and thus identify collections of large random graphs that are spectrally and structurally equivalent. As the spectrum of the graph shift operator defines the graph Fourier transform (GFT), the behavior of the spectrum of the underlying graph has a great impact on the design and implementation of graph signal processing operators such as filters. The spectra of several graph limits are derived analytically and verified with numerical examples.
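The spectral convergence can be checked numerically for the simplest kernel. For W(u,v) = uv on [0,1]², the limit operator has a single nonzero eigenvalue ∫ u² du = 1/3, and the top adjacency eigenvalue of a sampled graph, divided by n, approaches it. A hedged illustration, not the paper's derivations:

```python
import numpy as np

rng = np.random.default_rng(0)

def sample_kernel_graph(n, W):
    """Sample an undirected random graph with edge probabilities W(x_i, x_j)."""
    x = rng.uniform(size=n)                         # latent vertex positions
    P = W(x[:, None], x[None, :])                   # pairwise edge probabilities
    A = (rng.uniform(size=(n, n)) < P).astype(float)
    A = np.triu(A, 1)                               # no self-loops
    return A + A.T                                  # symmetric adjacency

for n in (200, 1000):
    A = sample_kernel_graph(n, lambda u, v: u * v)  # rank-one kernel
    print(n, np.linalg.eigvalsh(A)[-1] / n)         # tends to 1/3 as n grows
```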
Graph spectral domain feature representation for in-air drawn number recognition.
Alwaely, B.; and Abhayaratne, C.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 370-374, Aug 2017.
@InProceedings{8081231,
  author = {B. Alwaely and C. Abhayaratne},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Graph spectral domain feature representation for in-air drawn number recognition},
  year = {2017},
  pages = {370-374},
  abstract = {The emerging field of graph signal processing has brought new scope in understanding the spectral properties of arbitrary structures. This paper proposes a novel graph spectral domain feature representation scheme for recognising in-air drawn numbers. It provides the solution by forming the hand's path as a graph and extracting its features based on the spectral domain representation by computing the graph spectral transform. A novel graph generation model is proposed to form the topology of the shapes of numbers. The experiments show that the proposed features are flip- and rotation-invariant, which makes them insensitive to changes in the rotation angle of the drawn numbers. The proposed solution achieves a high level of accuracy of nearly 98% for in-air hand-drawn number recognition.},
  keywords = {feature extraction;graph theory;image representation;spectral analysis;topology;spectral domain representation;in-air hand drawn number recognition;graph signal processing;graph spectral domain feature representation;arbitrary structure spectral properties;graph generation model;topology;rotation-invariant feature;Feature extraction;Laplace equations;Spectral analysis;Gesture recognition;Europe;Shape},
  doi = {10.23919/EUSIPCO.2017.8081231},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347834.pdf},
}
The emerging field of graph signal processing has brought new scope in understanding the spectral properties of arbitrary structures. This paper proposes a novel graph spectral domain feature representation scheme for recognising in-air drawn numbers. It provides the solution by forming the hand's path as a graph and extracting its features based on the spectral domain representation by computing the graph spectral transform. A novel graph generation model is proposed to form the topology of the shapes of numbers. The experiments show that the proposed features are flip- and rotation-invariant, which makes them insensitive to changes in the rotation angle of the drawn numbers. The proposed solution achieves a high level of accuracy of nearly 98% for in-air hand-drawn number recognition.
Graph adjacency matrix learning for irregularly sampled Markovian natural images.
Colonnese, S.; Biagi, M.; Cusani, R.; and Scarano, G.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 375-379, Aug 2017.
@InProceedings{8081232,
  author = {S. Colonnese and M. Biagi and R. Cusani and G. Scarano},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Graph adjacency matrix learning for irregularly sampled Markovian natural images},
  year = {2017},
  pages = {375-379},
  abstract = {The boost of signal processing on graph has recently solicited research on the problem of identifying (learning) the graph underlying the observed signal values according to given criteria, such as graph smoothness or graph sparsity. This paper proposes a procedure for learning the adjacency matrix of a graph providing support to a set of irregularly sampled image values. Our approach to the graph adjacency matrix learning takes into account both the image luminance and the spatial samples' distances, and leads to a flexible and computationally light parametric procedure. We show that, under mild conditions, the proposed procedure identifies a near optimal graph for Markovian fields; specifically, the links identified by the learning procedure minimize the potential energy of the Markov random field for the signal samples under concern. We also show, by numerical simulations, that the learned adjacency matrix leads to a highly compact spectral wavelet graph transform of the resulting signal on graph and compares favourably to state-of-the-art graph learning procedures, closely matching the intrinsic signal structure.},
  keywords = {graph theory;image sampling;Markov processes;matrix algebra;pattern classification;learned adjacency matrix;highly compact spectral wavelet graph;graph adjacency matrix learning;irregularly sampled Markovian natural images;signal processing;observed signal values;graph smoothness;irregularly sampled image values;spatial samples;computationally light parametric procedure;learning procedure;Markov random field;intrinsic signal structure;Potential energy;Signal processing;Laplace equations;Europe;Wavelet transforms;Markov random fields},
  doi = {10.23919/EUSIPCO.2017.8081232},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347111.pdf},
}
The boost of signal processing on graph has recently solicited research on the problem of identifying (learning) the graph underlying the observed signal values according to given criteria, such as graph smoothness or graph sparsity. This paper proposes a procedure for learning the adjacency matrix of a graph providing support to a set of irregularly sampled image values. Our approach to the graph adjacency matrix learning takes into account both the image luminance and the spatial samples' distances, and leads to a flexible and computationally light parametric procedure. We show that, under mild conditions, the proposed procedure identifies a near optimal graph for Markovian fields; specifically, the links identified by the learning procedure minimize the potential energy of the Markov random field for the signal samples under concern. We also show, by numerical simulations, that the learned adjacency matrix leads to a highly compact spectral wavelet graph transform of the resulting signal on graph and compares favourably to state-of-the-art graph learning procedures, closely matching the intrinsic signal structure.
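A generic construction in the same spirit, not the authors' parametric procedure, combines a spatial-distance kernel with a luminance kernel, bilateral-filter style, and keeps the k strongest links per node (all parameter names and defaults are illustrative):

```python
import numpy as np

def build_adjacency(coords, lum, sigma_d=0.2, sigma_l=0.3, k=5):
    """Hedged sketch: adjacency weights from spatial distance and luminance
    difference, sparsified to the k strongest links per node, then symmetrized."""
    n = len(lum)
    D2 = ((coords[:, None, :] - coords[None, :, :])**2).sum(-1)   # squared distances
    L2 = (lum[:, None] - lum[None, :])**2                         # luminance gaps
    Wfull = np.exp(-D2 / (2 * sigma_d**2) - L2 / (2 * sigma_l**2))
    np.fill_diagonal(Wfull, 0)                                    # no self-loops
    W = np.zeros_like(Wfull)
    idx = np.argsort(-Wfull, axis=1)[:, :k]                       # k strongest per row
    rows = np.repeat(np.arange(n), k)
    W[rows, idx.ravel()] = Wfull[rows, idx.ravel()]
    return np.maximum(W, W.T)                                     # symmetric adjacency

# Irregular samples of a smooth "image": nearby, luminance-similar pixels link strongly.
rng = np.random.default_rng(0)
coords = rng.uniform(0, 1, (50, 2))
lum = np.sin(4 * coords[:, 0]) * np.cos(4 * coords[:, 1])
W = build_adjacency(coords, lum)
print(W.shape, int((W > 0).sum()))
```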
Defending surveillance sensor networks against data-injection attacks via trusted nodes.
López-Valcarce, R.; and Romero, D.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 380-384, Aug 2017.
@InProceedings{8081233,
  author = {R. López-Valcarce and D. Romero},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Defending surveillance sensor networks against data-injection attacks via trusted nodes},
  year = {2017},
  pages = {380-384},
  abstract = {By injecting false data through compromised sensors, an adversary can drive the probability of detection in a sensor network-based spatial field surveillance system to arbitrarily low values. As a countermeasure, a small subset of sensors may be secured. Leveraging the theory of Matched Subspace Detection, we propose and evaluate several detectors that add robustness to attacks when such trusted nodes are available. Our results reveal the performance-security tradeoff of these schemes and can be used to determine the number of trusted nodes required for a given performance target.},
  keywords = {telecommunication security;trusted computing;video surveillance;wireless sensor networks;wireless sensor networks;matched subspace detection;performance-security tradeoff;spatial field surveillance system;trusted nodes;data-injection attacks;surveillance sensor networks;Robustness;Interference;Europe;Signal processing;Surveillance;Detectors;Adversarial signal processing;Byzantine sensors;cyber security;sensor networks},
  doi = {10.23919/EUSIPCO.2017.8081233},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346729.pdf},
}
By injecting false data through compromised sensors, an adversary can drive the probability of detection in a sensor network-based spatial field surveillance system to arbitrarily low values. As a countermeasure, a small subset of sensors may be secured. Leveraging the theory of Matched Subspace Detection, we propose and evaluate several detectors that add robustness to attacks when such trusted nodes are available. Our results reveal the performance-security tradeoff of these schemes and can be used to determine the number of trusted nodes required for a given performance target.
Channel estimation and training design for hybrid multi-carrier MmWave massive MIMO systems: The beamspace ESPRIT approach.
Zhang, J.; and Haardt, M.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 385-389, Aug 2017.
@InProceedings{8081234,
  author = {J. Zhang and M. Haardt},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Channel estimation and training design for hybrid multi-carrier MmWave massive MIMO systems: The beamspace ESPRIT approach},
  year = {2017},
  pages = {385-389},
  abstract = {In this paper, we study the channel estimation problem for a cyclic prefix OFDM (CP-OFDM) based millimeter wave (mmWave) hybrid analog-digital MIMO system, where the analog processing is achieved using only phase shift networks. A three-dimensional (3-D) Standard ESPRIT in discrete Fourier transform (DFT) beamspace approach is developed to estimate the unknown frequency-selective channel. The required training protocol, analog precoding and decoding matrices, as well as pilot patterns are also discussed. Simulation results show that the proposed 3-D Standard ESPRIT in DFT beamspace based channel estimation algorithm provides accurate channel estimates when there is a sufficient number of snapshots.},
  keywords = {channel estimation;discrete Fourier transforms;MIMO communication;OFDM modulation;precoding;3-D standard ESPRIT;beamspace ESPRIT;frequency-selective channel;discrete Fourier transforms;channel estimation;DFT beamspace based channel estimation algorithm;decoding matrices;analog precoding;phase shift networks;analog processing;millimeter wave hybrid analog-digital MIMO system;CP-OFDM;cyclic prefix OFDM;hybrid multicarrier MmWave massive MIMO systems;Channel estimation;Training;OFDM;MIMO;Discrete Fourier transforms;Frequency estimation;Precoding;MmWave Massive MIMO;hybrid precoding and decoding;multi-dimensional harmonic retrieval;Standard ESPRIT in DFT beamspace},
  doi = {10.23919/EUSIPCO.2017.8081234},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347815.pdf},
}
In this paper, we study the channel estimation problem for a cyclic prefix OFDM (CP-OFDM) based millimeter wave (mmWave) hybrid analog-digital MIMO system, where the analog processing is achieved using only phase shift networks. A three-dimensional (3-D) Standard ESPRIT in discrete Fourier transform (DFT) beamspace approach is developed to estimate the unknown frequency-selective channel. The required training protocol, analog precoding and decoding matrices, as well as pilot patterns are also discussed. Simulation results show that the proposed 3-D Standard ESPRIT in DFT beamspace based channel estimation algorithm provides accurate channel estimates when there is a sufficient number of snapshots.
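The paper's 3-D DFT-beamspace variant builds on the basic rotational-invariance idea, which the 1-D element-space toy below illustrates for frequency retrieval (a simplified sketch; no beamspace transform, hybrid front end, or multi-dimensional extension):

```python
import numpy as np

def esprit_freqs(x, p, L):
    """Basic 1-D element-space ESPRIT: estimate p complex-exponential
    frequencies from a single snapshot via the shift-invariance property."""
    N = len(x)
    X = np.array([x[i:i + L] for i in range(N - L + 1)]).T   # L x (N-L+1) windows
    U, _, _ = np.linalg.svd(X, full_matrices=False)
    Us = U[:, :p]                                  # signal subspace
    Phi = np.linalg.pinv(Us[:-1]) @ Us[1:]         # rotational invariance: Us_up = Us_low @ Phi
    return np.angle(np.linalg.eigvals(Phi)) / (2 * np.pi)

rng = np.random.default_rng(0)
t = np.arange(128)
x = (np.exp(2j * np.pi * 0.12 * t) + 0.5 * np.exp(2j * np.pi * 0.31 * t)
     + 0.01 * (rng.standard_normal(128) + 1j * rng.standard_normal(128)))
print(np.sort(esprit_freqs(x, p=2, L=32)))        # approximately [0.12, 0.31]
```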
Modal beamforming for small circular arrays of particle velocity sensors.
Gur, B.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 390-394, Aug 2017.
@InProceedings{8081235,
  author = {B. Gur},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Modal beamforming for small circular arrays of particle velocity sensors},
  year = {2017},
  pages = {390-394},
  abstract = {Vector sensors are directional receivers that measure the vectorial particle velocity associated with an acoustic wave rather than the scalar pressure. Therefore, arrays of vector sensors possess some desirable directional properties compared to conventional arrays of pressure sensors. In this paper, a modal beamformer for circular arrays of 1-D acoustic vector sensors is presented. Arrays of both radially and circumferentially oriented vector sensors are considered. It is shown that the highly directional modes of the acoustic velocity field can be extracted from the sensor measurements using the spatial Fourier transform. These modes are weighted and combined to form narrow steerable beams. The highest order of mode that can be extracted is limited by the number of vector sensors utilized in the array. Theoretical analysis and numerical simulations indicate that the proposed modal beamformer attains the same directivity performance as that of circular pressure sensor array beamformers but outperforms them in terms of white noise gain. In addition, it uses half the number of sensors to achieve the same directivity performance of a circular vector sensor array modal beamformer reported previously in the literature. The proposed method is suitable for in-air and underwater low-frequency array processing applications.},
  keywords = {array signal processing;Fourier transforms;particle velocity analysis;pressure sensors;sensor arrays;underwater acoustic communication;underwater low-frequency array processing applications;particle velocity sensors;directional receivers;vectorial particle velocity;radially oriented vector sensors;circumferentially oriented vector sensors;circular pressure sensor array beamformers;circular vector sensor array modal beamformer;1D acoustic vector sensors;vector sensors;pressure sensor arrays;spatial Fourier transform;Sensor arrays;Acoustics;Velocity measurement;Acoustic arrays;Pressure sensors;Array signal processing},
  doi = {10.23919/EUSIPCO.2017.8081235},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347472.pdf},
}
Vector sensors are directional receivers that measure the vectorial particle velocity associated with an acoustic wave rather than the scalar pressure. Therefore, arrays of vector sensors possess some desirable directional properties compared to conventional arrays of pressure sensors. In this paper, a modal beamformer for circular arrays of 1-D acoustic vector sensors is presented. Arrays of both radially and circumferentially oriented vector sensors are considered. It is shown that the highly directional modes of the acoustic velocity field can be extracted from the sensor measurements using the spatial Fourier transform. These modes are weighted and combined to form narrow steerable beams. The highest order of mode that can be extracted is limited by the number of vector sensors utilized in the array. Theoretical analysis and numerical simulations indicate that the proposed modal beamformer attains the same directivity performance as that of circular pressure sensor array beamformers but outperforms them in terms of white noise gain. In addition, it uses half the number of sensors to achieve the same directivity performance of a circular vector sensor array modal beamformer reported previously in the literature. The proposed method is suitable for in-air and underwater low-frequency array processing applications.
Thinned coprime arrays for DOA estimation.
Raza, A.; Liu, W.; and Shen, Q.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 395-399, Aug 2017.
@InProceedings{8081236,
  author = {A. Raza and W. Liu and Q. Shen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Thinned coprime arrays for DOA estimation},
  year = {2017},
  pages = {395-399},
  abstract = {Sparse arrays can generate a larger aperture than traditional uniform linear arrays (ULA) and offer enhanced degrees-of-freedom (DOFs) which can be exploited in both beamforming and direction-of-arrival (DOA) estimation. One class of sparse arrays is the coprime array, composed of two uniform linear subarrays, which yields an effective difference co-array with a higher number of DOFs. In this work, we present a new coprime array structure termed the thinned coprime array (TCA), which exploits the redundancy in the structure of the existing coprime array and achieves the same virtual aperture and DOFs as the conventional coprime array with far fewer sensors. An analysis of the DOFs provided by the new structure in comparison with other sparse arrays is provided, along with simulation results for DOA estimation using a compressive sensing based method.},
  keywords = {array signal processing;compressed sensing;direction-of-arrival estimation;thinned coprime array;DOA estimation;sparse arrays;direction-of-arrival estimation;uniform linear subarrays;coprime array structure;uniform linear arrays;DOF;difference co-array;Sensor arrays;Indexes;Direction-of-arrival estimation;Estimation;Array signal processing;Thinned coprime array;DOA estimation;degrees of freedom;difference co-array},
  doi = {10.23919/EUSIPCO.2017.8081236},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347059.pdf},
}
Sparse arrays can generate a larger aperture than traditional uniform linear arrays (ULA) and offer enhanced degrees-of-freedom (DOFs) which can be exploited in both beamforming and direction-of-arrival (DOA) estimation. One class of sparse arrays is the coprime array, composed of two uniform linear subarrays, which yields an effective difference co-array with a higher number of DOFs. In this work, we present a new coprime array structure termed the thinned coprime array (TCA), which exploits the redundancy in the structure of the existing coprime array and achieves the same virtual aperture and DOFs as the conventional coprime array with far fewer sensors. An analysis of the DOFs provided by the new structure in comparison with other sparse arrays is provided, along with simulation results for DOA estimation using a compressive sensing based method.
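The co-array bookkeeping behind such designs is compact enough to sketch. The snippet below builds the conventional coprime prototype for a pair (M, N) and counts the distinct difference lags it generates; the paper's thinning then removes redundant sensors while preserving this lag set (the prototype layout shown, N sensors at multiples of M plus 2M sensors at multiples of N, is the standard construction, not the TCA itself):

```python
import numpy as np

def difference_coarray(pos):
    """All distinct pairwise lags generated by sensor positions `pos`."""
    pos = np.asarray(pos)
    return np.unique((pos[:, None] - pos[None, :]).ravel())

M, N = 3, 5                                  # coprime pair
sub1 = np.arange(N) * M                      # N sensors spaced M units apart
sub2 = np.arange(2 * M) * N                  # 2M sensors spaced N units apart
pos = np.unique(np.concatenate([sub1, sub2]))
lags = difference_coarray(pos)
# O(M + N) physical sensors yield O(MN) distinct virtual lags.
print(len(pos), "sensors ->", len(lags), "distinct co-array lags")
```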
Joint frequency and 2-D DOA recovery with sub-Nyquist difference space-time array.
Kumar, A. A.; Chandra, M. G.; and Balamuralidhar, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 400-404, Aug 2017.
@InProceedings{8081237,
  author = {A. A. Kumar and M. G. Chandra and P. Balamuralidhar},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Joint frequency and 2-D DOA recovery with sub-Nyquist difference space-time array},
  year = {2017},
  pages = {400-404},
  abstract = {In this paper, joint frequency and 2-D direction of arrival (DOA) estimation at sub-Nyquist sampling rates of a multi-band signal (MBS) comprising P disjoint narrowband signals is considered. Beginning with a standard uniform rectangular array (URA) consisting of M = M_x × M_y sensors, this paper proposes a simpler modification by adding an N - 1 delay channel network to only one of the sensors. A larger array is then formed by combining the sub-Nyquist sampled outputs of the URA and the delay channel network, referred to as the difference space-time (DST) array. Towards estimating the joint frequency and 2-D DOA on this DST array, a new method utilizing the 3-D spatial smoothing for rank enhancement and a subspace algorithm based on ESPRIT is presented. Furthermore, it is shown that an ADC sampling frequency of f_s ≥ B suffices, where B is the bandwidth of the narrowband signal. With the proposed approach, it is shown that O(MN/4) frequencies and their 2-D DOAs can be estimated even when all frequencies alias to the same frequency due to sub-Nyquist sampling. Appropriate simulation results are also presented to corroborate these findings.},
  keywords = {analogue-digital conversion;array signal processing;direction-of-arrival estimation;signal sampling;sub-Nyquist sampling rates;DST array;ADC sampling frequency;narrow-band signal;2-D DOA recovery;sub-Nyquist difference space-time array;multiband signal;2-D direction of arrival estimation;disjoint narrowband signals;standard uniform rectangular array;delay channel network;joint frequency and 2D DOA;3D spatial smoothing;rank enhancement;ESPRIT;subspace algorithm;Sensor arrays;Delays;Frequency estimation;Direction-of-arrival estimation;Estimation;Covariance matrices;Joint frequency-direction of arrival estimation;sub-Nyquist sampling;Space-time array;ESPRIT;Uniform rectangular array;Multiple-delay architecture},
  doi = {10.23919/EUSIPCO.2017.8081237},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346858.pdf},
}
In this paper, joint frequency and 2-D direction of arrival (DOA) estimation at sub-Nyquist sampling rates of a multi-band signal (MBS) comprising P disjoint narrowband signals is considered. Beginning with a standard uniform rectangular array (URA) consisting of M = M_x × M_y sensors, this paper proposes a simpler modification by adding an N - 1 delay channel network to only one of the sensors. A larger array is then formed by combining the sub-Nyquist sampled outputs of the URA and the delay channel network, referred to as the difference space-time (DST) array. Towards estimating the joint frequency and 2-D DOA on this DST array, a new method utilizing the 3-D spatial smoothing for rank enhancement and a subspace algorithm based on ESPRIT is presented. Furthermore, it is shown that an ADC sampling frequency of f_s ≥ B suffices, where B is the bandwidth of the narrowband signal. With the proposed approach, it is shown that O(MN/4) frequencies and their 2-D DOAs can be estimated even when all frequencies alias to the same frequency due to sub-Nyquist sampling. Appropriate simulation results are also presented to corroborate these findings.
Filtered multitone multicarrier modulation with partially overlapping sub-channels.
Shao, K.; Pi, L.; Yli-Kaakinen, J.; and Renfors, M.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 405-409, Aug 2017.
@InProceedings{8081238,
  author = {K. Shao and L. Pi and J. Yli-Kaakinen and M. Renfors},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Filtered multitone multicarrier modulation with partially overlapping sub-channels},
  year = {2017},
  pages = {405-409},
  abstract = {Future wireless networks demand multicarrier modulation schemes with improved spectrum efficiency and superior spectrum containment. Orthogonal frequency division multiplexing (OFDM) has been the favorite technique in recent developments, but due to its limited spectrum containment, various alternative schemes are under consideration for future systems. Theoretically, it is not possible to reach maximum spectrum efficiency, high spectral containment, and orthogonality of subcarriers simultaneously, when using quadrature amplitude modulation (QAM) for subcarriers. This has motivated the study of non-orthogonal multicarrier modulation schemes. This paper focuses on the filtered multitone (FMT) scheme, one of the classical configurations of filter bank multicarrier (FBMC) modulation utilizing QAM subcarrier symbols. Our main aim is to improve the spectral efficiency of FMT by introducing controlled overlap of adjacent subchannels. An analytical model is developed for evaluating the tradeoffs between spectrum efficiency and intercarrier interference (ICI) introduced by the overlap. An efficient fast convolution waveform processing scheme is adopted for the generation of the proposed waveform. It allows effective adjustment of the roll-off and subcarrier spacing to facilitate waveform adaptation in real time. Analytical studies, confirmed by simulation results, indicate that the proposed FMT system can obtain significant spectral density improvement without requiring additional ICI cancellation techniques.},
  keywords = {channel bank filters;intercarrier interference;interference suppression;OFDM modulation;quadrature amplitude modulation;filtered multitone multicarrier modulation;partially overlapping sub-channels;quadrature amplitude modulation;nonorthogonal multicarrier modulation schemes;filtered multitone scheme;filter bank multicarrier modulation;QAM subcarrier symbols;intercarrier interference;subcarrier spacing;FMT system;orthogonal frequency division multiplexing;fast convolution waveform processing scheme;Interference;OFDM;Quadrature amplitude modulation;Prototypes;Europe},
  doi = {10.23919/EUSIPCO.2017.8081238},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346804.pdf},
}
Future wireless networks demand multicarrier modulation schemes with improved spectrum efficiency and superior spectrum containment. Orthogonal frequency division multiplexing (OFDM) has been the favorite technique in recent developments, but due to its limited spectrum containment, various alternative schemes are under consideration for future systems. Theoretically, it is not possible to reach maximum spectrum efficiency, high spectral containment, and orthogonality of subcarriers simultaneously, when using quadrature amplitude modulation (QAM) for subcarriers. This has motivated the study of non-orthogonal multicarrier modulation schemes. This paper focuses on the filtered multitone (FMT) scheme, one of the classical configurations of filter bank multicarrier (FBMC) modulation utilizing QAM subcarrier symbols. Our main aim is to improve the spectral efficiency of FMT by introducing controlled overlap of adjacent subchannels. An analytical model is developed for evaluating the tradeoffs between spectrum efficiency and intercarrier interference (ICI) introduced by the overlap. An efficient fast convolution waveform processing scheme is adopted for the generation of the proposed waveform. It allows effective adjustment of the roll-off and subcarrier spacing to facilitate waveform adaptation in real time. Analytical studies, confirmed by simulation results, indicate that the proposed FMT system can obtain significant spectral density improvement without requiring additional ICI cancellation techniques.
Low complexity channel shortening for discrete multitone modulation systems.
Nguyen, V. M.; and Tomasin, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 410-414, Aug 2017.
@InProceedings{8081239,\n  author = {V. M. Nguyen and S. Tomasin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low complexity channel shortening for discrete multitone modulation systems},\n  year = {2017},\n  pages = {410-414},\n  abstract = {In discrete multitone (DMT) modulation systems, the channel duration can be longer than the cyclic prefix, yielding both inter-block and inter-carrier interference. In order to avoid these effects, time-domain equalization (TEQ) techniques are usually employed before DMT demodulation. This letter introduces a modification of the iterative minimum delay spread (MDS) method proposed by Lopez-Valcarce in [1] to adapt the TEQ coefficients to the channel. The proposed solution is based on a) the approximation to the solution of a set of equation, and b) a new reference time selection. The resulting scheme requires only a single matrix inversion at the last iteration rather than one per iteration, achieving a significantly lower implementation complexity than iterative MDS, while performing similarly in terms of spectral efficiency.},\n  keywords = {equalisers;intercarrier interference;iterative methods;matrix inversion;modulation;spread spectrum communication;wireless channels;low complexity channel shortening;discrete multitone modulation systems;cyclic prefix;time-domain equalization techniques;DMT demodulation;iterative minimum delay spread method;Lopez-Valcarce;reference time selection;intercarrier interference;single matrix inversion;Interference;Signal to noise ratio;Complexity theory;Discrete Fourier transforms;Iterative methods;Europe;Digital Communications;Discrete Multitone;Time-Domain Equalization;Subscriber Loops},\n  doi = {10.23919/EUSIPCO.2017.8081239},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341873.pdf},\n}\n\n
\n
\n\n\n
\n In discrete multitone (DMT) modulation systems, the channel duration can be longer than the cyclic prefix, yielding both inter-block and inter-carrier interference. In order to avoid these effects, time-domain equalization (TEQ) techniques are usually employed before DMT demodulation. This letter introduces a modification of the iterative minimum delay spread (MDS) method proposed by Lopez-Valcarce in [1] to adapt the TEQ coefficients to the channel. The proposed solution is based on a) the approximation to the solution of a set of equation, and b) a new reference time selection. The resulting scheme requires only a single matrix inversion at the last iteration rather than one per iteration, achieving a significantly lower implementation complexity than iterative MDS, while performing similarly in terms of spectral efficiency.\n
\n\n\n
\n\n\n
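For orientation, the following is a minimal channel-shortening sketch in the eigenfilter family: it maximizes the effective-channel energy inside a short window relative to the energy outside. It shares the goal, though not the specific update rules, of the modified MDS method above; the channel, TEQ length, window and delay are all illustrative.

import numpy as np
from scipy.linalg import eigh

def shorten_channel(h, teq_len=16, win_len=8, delay=4):
    # Build the convolution matrix so that conv(h, w) = C @ w.
    n_eff = len(h) + teq_len - 1
    C = np.zeros((n_eff, teq_len))
    for k in range(teq_len):
        C[k:k + len(h), k] = h
    inside = np.zeros(n_eff, dtype=bool)
    inside[delay:delay + win_len] = True
    A = C[inside].T @ C[inside]      # effective-channel energy kept inside the window
    B = C[~inside].T @ C[~inside]    # energy leaking outside the window
    # The TEQ is the generalized eigenvector with the largest ratio w'Aw / w'Bw.
    _, vecs = eigh(A, B + 1e-9 * np.eye(teq_len))
    return vecs[:, -1]

rng = np.random.default_rng(0)
h = rng.standard_normal(32) * np.exp(-0.15 * np.arange(32))   # long toy channel
w = shorten_channel(h)
c = np.convolve(h, w)
print("energy inside the 8-tap window:", np.sum(c[4:12] ** 2) / np.sum(c ** 2))

Note that this eigenfilter approach needs one generalized eigendecomposition per design, which is the kind of per-iteration cost the paper's single-inversion scheme is designed to avoid.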
An online algorithm for joint long-term base station activation and beamforming in green downlink MISO networks.
Lin, J.; Li, Q.; and Deng, H.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 415-419, Aug 2017.

@InProceedings{8081240,
  author = {J. Lin and Q. Li and H. Deng},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {An online algorithm for joint long-term base station activation and beamforming in green downlink MISO networks},
  year = {2017},
  pages = {415-419},
  abstract = {In modern wireless networks, optimizing the association between base stations (BSs) and users effectively improves network performance. On the other hand, a frequently changing BS-user association imposes a considerable operational burden on network management, e.g., it consumes extra power to awaken the deactivated BSs and to support users' switching among BSs. This motivates us to balance the flexibility and stability of BS-user association, leading to a long-term BS association problem. In this paper, we address this by considering the green communication problem in a MISO network where multiple BSs cooperatively forward information to multiple users. We minimize total power consumption (including not only the transmit power and device maintenance power, but also the switching power consumed as the BS-user association changes) by joint long-term BS activation and beamforming, i.e., we jointly optimize the active BSs, the transmit beamformer and the user switching frequency among BSs. Due to the inherent causality constraints on channel state information (CSI), we develop an online algorithm for the optimization problem, utilizing the sample average approximation (SAA) method. To improve the algorithm's efficiency, we further fit the problem into the framework of the alternating direction method of multipliers (ADMM), and finally design a low-complexity distributed online algorithm for the green communication problem.},
  keywords = {array signal processing;cooperative communication;integer programming;iterative methods;radio networks;telecommunication control;telecommunication network management;telecommunication power management;telecommunication switching;long-term base station activation;green downlink MISO networks;network management;MISO network;total power consumption;transmit power;device maintenance power;switching power;long-term BS activation;transmit beamformer;Switches;Optimization;Array signal processing;Maintenance engineering;Signal processing algorithms;Approximation algorithms;Quality of service},
  doi = {10.23919/EUSIPCO.2017.8081240},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341616.pdf},
}

In modern wireless networks, optimizing the association between base stations (BSs) and users effectively improves network performance. On the other hand, a frequently changing BS-user association imposes a considerable operational burden on network management, e.g., it consumes extra power to awaken the deactivated BSs and to support users' switching among BSs. This motivates us to balance the flexibility and stability of BS-user association, leading to a long-term BS association problem. In this paper, we address this by considering the green communication problem in a MISO network where multiple BSs cooperatively forward information to multiple users. We minimize total power consumption (including not only the transmit power and device maintenance power, but also the switching power consumed as the BS-user association changes) by joint long-term BS activation and beamforming, i.e., we jointly optimize the active BSs, the transmit beamformer and the user switching frequency among BSs. Due to the inherent causality constraints on channel state information (CSI), we develop an online algorithm for the optimization problem, utilizing the sample average approximation (SAA) method. To improve the algorithm's efficiency, we further fit the problem into the framework of the alternating direction method of multipliers (ADMM), and finally design a low-complexity distributed online algorithm for the green communication problem.
Joint channel estimation / data detection in MIMO-FBMC/OQAM systems — A tensor-based approach.
Kofidis, E.; Chatzichristos, C.; and de Almeida, A. L. F.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 420-424, Aug 2017.

@InProceedings{8081241,
  author = {E. Kofidis and C. Chatzichristos and A. L. F. {de Almeida}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Joint channel estimation / data detection in MIMO-FBMC/OQAM systems — A tensor-based approach},
  year = {2017},
  pages = {420-424},
  abstract = {Filter bank-based multicarrier (FBMC) systems have been considered a prominent candidate for replacing long-established cyclic prefix (CP)-based orthogonal frequency division multiplexing (CP-OFDM) in the physical layer of next generation communications systems. In particular, offset quadrature amplitude modulation (OQAM)-based FBMC has received increasing attention due to, among other features, its potential for maximum spectral efficiency. It suffers, however, from an intrinsic self-interference effect, which complicates signal processing tasks at the receiver, including synchronization, channel estimation and equalization. In a multiple-input multiple-output (MIMO) configuration, the multi-antenna interference also has to be taken into account. (Semi-)blind FBMC/OQAM receivers have been little studied so far, and mainly for single-antenna systems. The problem of joint channel estimation and data detection in a MIMO-FBMC/OQAM system, given limited or no training information, is studied in this paper from a tensor-based viewpoint, in light of the success of such techniques in OFDM applications. Simulation-based comparisons with CP-OFDM are included, for realistic transmission models.},
  keywords = {channel bank filters;channel estimation;MIMO communication;OFDM modulation;quadrature amplitude modulation;next generation communications systems;orthogonal frequency division multiplexing;data detection;quadrature amplitude modulation;cyclic prefix;tensor;channel estimation;single-antenna systems;multiantenna interference;equalization;intrinsic self-interference effect;CP-OFDM;multicarrier systems;MIMO-FBMC/OQAM system;Channel estimation;OFDM;Receivers;Tensile stress;MIMO;Interference;Synchronization},
  doi = {10.23919/EUSIPCO.2017.8081241},
  issn = {2076-1465},
  month = {Aug},
}

Filter bank-based multicarrier (FBMC) systems have been considered a prominent candidate for replacing long-established cyclic prefix (CP)-based orthogonal frequency division multiplexing (CP-OFDM) in the physical layer of next generation communications systems. In particular, offset quadrature amplitude modulation (OQAM)-based FBMC has received increasing attention due to, among other features, its potential for maximum spectral efficiency. It suffers, however, from an intrinsic self-interference effect, which complicates signal processing tasks at the receiver, including synchronization, channel estimation and equalization. In a multiple-input multiple-output (MIMO) configuration, the multi-antenna interference also has to be taken into account. (Semi-)blind FBMC/OQAM receivers have been little studied so far, and mainly for single-antenna systems. The problem of joint channel estimation and data detection in a MIMO-FBMC/OQAM system, given limited or no training information, is studied in this paper from a tensor-based viewpoint, in light of the success of such techniques in OFDM applications. Simulation-based comparisons with CP-OFDM are included, for realistic transmission models.
Unmixing dynamic PET images with a PALM algorithm.
Cavalcanti, Y. C.; Oberlin, T.; Dobigeon, N.; and Tauber, C.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 425-429, Aug 2017.

@InProceedings{8081242,
  author = {Y. C. Cavalcanti and T. Oberlin and N. Dobigeon and C. Tauber},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Unmixing dynamic PET images with a PALM algorithm},
  year = {2017},
  pages = {425-429},
  abstract = {Unmixing is a ubiquitous task in hyperspectral image analysis which consists of jointly extracting typical spectral signatures and estimating their respective proportions in the voxels, providing an explicit spatial mapping of these elementary signatures over the observed scene. Inspired by this approach, this paper proposes a new framework for analyzing dynamic positron emission tomography (PET) images. More precisely, a PET-dedicated mixing model and an associated unmixing algorithm are derived to jointly estimate the time-activity curves (TAC) characterizing each type of tissue, and the proportions of those tissues in the voxels of the imaged brain. In particular, the TAC corresponding to the specific binding class is expected to be voxel-wise dependent. The proposed approach allows this intrinsic spatial variability to be properly modeled, mitigated and quantified. Finally, the main contributions of the paper are twofold: first, we demonstrate that the unmixing concept is an appropriate analysis tool for dynamic PET images; and second, we propose a novel unmixing algorithm allowing for variability, which significantly improves the analysis and interpretation of dynamic PET images when compared with state-of-the-art unmixing algorithms.},
  keywords = {biological tissues;brain;medical image processing;positron emission tomography;unmixing dynamic PET images;PALM algorithm;ubiquitous task;hyperspectral image analysis;dynamic positron emission tomography images;tissues;brain imaging;time-activity curve estimation;positron emission tomography;Signal processing algorithms;Positron emission tomography;Heuristic algorithms;Principal component analysis;Algorithm design and analysis;Optimization;Europe},
  doi = {10.23919/EUSIPCO.2017.8081242},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342829.pdf},
}

Unmixing is a ubiquitous task in hyperspectral image analysis which consists of jointly extracting typical spectral signatures and estimating their respective proportions in the voxels, providing an explicit spatial mapping of these elementary signatures over the observed scene. Inspired by this approach, this paper proposes a new framework for analyzing dynamic positron emission tomography (PET) images. More precisely, a PET-dedicated mixing model and an associated unmixing algorithm are derived to jointly estimate the time-activity curves (TAC) characterizing each type of tissue, and the proportions of those tissues in the voxels of the imaged brain. In particular, the TAC corresponding to the specific binding class is expected to be voxel-wise dependent. The proposed approach allows this intrinsic spatial variability to be properly modeled, mitigated and quantified. Finally, the main contributions of the paper are twofold: first, we demonstrate that the unmixing concept is an appropriate analysis tool for dynamic PET images; and second, we propose a novel unmixing algorithm allowing for variability, which significantly improves the analysis and interpretation of dynamic PET images when compared with state-of-the-art unmixing algorithms.
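As a sketch of the algorithm family named in the title, here is a generic PALM (proximal alternating linearized minimization) loop for a nonnegative factorization Y ≈ MA: alternating projected gradient steps with step sizes set by the Lipschitz constants of the partial gradients. The paper's PET-specific mixing model adds a voxel-wise variability term that is not reproduced here.

import numpy as np

def palm_unmix(Y, n_src, n_iter=500, seed=0):
    rng = np.random.default_rng(seed)
    n_time, n_vox = Y.shape
    M = np.abs(rng.standard_normal((n_time, n_src)))   # time-activity curves
    A = np.abs(rng.standard_normal((n_src, n_vox)))    # tissue proportions
    for _ in range(n_iter):
        # Block M: gradient step with step 1/L_M, then project onto M >= 0.
        L_M = np.linalg.norm(A @ A.T, 2) + 1e-12
        M = np.maximum(M - ((M @ A - Y) @ A.T) / L_M, 0.0)
        # Block A: gradient step with step 1/L_A, then project onto A >= 0.
        L_A = np.linalg.norm(M.T @ M, 2) + 1e-12
        A = np.maximum(A - (M.T @ (M @ A - Y)) / L_A, 0.0)
    return M, A

# Toy check: recover a random nonnegative factorization.
rng = np.random.default_rng(1)
Y = np.abs(rng.standard_normal((20, 3))) @ np.abs(rng.standard_normal((3, 100)))
M, A = palm_unmix(Y, n_src=3)
print("relative fit:", np.linalg.norm(Y - M @ A) / np.linalg.norm(Y))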
Effect of a humorous audio-visual stimulus on autonomic nervous system and heart of females.
Pande, K.; Nayak, S. K.; Gaur, D.; Ray, S. S.; Pal, K.; Patel, S. J.; Anis, A.; and Mohapatra, B.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 430-433, Aug 2017.

@InProceedings{8081243,
  author = {K. Pande and S. K. Nayak and D. Gaur and S. S. Ray and K. Pal and S. J. Patel and A. Anis and B. Mohapatra},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Effect of a humorous audio-visual stimulus on autonomic nervous system and heart of females},
  year = {2017},
  pages = {430-433},
  abstract = {This study was an attempt to characterize the effect of a humorous audio-visual stimulus on the autonomic nervous system (ANS) and the heart physiology of females. Electrocardiogram (ECG) signals were acquired from eleven female volunteers under pre- and post-stimulus conditions. Heart rate variability (HRV) and time-domain ECG analyses were performed to non-invasively assess the effect of the humorous audio-visual stimulus on the ANS and heart physiology, respectively. HRV analysis suggested an increase in parasympathetic activity during the post-stimulus period. Time-domain analysis of ECG signals suggested a post-stimulus alteration in the electrical activity of the heart. Artificial neural network (ANN) classification resulted in an efficiency of ≥ 85% in both HRV and time-domain ECG analyses.},
  keywords = {electrocardiography;medical signal processing;neural nets;neurophysiology;time-domain analysis;audio-visual stimulus;autonomic nervous system;females;heart physiology;electrocardiogram signals;female volunteers;post-stimulus conditions;post-stimulus period;ECG signals;post-stimulus alteration;HRV analysis;heart rate variability;time-domain ECG analysis;artificial neural network;ANN classification;Electrocardiography;Heart rate variability;Time-domain analysis;Physiology;Feature extraction;ECG;HRV;females;ANN;heart},
  doi = {10.23919/EUSIPCO.2017.8081243},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347699.pdf},
}

This study was an attempt to characterize the effect of a humorous audio-visual stimulus on the autonomic nervous system (ANS) and the heart physiology of females. Electrocardiogram (ECG) signals were acquired from eleven female volunteers under pre- and post-stimulus conditions. Heart rate variability (HRV) and time-domain ECG analyses were performed to non-invasively assess the effect of the humorous audio-visual stimulus on the ANS and heart physiology, respectively. HRV analysis suggested an increase in parasympathetic activity during the post-stimulus period. Time-domain analysis of ECG signals suggested a post-stimulus alteration in the electrical activity of the heart. Artificial neural network (ANN) classification resulted in an efficiency of ≥ 85% in both HRV and time-domain ECG analyses.
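A minimal sketch of the standard time-domain HRV features such a study relies on (mean RR, SDNN, RMSSD, pNN50), assuming R-peak detection has already been done upstream. A post-stimulus rise in RMSSD or pNN50 would be consistent with the increased parasympathetic activity reported.

import numpy as np

def hrv_time_domain(r_peaks_s):
    """r_peaks_s: R-peak times in seconds; returns common time-domain features."""
    rr = np.diff(r_peaks_s) * 1000.0           # RR intervals in ms
    drr = np.diff(rr)                          # successive RR differences
    return {
        "mean_rr_ms": rr.mean(),
        "sdnn_ms": rr.std(ddof=1),             # overall variability
        "rmssd_ms": np.sqrt(np.mean(drr ** 2)),  # short-term (vagal) marker
        "pnn50_pct": 100.0 * np.mean(np.abs(drr) > 50.0),
    }

# Example with synthetic R-peaks around 75 bpm.
rng = np.random.default_rng(0)
peaks = np.cumsum(0.8 + 0.05 * rng.standard_normal(120))
print(hrv_time_domain(peaks))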
Triaxial rehabilitative data analysis incorporating matching pursuit.
Lee, T. K. M.; Leo, K.; Sanei, S.; Chew, E.; and Zhao, L.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 434-438, Aug 2017.

@InProceedings{8081244,
  author = {T. K. M. Lee and K. Leo and S. Sanei and E. Chew and L. Zhao},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Triaxial rehabilitative data analysis incorporating matching pursuit},
  year = {2017},
  pages = {434-438},
  abstract = {The continuing drive for better rehabilitative healthcare hinges on the availability of sensor data which can be shared and analysed. This leverages the widespread communications network to provide an integrated health management environment. In this paper, we delineate our current work in sensorizing rehabilitative tests of upper limb movements. Where previously we applied data-driven analysis, we now employ time-frequency methods to provide a better analytical basis for our derivations. The use of the Matching Pursuit algorithm on biological signals has concentrated on brain signals and much less on human motion. Thus we contribute to the efficacy of the algorithm by employing it on rehabilitative data collected from widely available sensors. We describe how we obtained the parameters based on pre-analysing an available data set. By selecting the most useful signal constituents and applying this to signal denoising, we are able to better classify the condition of a patient automatically, which shows encouraging promise in the quest for integrative healthcare.},
  keywords = {brain;data analysis;feature extraction;health care;iterative methods;medical signal processing;patient rehabilitation;signal denoising;time-frequency analysis;sensor data;widespread communications network;integrated health management environment;rehabilitative tests;upper limb movements;data driven analysis;time-frequency methods;Matching Pursuit algorithm;biological signals;brain signals;integrative healthcare;continuing drive;rehabilitative healthcare hinges;signal constituents;triaxial rehabilitative data analysis;Matching pursuit algorithms;Instruments;Accelerometers;Force sensors;Monitoring;Force;Dictionaries;Matching pursuit;rehabilitation;accelerometer;instrumented objects},
  doi = {10.23919/EUSIPCO.2017.8081244},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347474.pdf},
}

The continuing drive for better rehabilitative healthcare hinges on the availability of sensor data which can be shared and analysed. This leverages the widespread communications network to provide an integrated health management environment. In this paper, we delineate our current work in sensorizing rehabilitative tests of upper limb movements. Where previously we applied data-driven analysis, we now employ time-frequency methods to provide a better analytical basis for our derivations. The use of the Matching Pursuit algorithm on biological signals has concentrated on brain signals and much less on human motion. Thus we contribute to the efficacy of the algorithm by employing it on rehabilitative data collected from widely available sensors. We describe how we obtained the parameters based on pre-analysing an available data set. By selecting the most useful signal constituents and applying this to signal denoising, we are able to better classify the condition of a patient automatically, which shows encouraging promise in the quest for integrative healthcare.
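For reference, a minimal matching-pursuit decomposition over a generic unit-norm dictionary; the paper's dictionary choice and the parameters tuned on their pre-analysed data set are not reproduced here. Denoising follows by reconstructing the signal from the few selected atoms and discarding the residual.

import numpy as np

def matching_pursuit(x, D, n_atoms=10):
    """Greedy MP: D holds unit-norm atoms in its columns."""
    residual = x.astype(float).copy()
    coeffs = np.zeros(D.shape[1])
    for _ in range(n_atoms):
        corr = D.T @ residual
        k = int(np.argmax(np.abs(corr)))   # best-matching atom
        coeffs[k] += corr[k]
        residual -= corr[k] * D[:, k]      # peel off its contribution
    return coeffs, residual

rng = np.random.default_rng(0)
D = rng.standard_normal((256, 512))
D /= np.linalg.norm(D, axis=0)
x = 2.0 * D[:, 7] - 1.5 * D[:, 90] + 0.01 * rng.standard_normal(256)
c, r = matching_pursuit(x, D, n_atoms=5)
print("residual energy fraction:", np.linalg.norm(r) ** 2 / np.linalg.norm(x) ** 2)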
Three-dimensional fluorescence lifetime imaging in confocal microscopy of living cells.
Baiazitova, L.; Čmiel, V.; Skopalík, J.; Svoboda, O.; and Provazník, I.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 439-443, Aug 2017.

@InProceedings{8081245,
  author = {L. Baiazitova and V. Čmiel and J. Skopalík and O. Svoboda and I. Provazník},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Three-dimensional fluorescence lifetime imaging in confocal microscopy of living cells},
  year = {2017},
  pages = {439-443},
  abstract = {Fluorescence lifetime imaging (FLIM) is a modern optical method which increases the potential of standard microscopy. This paper shows the possibilities of extended fluorescence lifetime evaluation and imaging in studying three-dimensional structures such as compartments of living cells with different fluorescence lifetimes. A method for quasi-FLIM image calculation is presented and image processing steps useful for biological experiments are suggested. The method was tested on isolated cardiomyocyte cells (CMs) and rat bone marrow stromal cells (MSCs) labelled with SPIO-rhodamine nanoparticles and stained with standard fluorescent dyes. We proved that it is possible to use the exponential decrease of fluorescence in time, together with lifetime parameters, for pseudo-colour 3D image mapping of living cells and their compartments, which is not a standard function of confocal microscopes.},
  keywords = {biomedical optical imaging;bone;cellular biophysics;dyes;fluorescence;iron compounds;medical image processing;nanomagnetics;nanomedicine;nanoparticles;optical microscopy;superparamagnetism;fluorescent dyes;optical method;three-dimensional fluorescence lifetime imaging;cardiomyocyte cells;SPIO-rhodamine nanoparticles;confocal microscopy;pseudocolour 3D image mapping;lifetime parameters;rat bone marrow stromal cells;image processing steps;quasiFLIM image calculation;living cells;Fe2O3;Fluorescence;Microscopy;Bars;Image segmentation;Bones;Nanoparticles;fluorescence lifetime;FLIM;cardiomyocyte;stromal cell;image segmentation},
  doi = {10.23919/EUSIPCO.2017.8081245},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347399.pdf},
}

Fluorescence lifetime imaging (FLIM) is a modern optical method which increases the potential of standard microscopy. This paper shows the possibilities of extended fluorescence lifetime evaluation and imaging in studying three-dimensional structures such as compartments of living cells with different fluorescence lifetimes. A method for quasi-FLIM image calculation is presented and image processing steps useful for biological experiments are suggested. The method was tested on isolated cardiomyocyte cells (CMs) and rat bone marrow stromal cells (MSCs) labelled with SPIO-rhodamine nanoparticles and stained with standard fluorescent dyes. We proved that it is possible to use the exponential decrease of fluorescence in time, together with lifetime parameters, for pseudo-colour 3D image mapping of living cells and their compartments, which is not a standard function of confocal microscopes.
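A plausible reading of the per-pixel lifetime estimation behind a quasi-FLIM map is a mono-exponential fit I(t) = I0 · exp(-t/tau) at every pixel; the sketch below does this by log-linear least squares on a synthetic decay stack. The array shapes and names are assumptions, not the authors' pipeline, and the resulting tau map is what would be rendered in pseudo-colour.

import numpy as np

def lifetime_map(stack, t):
    """stack: (n_frames, H, W) decay images; t: (n_frames,) times in ns."""
    n, h, w = stack.shape
    y = np.log(np.maximum(stack.reshape(n, -1), 1e-6))   # (n, H*W) log-intensities
    T = np.vstack([t, np.ones_like(t)]).T                # design matrix [t, 1]
    slope, _ = np.linalg.lstsq(T, y, rcond=None)[0]      # per-pixel decay slope
    tau = -1.0 / np.minimum(slope, -1e-9)                # tau = -1/slope, kept positive
    return tau.reshape(h, w)

t = np.linspace(0, 10, 16)                               # ns
true_tau = 2.5
stack = np.exp(-t / true_tau)[:, None, None] * np.ones((16, 4, 4))
print(lifetime_map(stack, t).mean())                     # ~2.5 ns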
Characterizing absence epileptic seizures from depth cortical measurements.
Akhavan, S.; Phlypo, R.; Soltanian-Zadeh, H.; Studer, F.; Depaulis, A.; and Jutten, C.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 444-448, Aug 2017.

@InProceedings{8081246,
  author = {S. Akhavan and R. Phlypo and H. Soltanian-Zadeh and F. Studer and A. Depaulis and C. Jutten},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Characterizing absence epileptic seizures from depth cortical measurements},
  year = {2017},
  pages = {444-448},
  abstract = {In this paper, we localize the onset regions and investigate the dynamics of absence epileptic seizures using local field potential recordings acquired with depth electrodes. We assume that there are some hidden states (under a Markovian model) during the seizure and that each spike of the seizure is generated when one of the states is activated. Each state is considered as the linear superposition of a few epileptic activities (substates) and each epileptic activity is described by two characteristics: 1) the spatial topography and 2) the temporal representation. Experimental results demonstrate the generality of the proposed model in characterizing absence epileptic seizures.},
  keywords = {bioelectric potentials;biomedical electrodes;electroencephalography;hidden Markov models;medical disorders;medical signal processing;neurophysiology;depth cortical measurements;local field potential recording;depth electrode;hidden states;epileptic activity;absence epileptic seizures;Markovian model;linear superposition;spatial topography;temporal representation;Epilepsy;Brain modeling;Matrix decomposition;Adaptation models;Europe;Signal processing;Surfaces;Absence seizure;state;substate;spike},
  doi = {10.23919/EUSIPCO.2017.8081246},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347237.pdf},
}

In this paper, we localize the onset regions and investigate the dynamics of absence epileptic seizures using local field potential recordings acquired with depth electrodes. We assume that there are some hidden states (under a Markovian model) during the seizure and that each spike of the seizure is generated when one of the states is activated. Each state is considered as the linear superposition of a few epileptic activities (substates) and each epileptic activity is described by two characteristics: 1) the spatial topography and 2) the temporal representation. Experimental results demonstrate the generality of the proposed model in characterizing absence epileptic seizures.
Direct nonparametric estimation of the period and the shape of a periodic component in short duration signals.
Mohammad-Djafari, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 449-453, Aug 2017.

@InProceedings{8081247,
  author = {A. Mohammad-Djafari},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Direct nonparametric estimation of the period and the shape of a periodic component in short duration signals},
  year = {2017},
  pages = {449-453},
  abstract = {In this paper, a new direct nonparametric estimation of the period and the shape of a periodic component in short duration signals is proposed and evaluated. Classical Fourier Transform (FT) methods lack precision and resolution when the duration of the signal is very short and the signal is noisy. The proposed method is based on a direct description of the problem as a linear inverse problem and a Bayesian inference approach with appropriate prior distributions. The expression of the joint posterior law of the period and the shape of the periodic component is obtained and used to determine both the period and the shape of the periodic component. Results on synthetic data show the performance of the proposed method compared to state-of-the-art methods.},
  keywords = {Bayes methods;Fourier transforms;inverse problems;signal processing;direct nonparametric estimation;periodic component;short duration signals;Bayesian inference;linear inverse problem;Fourier Transform methods;Shape;Estimation;Signal processing algorithms;Signal to noise ratio;Bayes methods;Uncertainty;Periodic signals;Period estimation;Short duration signals;Bayesian inference;Approximate Bayesian Computation (ABC)},
  doi = {10.23919/EUSIPCO.2017.8081247},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346155.pdf},
}

In this paper, a new direct nonparametric estimation of the period and the shape of a periodic component in short duration signals is proposed and evaluated. Classical Fourier Transform (FT) methods lack precision and resolution when the duration of the signal is very short and the signal is noisy. The proposed method is based on a direct description of the problem as a linear inverse problem and a Bayesian inference approach with appropriate prior distributions. The expression of the joint posterior law of the period and the shape of the periodic component is obtained and used to determine both the period and the shape of the periodic component. Results on synthetic data show the performance of the proposed method compared to state-of-the-art methods.
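The core idea, jointly estimating a period and its shape rather than going through the Fourier transform, can be sketched with a simple folding estimator: for each candidate period, the shape estimate is the average of the folded segments and the fit is the residual energy. This is only a maximum-likelihood skeleton under assumed integer periods and white noise; the paper develops the full Bayesian treatment with priors and a joint posterior.

import numpy as np

def estimate_period(x, p_min=4, p_max=60, tol=1.2):
    scores, shapes = {}, {}
    for p in range(p_min, p_max + 1):
        n = (len(x) // p) * p
        folds = x[:n].reshape(-1, p)
        shapes[p] = folds.mean(axis=0)               # shape estimate for period p
        # Residual normalized by n - p: p shape samples were fit from n data points.
        scores[p] = np.sum((folds - shapes[p]) ** 2) / (n - p)
    best = min(scores.values())
    # Every multiple of the true period fits equally well, so return the
    # smallest period whose residual is within `tol` of the best fit.
    p_hat = min(p for p, s in scores.items() if s <= tol * best)
    return p_hat, shapes[p_hat]

rng = np.random.default_rng(0)
shape = np.sin(2 * np.pi * np.arange(17) / 17) ** 3
x = np.tile(shape, 8) + 0.2 * rng.standard_normal(17 * 8)   # short, noisy record
p_hat, _ = estimate_period(x)
print("estimated period:", p_hat)                           # expect 17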
Investigation of nonlinear Granger causality in the context of epilepsy.
Mahjoub, C.; Bellanger, J. J.; Chaibi, S.; Kachouri, A.; and Le Bouquin Jeannès, R.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 454-458, Aug 2017.

@InProceedings{8081248,
  author = {C. Mahjoub and J. J. Bellanger and S. Chaibi and A. Kachouri and R. {Le Bouquin Jeannès}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Investigation of nonlinear Granger causality in the context of epilepsy},
  year = {2017},
  pages = {454-458},
  abstract = {Granger causality approaches have been widely used to estimate effective connectivity in complex dynamic systems. These techniques are based on the building of predictive models which depend not only on a proper selection of the predictive vector size but also on the chosen class of regression functions. The question addressed in this paper is the estimation of the model order in the computation of Granger causality indices to characterize the propagation flow between simulated epileptic signals. In this contribution, a new strategy is proposed to select a suitable model order for potentially nonlinear systems. A nonlinear vectorial autoregressive model based on a wavelet network is considered for the identification, and an optimal nonlinear model order is selected using the Bayesian information criterion and imported into nonlinear kernel predictors to derive Granger causality. Simulations are first conducted on a linear autoregressive model, then on toy nonlinear models and, finally, on simulated intracranial electroencephalographic signals obtained from an electrophysiology-based model to reveal the directional relationships between time series data. The performance of our approach proves the effectiveness of the new strategy for Granger index estimation.},
  keywords = {autoregressive processes;Bayes methods;causality;electroencephalography;medical signal processing;neurophysiology;physiological models;regression analysis;time series;Bayesian information criterion;nonlinear kernel predictors;linear autoregressive model;toy nonlinear models;simulated intracranial electroencephalographic signals;Granger index estimation;epilepsy;complex dynamic systems;predictive models;predictive vectors size;regression functions;Granger causality indices;propagation flow;simulated epileptic signals;potentially nonlinear systems;nonlinear vectorial autoregressive model;wavelet network;optimal nonlinear model order;nonlinear Granger causality;time series data;Brain modeling;Computational modeling;Mathematical model;Kernel;Indexes;Estimation;Data models;Epilepsy;Granger causality;Bayesian information criterion;physiology-based model},
  doi = {10.23919/EUSIPCO.2017.8081248},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347288.pdf},
}

Granger causality approaches have been widely used to estimate effective connectivity in complex dynamic systems. These techniques are based on the building of predictive models which depend not only on a proper selection of the predictive vector size but also on the chosen class of regression functions. The question addressed in this paper is the estimation of the model order in the computation of Granger causality indices to characterize the propagation flow between simulated epileptic signals. In this contribution, a new strategy is proposed to select a suitable model order for potentially nonlinear systems. A nonlinear vectorial autoregressive model based on a wavelet network is considered for the identification, and an optimal nonlinear model order is selected using the Bayesian information criterion and imported into nonlinear kernel predictors to derive Granger causality. Simulations are first conducted on a linear autoregressive model, then on toy nonlinear models and, finally, on simulated intracranial electroencephalographic signals obtained from an electrophysiology-based model to reveal the directional relationships between time series data. The performance of our approach proves the effectiveness of the new strategy for Granger index estimation.
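A minimal linear Granger-causality pipeline with BIC-based model-order selection, to make the quantities concrete; the paper replaces these linear predictors with wavelet-network nonlinear ones, but the order-selection-then-restricted/full-comparison structure is the same.

import numpy as np

def lagmat(x, p):
    # Row t holds [x[t-1], ..., x[t-p]] for targets x[p:].
    return np.column_stack([x[p - k - 1:len(x) - k - 1] for k in range(p)])

def resid_var(y, X):
    beta, *_ = np.linalg.lstsq(X, y, rcond=None)
    r = y - X @ beta
    return np.mean(r ** 2)

def granger_index(x, y, p_max=10):
    """Causality x -> y; model order chosen by BIC on the full model."""
    n = len(y)
    best_p, best_bic = 1, np.inf
    for p in range(1, p_max + 1):
        s2 = resid_var(y[p:], np.hstack([lagmat(y, p), lagmat(x, p)]))
        bic = (n - p) * np.log(s2) + 2 * p * np.log(n - p)   # 2p parameters
        if bic < best_bic:
            best_p, best_bic = p, bic
    p = best_p
    s2_full = resid_var(y[p:], np.hstack([lagmat(y, p), lagmat(x, p)]))
    s2_restr = resid_var(y[p:], lagmat(y, p))
    return np.log(s2_restr / s2_full), p

rng = np.random.default_rng(0)
x = rng.standard_normal(2000)
y = np.zeros(2000)
for t in range(2, 2000):                       # y driven by the past of x
    y[t] = 0.5 * y[t - 1] + 0.8 * x[t - 2] + 0.1 * rng.standard_normal()
print("GC x->y: %.3f (order %d)" % granger_index(x, y))
print("GC y->x: %.3f" % granger_index(y, x)[0])   # should be near zero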
EEG seizure detection by integrating slantlet transform with sparse coding.
Ibrahim, A. K.; Zhuang, H.; Erdol, N.; and Ali, A. M.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 459-462, Aug 2017.

@InProceedings{8081249,
  author = {A. K. Ibrahim and H. Zhuang and N. Erdol and A. M. Ali},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {EEG seizure detection by integrating slantlet transform with sparse coding},
  year = {2017},
  pages = {459-462},
  abstract = {EEG signals, recording both normal and abnormal activities of neurons in the brain, are widely used in epilepsy detection. In this paper, an EEG signal classification method based on the Slantlet Transform and sparse coding is proposed to greatly reduce the number of false alarms and improve the speed of detection. With the Slantlet Transform, salient information of EEG signals is mapped into a sparse space. In order to ensure good detection rates from the EEG signals, a Sparse Representation Classifier is adopted. Experimental results demonstrate that, in terms of detection rate, the proposed method, which integrates the Slantlet Transform with sparse coding, outperforms current state-of-the-art EEG seizure detection methods.},
  keywords = {cellular biophysics;electroencephalography;medical disorders;medical signal detection;medical signal processing;neurophysiology;signal classification;EEG signal classification method;sparse coding;sparse space;Slantlet transform;epilepsy detection;sparse representation classifier;EEG seizure detection;abnormal neuron activities;Electroencephalography;Feature extraction;Filter banks;Discrete wavelet transforms;Epilepsy;Signal processing algorithms;EEG signal;seizure;Slantlet Transform;Sparse Representation Classifier},
  doi = {10.23919/EUSIPCO.2017.8081249},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347660.pdf},
}

EEG signals, recording both normal and abnormal activities of neurons in the brain, are widely used in epilepsy detection. In this paper, an EEG signal classification method based on the Slantlet Transform and sparse coding is proposed to greatly reduce the number of false alarms and improve the speed of detection. With the Slantlet Transform, salient information of EEG signals is mapped into a sparse space. In order to ensure good detection rates from the EEG signals, a Sparse Representation Classifier is adopted. Experimental results demonstrate that, in terms of detection rate, the proposed method, which integrates the Slantlet Transform with sparse coding, outperforms current state-of-the-art EEG seizure detection methods.
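A sketch of the sparse representation classifier stage: code a test feature vector over a dictionary of training features (plain orthogonal matching pursuit here) and pick the class with the smallest class-wise reconstruction residual. Slantlet-transform feature extraction is assumed to happen upstream, and all sizes are illustrative.

import numpy as np

def omp(D, x, n_nonzero=10):
    idx, r = [], x.copy()
    for _ in range(n_nonzero):
        idx.append(int(np.argmax(np.abs(D.T @ r))))     # most correlated atom
        sub = D[:, idx]
        coef, *_ = np.linalg.lstsq(sub, x, rcond=None)  # refit on selected atoms
        r = x - sub @ coef
    c = np.zeros(D.shape[1])
    c[idx] = coef
    return c

def src_classify(D, labels, x):
    c = omp(D, x)
    residuals = {cls: np.linalg.norm(x - D[:, labels == cls] @ c[labels == cls])
                 for cls in np.unique(labels)}
    return min(residuals, key=residuals.get)            # smallest residual wins

rng = np.random.default_rng(0)
D = rng.standard_normal((64, 40))
D /= np.linalg.norm(D, axis=0)
labels = np.repeat([0, 1], 20)                          # e.g. normal / seizure
x = D[:, labels == 1] @ np.abs(rng.standard_normal(20)) * 0.2
x /= np.linalg.norm(x)
print("predicted class:", src_classify(D, labels, x))   # expect 1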
High-dimensional optimization of color coded apertures for compressive spectral cameras.
Rueda, H.; Arguello, H.; and Arce, G. R.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 463-467, Aug 2017.

@InProceedings{8081250,
  author = {H. Rueda and H. Arguello and G. R. Arce},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {High-dimensional optimization of color coded apertures for compressive spectral cameras},
  year = {2017},
  pages = {463-467},
  abstract = {A spectral image can be regarded as a three-dimensional cube where each pixel is a vector of intensities representing a spectral signature. Compressive spectral imaging (CSI) is a sensing and reconstruction framework, based on the fundamentals of compressive sensing theory, which focuses on capturing spectral images efficiently, exploiting their highly correlated information by coding their spectral characteristics, commonly using a black-and-white, grayscale or, recently, a color coded aperture. The distribution of the entries of the coded apertures determines the quality of the estimated spectral images. State-of-the-art methods have used random coded apertures, and some optimization procedures have focused on the optimal design of horizontal sections of the coded apertures; however, they do not fully exploit the spatio-spectral correlations within the spectral images. To that end, this paper proposes a high-dimensional optimization procedure to design color coded apertures for CSI systems, which exploits not only the spectral correlations but also the spatial correlations within a spectral image. Simulations analyzing the conditioning of the sensing matrices, as well as the reconstruction quality of the attained spectral images, show the improvement entailed by the proposed method.},
  keywords = {cameras;compressed sensing;correlation methods;image coding;image colour analysis;image reconstruction;image sensors;optimisation;random codes;color coded aperture;compressive spectral cameras;sensing matrices;3D cube;CSI;high-dimensional optimization procedure;spatio-spectral correlations;random coded apertures;spectral characteristics;compressive sensing theory;compressive spectral imaging;spectral signature;Apertures;Image color analysis;Optimization;Image coding;Optical imaging;Optical sensors;Compressive spectral imaging;coded aperture design;color filter array;numerical optimization},
  doi = {10.23919/EUSIPCO.2017.8081250},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347285.pdf},
}

A spectral image can be regarded as a three-dimensional cube where each pixel is a vector of intensities representing a spectral signature. Compressive spectral imaging (CSI) is a sensing and reconstruction framework, based on the fundamentals of compressive sensing theory, which focuses on capturing spectral images efficiently, exploiting their highly correlated information by coding their spectral characteristics, commonly using a black-and-white, grayscale or, recently, a color coded aperture. The distribution of the entries of the coded apertures determines the quality of the estimated spectral images. State-of-the-art methods have used random coded apertures, and some optimization procedures have focused on the optimal design of horizontal sections of the coded apertures; however, they do not fully exploit the spatio-spectral correlations within the spectral images. To that end, this paper proposes a high-dimensional optimization procedure to design color coded apertures for CSI systems, which exploits not only the spectral correlations but also the spatial correlations within a spectral image. Simulations analyzing the conditioning of the sensing matrices, as well as the reconstruction quality of the attained spectral images, show the improvement entailed by the proposed method.
Multi-resolution reconstruction algorithm for compressive single pixel spectral imaging.
Garcia, H.; Correa, C. V.; Villarreal, O.; Pinilla, S.; and Arguello, H.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 468-472, Aug 2017.

@InProceedings{8081251,
  author = {H. Garcia and C. V. Correa and O. Villarreal and S. Pinilla and H. Arguello},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Multi-resolution reconstruction algorithm for compressive single pixel spectral imaging},
  year = {2017},
  pages = {468-472},
  abstract = {Spectral imaging is useful in a wide range of applications for non-invasive detection and classification. However, the massive amount of involved data increases its processing and storage costs. In contrast, compressive spectral imaging (CSI) establishes that the three-dimensional data cube can be recovered from a small set of projections that are generally captured by 2-dimensional detectors. Furthermore, the single-pixel camera (SPC) has also been employed for spectral imaging. Specifically, the SPC captures the spatial and spectral information in a single measurement. CSI reconstructions are traditionally obtained by solving a minimization problem using iterative algorithms. However, the computational load of these algorithms is high due to the dimensionality of the involved sensing matrices. In this paper, a multi-resolution (MR) reconstruction model is proposed such that the complexity of the inverse problem is reduced. In particular, this model uses the spectral correlation to group pixels with similar spectral characteristics. Simulation results show that the MR model improves the reconstruction PSNR by up to 9 dB with respect to traditional methods. In addition, the proposed approach is 79% faster, using only 25% of the measurements.},
  keywords = {cameras;correlation methods;image reconstruction;image resolution;inverse problems;iterative methods;matrix algebra;storing costs;compressive spectral imaging;three-dimensional data cube;2-dimensional detectors;single-pixel camera;SPC;spatial information;spectral information;CSI reconstructions;iterative algorithms;multiresolution reconstruction model;spectral correlation;group pixels;reconstruction PSNR;multiresolution reconstruction algorithm;compressive single pixel spectral imaging;sensing matrices;CSI;minimization problem;inverse problem;noise figure 9.0 dB;Image reconstruction;Imaging;Signal processing algorithms;Image resolution;Detectors;Apertures},
  doi = {10.23919/EUSIPCO.2017.8081251},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347321.pdf},
}

Spectral imaging is useful in a wide range of applications for non-invasive detection and classification. However, the massive amount of involved data increases its processing and storage costs. In contrast, compressive spectral imaging (CSI) establishes that the three-dimensional data cube can be recovered from a small set of projections that are generally captured by 2-dimensional detectors. Furthermore, the single-pixel camera (SPC) has also been employed for spectral imaging. Specifically, the SPC captures the spatial and spectral information in a single measurement. CSI reconstructions are traditionally obtained by solving a minimization problem using iterative algorithms. However, the computational load of these algorithms is high due to the dimensionality of the involved sensing matrices. In this paper, a multi-resolution (MR) reconstruction model is proposed such that the complexity of the inverse problem is reduced. In particular, this model uses the spectral correlation to group pixels with similar spectral characteristics. Simulation results show that the MR model improves the reconstruction PSNR by up to 9 dB with respect to traditional methods. In addition, the proposed approach is 79% faster, using only 25% of the measurements.
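The multi-resolution idea can be illustrated in a toy single-pixel setting: if pixels are grouped by similar spectra, the unknowns per band shrink from the number of pixels to the number of groups, so far fewer measurements suffice. The grouping is handed to the solver here, whereas the paper estimates it from spectral correlations; all dimensions are illustrative.

import numpy as np

rng = np.random.default_rng(0)
n_pix, n_bands, K, m = 256, 8, 4, 64          # m measurements << n_pix pixels
groups = rng.integers(0, K, n_pix)            # pixel -> group map (assumed known)
S_true = np.abs(rng.standard_normal((K, n_bands)))
X = S_true[groups]                            # (n_pix, n_bands) scene

Phi = rng.choice([0.0, 1.0], size=(m, n_pix))            # SPC binary patterns
Y = Phi @ X + 0.01 * rng.standard_normal((m, n_bands))   # one shot per pattern/band

G = np.zeros((n_pix, K))
G[np.arange(n_pix), groups] = 1.0
A = Phi @ G                                   # reduced (m x K) sensing matrix
S_hat, *_ = np.linalg.lstsq(A, Y, rcond=None) # K unknowns per band, not n_pix
print("spectra error:", np.linalg.norm(S_hat - S_true) / np.linalg.norm(S_true))

The reduced system is tiny and well-conditioned, which is the mechanism behind the reported speed-up and measurement savings.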
Honey dataset standard using hyperspectral imaging for machine learning problems.
Noviyanto, A.; and Abdullah, W. H.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 473-477, Aug 2017.

@InProceedings{8081252,
  author = {A. Noviyanto and W. H. Abdullah},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Honey dataset standard using hyperspectral imaging for machine learning problems},
  year = {2017},
  pages = {473-477},
  abstract = {Hyperspectral imaging has rarely been investigated for honey analyses, in contrast to optical spectroscopy, which has been widely investigated. The essential missing component to kick-start this research is a standard dataset of honey hyperspectral images, called hypercubes. This paper proposes a systematic procedure for the preparation of the first honey hypercube dataset using hyperspectral imaging. Moreover, a scalable and flexible dataset module is introduced to ease the interaction between raw hypercube data and machine learning software. The developed dataset greatly helps researchers progress on honey analysis, including constituent prediction and type classification, using hyperspectral imaging and machine learning.},
  keywords = {geophysical image processing;hyperspectral imaging;learning (artificial intelligence);honey dataset standard;machine learning problems;standard honey hyperspectral images;honey hypercube dataset;scalable dataset module;flexible dataset module;machine learning software;constituent prediction;Hypercubes;Feature extraction;Hyperspectral imaging;Standards;Databases;Metadata;Containers},
  doi = {10.23919/EUSIPCO.2017.8081252},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570344824.pdf},
}

Hyperspectral imaging has rarely been investigated for honey analyses, in contrast to optical spectroscopy, which has been widely investigated. The essential missing component to kick-start this research is a standard dataset of honey hyperspectral images, called hypercubes. This paper proposes a systematic procedure for the preparation of the first honey hypercube dataset using hyperspectral imaging. Moreover, a scalable and flexible dataset module is introduced to ease the interaction between raw hypercube data and machine learning software. The developed dataset greatly helps researchers progress on honey analysis, including constituent prediction and type classification, using hyperspectral imaging and machine learning.
Bayesian method with sparsity enforcing prior of dual-tree complex wavelet transform coefficients for X-ray CT image reconstruction.
Wang, L.; Mohammad-Djafari, A.; and Gac, N.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 478-482, Aug 2017.

@InProceedings{8081253,
  author = {L. Wang and A. Mohammad-Djafari and N. Gac},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Bayesian method with sparsity enforcing prior of dual-tree complex wavelet transform coefficients for X-ray CT image reconstruction},
  year = {2017},
  pages = {478-482},
  abstract = {In this paper, a Bayesian method with a hierarchical sparsity enforcing prior model for Dual-Tree Complex Wavelet Transform (DT-CWT) coefficients is proposed. This model is used for X-ray Computed Tomography (CT) image reconstruction. A generalized Student-t distributed prior model is used to enforce the sparse structure of the DT-CWT coefficients of the image. The joint Maximum A Posteriori (JMAP) algorithm is used in this Bayesian context. Comparisons with conventional and other state-of-the-art methods are presented, showing that the proposed method gives more accurate and robust reconstruction results when the dataset is insufficient.},
  keywords = {Bayes methods;computerised tomography;image reconstruction;maximum likelihood estimation;medical image processing;trees (mathematics);wavelet transforms;Bayesian method;X-ray CT image reconstruction;hierarchical sparsity;Dual-Tree Complex Wavelet Transform coefficients;generalized Student-t distributed prior model;DT-CWT coefficient;Bayesian context;tomography image reconstruction;Wavelet transforms;Bayes methods;Computed tomography;Image reconstruction;Computational modeling;Additive noise;Computed Tomography (CT);Bayesian Approach;Hierarchical Model;Dual-Tree Complex Wavelet Transformation (DT-CWT);Generalized Student-t distribution;Joint Maximum A Posterior (JMAP)},
  doi = {10.23919/EUSIPCO.2017.8081253},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347128.pdf},
}

In this paper, a Bayesian method with a hierarchical sparsity enforcing prior model for Dual-Tree Complex Wavelet Transform (DT-CWT) coefficients is proposed. This model is used for X-ray Computed Tomography (CT) image reconstruction. A generalized Student-t distributed prior model is used to enforce the sparse structure of the DT-CWT coefficients of the image. The joint Maximum A Posteriori (JMAP) algorithm is used in this Bayesian context. Comparisons with conventional and other state-of-the-art methods are presented, showing that the proposed method gives more accurate and robust reconstruction results when the dataset is insufficient.
A new multiplicative nonnegative matrix factorization method for unmixing hyperspectral images combined with multispectral data.
Benkouider, Y. K.; Karoui, M. S.; Deville, Y.; and Hosseini, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 483-487, Aug 2017.

@InProceedings{8081254,
  author = {Y. K. Benkouider and M. S. Karoui and Y. Deville and S. Hosseini},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A new multiplicative nonnegative matrix factorization method for unmixing hyperspectral images combined with multispectral data},
  year = {2017},
  pages = {483-487},
  abstract = {In this work, a novel algorithm is proposed for linearly unmixing hyperspectral images combined with multispectral data. This algorithm, which is used to unmix the considered hyperspectral image, is founded on nonnegative matrix factorization. It minimizes, with new multiplicative update rules, a novel cost function, which includes the multispectral data and a spectral degradation model between these data and the hyperspectral ones. The considered multispectral variables are also used to initialize the proposed algorithm. Tests, using synthetic data, are carried out to assess the performance of our algorithm and its robustness to spectral variability between the processed data. The obtained results are compared to those of state-of-the-art methods. These tests show that the proposed algorithm outperforms all the other considered approaches.},
  keywords = {geophysical image processing;hyperspectral imaging;matrix decomposition;hyperspectral ones;considered multispectral variables;synthetic data;processed data;multiplicative nonnegative matrix factorization method;multispectral data;linearly unmixing hyperspectral images;considered hyperspectral image;multiplicative update rules;Hyperspectral imaging;Signal processing algorithms;Algorithm design and analysis;Cost function;Spatial resolution;Data models;Hyper/multispectral imaging;linear unmixing;multiplicative update rule;nonnegative matrix factorization;spectral degradation model},
  doi = {10.23919/EUSIPCO.2017.8081254},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346773.pdf},
}

In this work, a novel algorithm is proposed for linearly unmixing hyperspectral images combined with multispectral data. This algorithm, which is used to unmix the considered hyperspectral image, is founded on nonnegative matrix factorization. It minimizes, with new multiplicative update rules, a novel cost function, which includes the multispectral data and a spectral degradation model between these data and the hyperspectral ones. The considered multispectral variables are also used to initialize the proposed algorithm. Tests, using synthetic data, are carried out to assess the performance of our algorithm and its robustness to spectral variability between the processed data. The obtained results are compared to those of state-of-the-art methods. These tests show that the proposed algorithm outperforms all the other considered approaches.
\n \n\n \n \n \n \n \n \n Hyperspectral image restoration based on spatio-spectral structure tensor regularization.\n \n \n \n \n\n\n \n Kurihara, R.; Ono, S.; Shirai, K.; and Okuda, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 488-492, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HyperspectralPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081255,\n  author = {R. Kurihara and S. Ono and K. Shirai and M. Okuda},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Hyperspectral image restoration based on spatio-spectral structure tensor regularization},\n  year = {2017},\n  pages = {488-492},\n  abstract = {We propose a regularization function for hyperspectral image restoration based on a newly-designed structure tensor. We adopt a convex optimization approach with the use of the nuclear norm of a matrix, termed as spatio-spectral structure tensor. It consists of the gradient components of a hyperspectral image cube w.r.t. the spatio-spectral domain. The proposed approach allows to penalize variations in the spectral domain as well as the spatial domain to exploit the spatio-spectral correlations. Our experiments on denoising of hyperspectral images show that the proposed regularization leads to significant improvements in restoration performance over state-of-the-art methods.},\n  keywords = {convex programming;gradient methods;hyperspectral imaging;image restoration;matrix algebra;tensors;convex optimization approach;hyperspectral image cube w.r.t;spatio-spectral domain;spatio-spectral correlations;hyperspectral images;hyperspectral image restoration;matrix nuclear norm;gradient components;spatio-spectral structure tensor regularization function;Hyperspectral imaging;Tensile stress;Image restoration;Convex functions;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081255},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346838.pdf},\n}\n\n
\n
\n\n\n
\n We propose a regularization function for hyperspectral image restoration based on a newly designed structure tensor. We adopt a convex optimization approach using the nuclear norm of a matrix, termed the spatio-spectral structure tensor, which consists of the gradient components of a hyperspectral image cube with respect to the spatio-spectral domain. The proposed approach penalizes variations in the spectral domain as well as the spatial domain, exploiting the spatio-spectral correlations. Our experiments on denoising of hyperspectral images show that the proposed regularization leads to significant improvements in restoration performance over state-of-the-art methods.\n
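A minimal sketch of evaluating such a regularizer follows, assuming forward finite differences and a per-pixel grouping of the spatial and spectral gradients into a matrix whose singular values are summed; the exact grouping and weighting used in the paper may differ.

```python
import numpy as np

def spatio_spectral_nuclear_norm(cube):
    """Illustrative regularizer for a hyperspectral cube of shape (H, W, B):
    at each pixel, stack the horizontal, vertical and spectral forward
    differences into a (B x 3) gradient matrix and sum its singular values."""
    dx = np.diff(cube, axis=1, append=cube[:, -1:, :])   # horizontal gradient
    dy = np.diff(cube, axis=0, append=cube[-1:, :, :])   # vertical gradient
    dl = np.diff(cube, axis=2, append=cube[:, :, -1:])   # spectral gradient
    G = np.stack([dx, dy, dl], axis=-1)                  # shape (H, W, B, 3)
    total = 0.0
    H, W = cube.shape[:2]
    for i in range(H):
        for j in range(W):
            s = np.linalg.svd(G[i, j], compute_uv=False)
            total += s.sum()                             # nuclear norm per pixel
    return total

cube = np.random.default_rng(0).random((8, 8, 5))
print(spatio_spectral_nuclear_norm(cube))
```

Summing singular values (rather than squared entries) encourages the per-pixel gradient matrix to be low rank, i.e. variations across bands that share a common direction, which is one way to read the claimed exploitation of spatio-spectral correlation.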
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A new approach to dictionary-based nonnegative matrix factorization.\n \n \n \n \n\n\n \n Cohen, J. E.; and Gillis, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 493-497, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081256,\n  author = {J. E. Cohen and N. Gillis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A new approach to dictionary-based nonnegative matrix factorization},\n  year = {2017},\n  pages = {493-497},\n  abstract = {In this paper, we propose a new model along with an algorithm for dictionary-based nonnegative matrix factorization. We show its effectiveness on spectral unmixing of hyperspectral images using self dictionary compared to state-of-the-art methods.},\n  keywords = {hyperspectral imaging;image processing;matrix decomposition;self dictionary;spectral unmixing;hyperspectral image unmixing;dictionary-based nonnegative matrix factorization;Signal processing algorithms;Dictionaries;Hyperspectral imaging;Clustering algorithms;Europe;Signal processing;Indexes;dictionary;nonnegative matrix factorization;hyperspectral imaging},\n  doi = {10.23919/EUSIPCO.2017.8081256},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342524.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a new model, along with an algorithm, for dictionary-based nonnegative matrix factorization. We show its effectiveness on spectral unmixing of hyperspectral images using a self dictionary, compared to state-of-the-art methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Denoising galaxy spectra with coupled dictionary learning.\n \n \n \n \n\n\n \n Fotiadou, K.; Tsagkatakis, G.; Moraes, B.; Abdalla, F. B.; and Tsakalides, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 498-502, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DenoisingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081257,\n  author = {K. Fotiadou and G. Tsagkatakis and B. Moraes and F. B. Abdalla and P. Tsakalides},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Denoising galaxy spectra with coupled dictionary learning},\n  year = {2017},\n  pages = {498-502},\n  abstract = {The Euclid satellite aims to measure accurately the global properties of the Universe, with particular emphasis on the properties of the mysterious Dark Energy that is driving the acceleration of its expansion. One of its two main observational probes relies on accurate measurements of the radial distances of galaxies through the identification of important features in their individual light spectra that are redshifted due to their receding velocity. However, several challenges for robust automated spectroscopic redshift estimation remain unsolved, one of which is the characterization of the types of spectra present in the observed galaxy population. This paper proposes a denoising technique that exploits the mathematical frameworks of Sparse Representations and Coupled Dictionary Learning, and tests it on simulated Euclid-like noisy spectroscopic templates. The reconstructed spectral profiles are able to improve the accuracy, reliability and robustness of automated redshift estimation methods. The key contribution of this work is the design of a novel model which considers coupled feature spaces, composed of high- and low-quality spectral profiles, when applied to the spectroscopic data denoising problem. The coupled dictionary learning technique is formulated within the context of the Alternating Direction Method of Multipliers, optimizing each variable via closed-form expressions. Experimental results suggest that the proposed powerful coupled dictionary learning scheme reconstructs successfully spectral profiles from their corresponding noisy versions, even with extreme noise scenarios.},\n  keywords = {astronomical image processing;astronomical techniques;cosmology;dark energy;galaxies;image denoising;image reconstruction;image representation;iterative methods;learning (artificial intelligence);red shift;signal denoising;individual light spectra;receding velocity;robust automated spectroscopic redshift estimation;denoising technique;noisy spectroscopic templates;automated redshift estimation methods;low-quality spectral profiles;spectroscopic data denoising problem;coupled dictionary learning technique;galaxy spectra;Euclid satellite;sparse representations;Noise measurement;Machine learning;Dictionaries;Noise reduction;Estimation;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081257},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347529.pdf},\n}\n\n
\n
\n\n\n
\n The Euclid satellite aims to measure accurately the global properties of the Universe, with particular emphasis on the properties of the mysterious Dark Energy that is driving the acceleration of its expansion. One of its two main observational probes relies on accurate measurements of the radial distances of galaxies through the identification of important features in their individual light spectra that are redshifted due to their receding velocity. However, several challenges for robust automated spectroscopic redshift estimation remain unsolved, one of which is the characterization of the types of spectra present in the observed galaxy population. This paper proposes a denoising technique that exploits the mathematical frameworks of Sparse Representations and Coupled Dictionary Learning, and tests it on simulated Euclid-like noisy spectroscopic templates. The reconstructed spectral profiles are able to improve the accuracy, reliability and robustness of automated redshift estimation methods. The key contribution of this work is the design of a novel model which considers coupled feature spaces, composed of high- and low-quality spectral profiles, when applied to the spectroscopic data denoising problem. The coupled dictionary learning technique is formulated within the context of the Alternating Direction Method of Multipliers, optimizing each variable via closed-form expressions. Experimental results suggest that the proposed coupled dictionary learning scheme successfully reconstructs spectral profiles from their corresponding noisy versions, even under extreme noise scenarios.\n
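As a simplified stand-in for the dictionary-based reconstruction step, the sketch below sparse-codes a noisy synthetic "spectrum" over a fixed DCT dictionary with orthogonal matching pursuit; the paper instead learns coupled high-/low-quality dictionaries with ADMM, which is not reproduced here, and all signal sizes are illustrative.

```python
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

# Denoise a synthetic "spectrum" by sparse coding over a fixed DCT dictionary.
rng = np.random.default_rng(0)
n = 128
k = np.arange(n)
D = np.cos(np.pi * np.outer(k + 0.5, k) / n)    # DCT-II atoms as columns
D /= np.linalg.norm(D, axis=0)

coef = np.zeros(n)
coef[[3, 17, 40]] = [4.0, -2.0, 1.5]            # sparse ground-truth code
clean = D @ coef
noisy = clean + rng.normal(0, 0.1, n)

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=5, fit_intercept=False)
omp.fit(D, noisy)                               # columns of D act as atoms
denoised = D @ omp.coef_                        # reconstruction from sparse code

snr = lambda s: 10 * np.log10(np.sum(clean**2) / np.sum((s - clean)**2))
print(f"noisy SNR: {snr(noisy):.1f} dB, denoised SNR: {snr(denoised):.1f} dB")
```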
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Restoration from multispectral blurred data with non-stationary instrument response.\n \n \n \n \n\n\n \n Hadj-Youcef, M. A.; Orieux, F.; Fraysse, A.; and Abergel, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 503-507, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RestorationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081258,\n  author = {M. A. Hadj-Youcef and F. Orieux and A. Fraysse and A. Abergel},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Restoration from multispectral blurred data with non-stationary instrument response},\n  year = {2017},\n  pages = {503-507},\n  abstract = {In this paper we propose an approach of image restoration from multispectral data provided by an imaging system. We specifically address two topics: (i) Development of a multi-wavelength direct model for non-stationary instrument response that includes a spatial convolution and a spectral integration, (ii) Implementation of multispectral image restoration using a regularized least-square, based on a quadratic criterion and minimized by a gradient algorithm. We test our approach on simulated data of the Mid-InfraRed Instrument IMager (MIRIM) of the James Webb Space Telescope (JWST). Our method shows a clear increase of spatial resolution compare to conventional methods.},\n  keywords = {astronomical image processing;astronomical techniques;gradient methods;image restoration;least squares approximations;multispectral blurred data;nonstationary instrument response;imaging system;multiwavelength direct model;multispectral image restoration;Mid-InfraRed Instrument IMager;James Webb Space Telescope;spatial resolution;Instruments;Detectors;Optical imaging;Convolution;Aerospace electronics;Optical detectors;Two dimensional displays;Direct Model;Multispectral Imaging;Inverse Problems;Image Restoration},\n  doi = {10.23919/EUSIPCO.2017.8081258},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347249.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose an approach for image restoration from multispectral data provided by an imaging system. We specifically address two topics: (i) development of a multi-wavelength direct model for a non-stationary instrument response that includes a spatial convolution and a spectral integration, and (ii) implementation of multispectral image restoration using regularized least squares, based on a quadratic criterion minimized by a gradient algorithm. We test our approach on simulated data of the Mid-InfraRed Instrument IMager (MIRIM) of the James Webb Space Telescope (JWST). Our method shows a clear increase in spatial resolution compared to conventional methods.\n
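The generic quadratic criterion J(x) = ||y - Hx||^2 + lambda ||Dx||^2 minimized by a gradient algorithm can be sketched as follows; the circulant blur `H` and the first-difference regularizer `D` below are toy 1-D stand-ins for the paper's multi-wavelength instrument model, and the step size and lambda are illustrative.

```python
import numpy as np

# Regularized least-squares restoration, J(x) = ||y - Hx||^2 + lam*||Dx||^2,
# minimized by plain gradient descent on a toy 1-D deblurring problem.
rng = np.random.default_rng(0)
n = 100
x_true = np.zeros(n); x_true[40:60] = 1.0

kernel = np.array([0.25, 0.5, 0.25])
H = np.zeros((n, n))
for i in range(n):
    for k, c in zip((-1, 0, 1), kernel):
        H[i, (i + k) % n] = c                       # circulant blur matrix
D = np.eye(n) - np.roll(np.eye(n), 1, axis=1)       # first-difference operator
y = H @ x_true + rng.normal(0, 0.02, n)

lam, step = 0.1, 0.4
x = np.zeros(n)
for _ in range(500):
    grad = 2 * H.T @ (H @ x - y) + 2 * lam * D.T @ D @ x
    x -= step * grad                                # gradient descent update

print("relative error:", np.linalg.norm(x - x_true) / np.linalg.norm(x_true))
```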
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Landmine detection from GPR data using convolutional neural networks.\n \n \n \n \n\n\n \n Lameri, S.; Lombardi, F.; Bestagini, P.; Lualdi, M.; and Tubaro, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 508-512, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LandminePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081259,\n  author = {S. Lameri and F. Lombardi and P. Bestagini and M. Lualdi and S. Tubaro},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Landmine detection from GPR data using convolutional neural networks},\n  year = {2017},\n  pages = {508-512},\n  abstract = {The presence of buried landmines is a serious threat in many areas around the World. Despite various techniques have been proposed in the literature to detect and recognize buried objects, automatic and easy to use systems providing accurate performance are still under research. Given the incredible results achieved by deep learning in many detection tasks, in this paper we propose a pipeline for buried landmine detection based on convolutional neural networks (CNNs) applied to ground-penetrating radar (GPR) images. The proposed algorithm is capable of recognizing whether a B-scan profile obtained from GPR acquisitions contains traces of buried mines. Validation of the presented system is carried out on real GPR acquisitions, albeit system training can be performed simply relying on synthetically generated data. Results show that it is possible to reach 95% of detection accuracy without training in real acquisition of landmine profiles.},\n  keywords = {ground penetrating radar;landmine detection;learning (artificial intelligence);military computing;military radar;neural nets;object recognition;radar computing;radar detection;radar imaging;detection accuracy;landmine profiles;GPR data;convolutional neural networks;buried landmines;serious threat;deep learning;detection tasks;buried landmine detection;ground-penetrating radar images;GPR acquisitions;system training;buried object recognition;buried object detection;CNNs;B-scan profile;B;Ground penetrating radar;Landmine detection;Training;Convolution;Pipelines},\n  doi = {10.23919/EUSIPCO.2017.8081259},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347200.pdf},\n}\n\n
\n
\n\n\n
\n The presence of buried landmines is a serious threat in many areas around the world. Although various techniques have been proposed in the literature to detect and recognize buried objects, automatic and easy-to-use systems providing accurate performance are still under research. Given the impressive results achieved by deep learning in many detection tasks, in this paper we propose a pipeline for buried landmine detection based on convolutional neural networks (CNNs) applied to ground-penetrating radar (GPR) images. The proposed algorithm is capable of recognizing whether a B-scan profile obtained from GPR acquisitions contains traces of buried mines. Validation of the presented system is carried out on real GPR acquisitions, although system training can be performed relying solely on synthetically generated data. Results show that it is possible to reach 95% detection accuracy without training on real acquisitions of landmine profiles.\n
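A minimal PyTorch sketch of a CNN classifier for B-scan patches is given below; the layer sizes, the 64x64 input resolution and the two-class head are illustrative assumptions, not the architecture reported in the paper.

```python
import torch
import torch.nn as nn

# Minimal CNN for classifying GPR B-scan patches as mine / no-mine.
class BScanCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
        )
        self.classifier = nn.Linear(32 * 16 * 16, 2)   # for 64x64 input patches

    def forward(self, x):
        x = self.features(x)
        return self.classifier(x.flatten(1))

model = BScanCNN()
patch = torch.randn(8, 1, 64, 64)                      # batch of synthetic patches
logits = model(patch)
loss = nn.CrossEntropyLoss()(logits, torch.randint(0, 2, (8,)))
loss.backward()                                        # one illustrative backward pass
print(logits.shape, float(loss))
```

Training such a model purely on synthetically generated B-scans, as the abstract describes, would only change the data pipeline, not this model definition.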
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A statistical model of tristimulus measurements within and between OLED displays.\n \n \n \n \n\n\n \n Raitoharju, M.; Kallio, S.; and Pellikka, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 513-517, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081260,\n  author = {M. Raitoharju and S. Kallio and M. Pellikka},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A statistical model of tristimulus measurements within and between OLED displays},\n  year = {2017},\n  pages = {513-517},\n  abstract = {We present an empirical model for noises in color measurements from OLED displays. According to measured data the noise is not isotropic in the XYZ space, instead most of the noise is along an axis that is parallel to a vector from origin to measured XYZ vector. The presented empirical model is simple and depends only on the measured XYZ values. Our tests show that the variations between multiple panels of the same type have similar distribution as the temporal noise in measurements from a single panel, but a larger magnitude.},\n  keywords = {colorimetry;LED displays;organic light emitting diodes;statistical analysis;statistical model;tristimulus measurements;OLED displays;empirical model;color measurements;XYZ space;measured XYZ vector;temporal noise;Noise measurement;Colored noise;Color;Organic light emitting diodes;Image color analysis;Sea measurements;Calibration;displays;measurement uncertainty;noise measurement;calibration;mathematical model},\n  doi = {10.23919/EUSIPCO.2017.8081260},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347177.pdf},\n}\n\n
\n
\n\n\n
\n We present an empirical model for noise in color measurements from OLED displays. According to measured data, the noise is not isotropic in the XYZ space; instead, most of the noise lies along an axis parallel to the vector from the origin to the measured XYZ vector. The presented empirical model is simple and depends only on the measured XYZ values. Our tests show that the variations between multiple panels of the same type have a distribution similar to that of the temporal noise in measurements from a single panel, but with a larger magnitude.\n
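A sketch of sampling from such an anisotropic noise model follows, assuming one dominant noise component along the radial XYZ direction plus a small isotropic term; the relative magnitudes `sigma_radial` and `sigma_iso` are made-up values, not fitted ones.

```python
import numpy as np

def sample_tristimulus_noise(xyz, sigma_radial=0.02, sigma_iso=0.002, rng=None):
    """Draw a noisy XYZ measurement whose dominant noise component lies along
    the direction from the origin to the measured XYZ vector, plus a small
    isotropic term. Both sigma values are illustrative assumptions."""
    if rng is None:
        rng = np.random.default_rng()
    xyz = np.asarray(xyz, dtype=float)
    norm = np.linalg.norm(xyz)
    u = xyz / norm                                     # radial unit vector
    radial = rng.normal(0, sigma_radial * norm) * u    # dominant radial noise
    iso = rng.normal(0, sigma_iso * norm, 3)           # small isotropic noise
    return xyz + radial + iso

rng = np.random.default_rng(0)
samples = np.array([sample_tristimulus_noise([40.0, 42.0, 45.0], rng=rng)
                    for _ in range(2000)])
eigval, eigvec = np.linalg.eigh(np.cov(samples.T))
print("dominant noise axis:", eigvec[:, -1])           # ~ parallel to the XYZ direction
```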
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic crater detection and age estimation for mare regions on the lunar surface.\n \n \n \n \n\n\n \n Salih, A. L.; Schulte, P.; Grumpe, A.; Wöhler, C.; and Hiesinger, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 518-522, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081261,\n  author = {A. L. Salih and P. Schulte and A. Grumpe and C. Wöhler and H. Hiesinger},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic crater detection and age estimation for mare regions on the lunar surface},\n  year = {2017},\n  pages = {518-522},\n  abstract = {In this paper, we investigate how well an automatic crater detection algorithm is suitable to determine the surface age of different lunar regions. A template-based crater detection algorithm is used to analyze image data under known illumination conditions. For this purpose, artificially illuminated crater templates are used to detect and count craters and their diameters in the areas under investigation. The automatic detection results are used to obtain the crater size-frequency distribution (CSFD) for the examined areas, which is then used for estimating the absolute model age (AMA) of the surface. The main focus of this work is to find out whether there exists an ideal sensitivity value for automatic crater detection to obtain smallest possible errors between the automatically derived AMA and a reference AMA derived from manually detected craters. The detection sensitivity threshold of our crater detection algorithm (CDA) is calibrated based on five different regions in Mare Cognitum on the Moon such that the age inferred from the manual crater counts corresponds to the age inferred from the CDA results. The obtained best detection threshold value is used to apply the CDA algorithm to another five regions in the lunar Oceanus Procellarum region. The accuracy of the method is examined by comparing the calculated AMAs with the manually determined ones from the literature. It is shown that the automatic age estimation yields AMA values that are generally consistent with the reference values with respect to the one standard deviation errors.},\n  keywords = {astronomical techniques;calibration;lunar interior;lunar rocks;lunar surface;meteorite craters;mare regions;lunar surface;automatic crater detection algorithm;surface age;different lunar regions;template-based crater detection algorithm;crater templates;automatic detection results;crater size-frequency distribution;absolute model age;automatically derived AMA;manually detected craters;detection sensitivity threshold;manual crater;detection threshold value;lunar Oceanus Procellarum region;automatic age estimation yields AMA;Moon;Lighting;Sea surface;Surface treatment;Surface topography;Estimation;Three-dimensional displays;remote sensing;automatic crater detection;crater statistics;absolute model age;age mapping},\n  doi = {10.23919/EUSIPCO.2017.8081261},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347580.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we investigate how well an automatic crater detection algorithm can determine the surface age of different lunar regions. A template-based crater detection algorithm is used to analyze image data under known illumination conditions. For this purpose, artificially illuminated crater templates are used to detect and count craters and their diameters in the areas under investigation. The automatic detection results are used to obtain the crater size-frequency distribution (CSFD) for the examined areas, which is then used for estimating the absolute model age (AMA) of the surface. The main focus of this work is to find out whether there exists an ideal sensitivity value for automatic crater detection that yields the smallest possible errors between the automatically derived AMA and a reference AMA derived from manually detected craters. The detection sensitivity threshold of our crater detection algorithm (CDA) is calibrated on five different regions in Mare Cognitum on the Moon such that the age inferred from the manual crater counts corresponds to the age inferred from the CDA results. The obtained best detection threshold value is then used to apply the CDA to another five regions in the lunar Oceanus Procellarum region. The accuracy of the method is examined by comparing the calculated AMAs with manually determined ones from the literature. It is shown that the automatic age estimation yields AMA values that are generally consistent with the reference values within the one-standard-deviation errors.\n
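The CSFD step can be sketched as follows: given detected crater diameters over a counting area, compute the cumulative number of craters at or above each reference diameter, per unit area. Fitting a production function to obtain the AMA is not shown; the bin edges and the synthetic power-law diameters are illustrative.

```python
import numpy as np

def cumulative_csfd(diameters_km, area_km2, bins):
    """Cumulative crater size-frequency distribution: for each reference
    diameter D in `bins`, the number of detected craters with diameter >= D,
    normalized by the counting area."""
    d = np.asarray(diameters_km)
    counts = np.array([(d >= D).sum() for D in bins])
    return counts / area_km2

rng = np.random.default_rng(0)
# Synthetic detections: power-law-distributed diameters over a 1000 km^2 region
diam = 0.2 * (1 - rng.random(500)) ** (-1 / 2.0)
bins = np.array([0.2, 0.4, 0.8, 1.6, 3.2])
density = cumulative_csfd(diam, area_km2=1000.0, bins=bins)
for D, n in zip(bins, density):
    print(f"N(>= {D:.1f} km) = {n:.4f} per km^2")
```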
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Voice activity detection using discriminative restricted Boltzmann machines.\n \n \n \n \n\n\n \n Borin, R. G.; and Silva, M. T. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 523-527, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"VoicePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081262,\n  author = {R. G. Borin and M. T. M. Silva},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Voice activity detection using discriminative restricted Boltzmann machines},\n  year = {2017},\n  pages = {523-527},\n  abstract = {Voice Activity Detection (VAD) plays an important role in current technological applications, such as wireless communications and speech recognition. In this paper, we address the VAD task through machine learning by using a discriminative restricted Boltzmann machine (DRBM). We extend the conventional DRBM to deal with continuous-valued data and employ feature vectors based either on mel-frequency cepstral coefficients or on filter-bank energies. The resulting detector slightly outperforms the VAD often used as benchmark for detector comparison. Results also indicate that DRBM is able to deal with strongly correlated feature vectors.},\n  keywords = {Boltzmann machines;feature extraction;learning (artificial intelligence);speech recognition;voice activity detection;discriminative restricted Boltzmann machine;current technological applications;wireless communications;speech recognition;VAD task;machine learning;conventional DRBM;DRBM;Detectors;Training;Speech;Signal to noise ratio;Cepstral analysis},\n  doi = {10.23919/EUSIPCO.2017.8081262},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347672.pdf},\n}\n\n
\n
\n\n\n
\n Voice Activity Detection (VAD) plays an important role in current technological applications, such as wireless communications and speech recognition. In this paper, we address the VAD task through machine learning by using a discriminative restricted Boltzmann machine (DRBM). We extend the conventional DRBM to deal with continuous-valued data and employ feature vectors based either on mel-frequency cepstral coefficients or on filter-bank energies. The resulting detector slightly outperforms the VAD often used as a benchmark for detector comparison. Results also indicate that the DRBM is able to deal with strongly correlated feature vectors.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Class specific GMM based sparse feature for speech units classification.\n \n \n \n \n\n\n \n Sharma, P.; Abrol, V.; Dileep, A. D.; and Sao, A. K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 528-532, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ClassPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081263,\n  author = {P. Sharma and V. Abrol and A. D. Dileep and A. K. Sao},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Class specific GMM based sparse feature for speech units classification},\n  year = {2017},\n  pages = {528-532},\n  abstract = {In this paper, features based on the sparse representation (SR) are proposed for the classification of speech units. The proposed method employs multiple dictionaries to effectively model variations present in the speech signal. Here, a Gaussian mixture model (GMM) is built using spectral features corresponding to frames of all the examples of a speech class. Multiple dictionaries corresponding to different mixture are learned using the respective speech frames. Given a train/test speech frame, minimum spectral distance measure from the GMM means is employed to select an appropriate dictionary. The selected dictionary is used to obtain the sparse feature representation, which is used for the classification of speech units. The effectiveness of the proposed feature is demonstrated using continuous density hidden Markov model (CDHMM) based classifiers for (i) classification of isolated utterances of E-set of English alphabet, (ii) classification of consonant-vowel (CV) segments in Hindi language and (iii) classification of phoneme from TIMIT phonetic corpus. Experimental results reveal that the proposed features outperforms existing feature representations for various speech units classification tasks.},\n  keywords = {feature extraction;Gaussian processes;hidden Markov models;mixture models;natural language processing;signal representation;speaker recognition;speech processing;speech recognition;speech units classification tasks;sparse representation;multiple dictionaries;speech signal;Gaussian mixture model;spectral features;train/test speech frame;minimum spectral distance measure;sparse feature representation;Markov model based classifiers;feature representations;speech frames;class specific GMM based sparse feature;continuous density hidden Markov model;CDHMM;consonant-vowel segments;CV segments;TIMIT phonetic corpus;Speech;Dictionaries;Mel frequency cepstral coefficient;Speech recognition;Hidden Markov models;Machine learning;Sparse representation;speech recognition;dictionary learning},\n  doi = {10.23919/EUSIPCO.2017.8081263},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347441.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, features based on sparse representation (SR) are proposed for the classification of speech units. The proposed method employs multiple dictionaries to effectively model variations present in the speech signal. Here, a Gaussian mixture model (GMM) is built using spectral features corresponding to frames of all the examples of a speech class. Multiple dictionaries, corresponding to the different mixtures, are learned using the respective speech frames. Given a train/test speech frame, the minimum spectral distance from the GMM means is employed to select an appropriate dictionary. The selected dictionary is used to obtain the sparse feature representation, which is used for the classification of speech units. The effectiveness of the proposed feature is demonstrated using continuous density hidden Markov model (CDHMM) based classifiers for (i) classification of isolated utterances of the E-set of the English alphabet, (ii) classification of consonant-vowel (CV) segments in the Hindi language, and (iii) classification of phonemes from the TIMIT phonetic corpus. Experimental results reveal that the proposed features outperform existing feature representations for various speech unit classification tasks.\n
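A sketch of the dictionary-selection step is given below, assuming scikit-learn's GaussianMixture and Euclidean distance to the GMM means; the per-mixture dictionaries are random placeholders standing in for dictionaries that would be learned from the respective speech frames.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

# Dictionary selection by nearest GMM mean (simplified sketch of the pipeline;
# the sparse-coding step over the selected dictionary is not shown).
rng = np.random.default_rng(0)
frames = rng.normal(size=(500, 13))          # spectral features of one speech class
K, n_atoms = 4, 32
gmm = GaussianMixture(n_components=K, random_state=0).fit(frames)
dictionaries = [rng.normal(size=(13, n_atoms)) for _ in range(K)]  # placeholders

def select_dictionary(frame):
    # minimum spectral (here: Euclidean) distance from the GMM means
    k = np.argmin(np.linalg.norm(gmm.means_ - frame, axis=1))
    return k, dictionaries[k]

k, D = select_dictionary(rng.normal(size=13))
print("selected mixture:", k, "dictionary shape:", D.shape)
```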
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparison of I-vector and GMM-UBM approaches to speaker identification with TIMIT and NIST 2008 databases in challenging environments.\n \n \n \n \n\n\n \n Al-Kaltakchi, M. T. S.; Woo, W. L.; Dlay, S. S.; and Chambers, J. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 533-537, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ComparisonPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081264,\n  author = {M. T. S. Al-Kaltakchi and W. L. Woo and S. S. Dlay and J. A. Chambers},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Comparison of I-vector and GMM-UBM approaches to speaker identification with TIMIT and NIST 2008 databases in challenging environments},\n  year = {2017},\n  pages = {533-537},\n  abstract = {In this paper, two models, the I-vector and the Gaussian Mixture Model-Universal Background Model (GMM-UBM), are compared for the speaker identification task. Four feature combinations of I-vectors with seven fusion techniques are considered: maximum, mean, weighted sum, cumulative, interleaving and concatenated for both two and four features. In addition, an Extreme Learning Machine (ELM) is exploited to identify speakers, and then Speaker Identification Accuracy (SIA) is calculated. Both systems are evaluated for 120 speakers from the TIMIT and NIST 2008 databases for clean speech. Furthermore, a comprehensive evaluation is made under Additive White Gaussian Noise (AWGN) conditions and with three types of Non Stationary Noise (NSN), both with and without handset effects for the TIMIT database. The results show that the I-vector approach is better than the GMM-UBM for both clean and AWGN conditions without a handset. However, the GMM-UBM had better accuracy for NSN types.},\n  keywords = {AWGN;Gaussian processes;learning (artificial intelligence);speaker recognition;GMM-UBM approaches;speaker identification task;AWGN conditions;Gaussian mixture model-universal background model;extreme learning machine;speaker identification accuracy;additive white Gaussian noise conditions;Databases;Telephone sets;Speech;NIST;Noise measurement;Training;Mel frequency cepstral coefficient},\n  doi = {10.23919/EUSIPCO.2017.8081264},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570335984.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, two models, the I-vector and the Gaussian Mixture Model-Universal Background Model (GMM-UBM), are compared for the speaker identification task. Four feature combinations of I-vectors with seven fusion techniques are considered: maximum, mean, weighted sum, cumulative, interleaving and concatenation, for both two and four features. In addition, an Extreme Learning Machine (ELM) is exploited to identify speakers, and the Speaker Identification Accuracy (SIA) is then calculated. Both systems are evaluated for 120 speakers from the TIMIT and NIST 2008 databases for clean speech. Furthermore, a comprehensive evaluation is made under Additive White Gaussian Noise (AWGN) conditions and with three types of Non-Stationary Noise (NSN), both with and without handset effects, for the TIMIT database. The results show that the I-vector approach is better than the GMM-UBM for both clean and AWGN conditions without a handset, whereas the GMM-UBM has better accuracy for the NSN types.\n
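The listed fusion operators can be illustrated on two equal-length vectors as below; the precise definitions of "cumulative" and "interleaving" fusion here are our assumptions, since the abstract does not spell them out.

```python
import numpy as np

# Illustrative implementations of the listed fusion operators for two
# equal-length feature vectors (e.g., two i-vectors).
def fuse(a, b, w=0.5):
    return {
        "maximum":      np.maximum(a, b),
        "mean":         (a + b) / 2,
        "weighted_sum": w * a + (1 - w) * b,
        "cumulative":   np.cumsum(a + b),                   # assumed definition
        "interleaving": np.ravel(np.column_stack([a, b])),  # a1,b1,a2,b2,... (assumed)
        "concatenated": np.concatenate([a, b]),
    }

a, b = np.arange(4.0), np.arange(4.0)[::-1]
for name, v in fuse(a, b).items():
    print(f"{name:13s} -> {v}")
```

Note that the first four operators keep the original dimensionality while the last two double it, which matters for the classifier that consumes the fused vector.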
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low resource point process models for keyword spotting using unsupervised online learning.\n \n \n \n \n\n\n \n Sadhu, S.; and Ghosh, P. K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 538-542, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LowPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081265,\n  author = {S. Sadhu and P. K. Ghosh},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low resource point process models for keyword spotting using unsupervised online learning},\n  year = {2017},\n  pages = {538-542},\n  abstract = {Point Process Models (PPM) have been widely used for keyword spotting applications. Training these models typically requires a considerable number of keyword examples. In this work, we consider a scenario where very few keyword examples are available for training. The availability of a limited number of training examples results in a PPM with poorly learnt parameters. We propose an unsupervised online learning algorithm that starts from a poor PPM model and updates the PPM parameters using newly detected samples of the keyword in a corpus under consideration and uses the updated model for further keyword detection. We test our algorithm on eight keywords taken from the TIMIT database, the training set of which, on average, has 469 samples of each keyword. With an initial set of only five samples of a keyword (corresponds to ~ 1% of the total number of samples) followed by the proposed online parameter updating throughout the entire TIMIT train set, the performance on the TIMIT test set using the final model is found to be comparable to that of a PPM trained with all the samples of the respective keyword available from the entire TIMIT train set.},\n  keywords = {feature extraction;speech recognition;unsupervised learning;word processing;low resource point process models;keyword spotting;TIMIT database;voice samples;training set;keyword detection;unsupervised online learning algorithm;PPM;Training;Signal processing algorithms;Mathematical model;Europe;Signal processing;Speech;Maximum likelihood estimation},\n  doi = {10.23919/EUSIPCO.2017.8081265},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347403.pdf},\n}\n\n
\n
\n\n\n
\n Point Process Models (PPM) have been widely used for keyword spotting applications. Training these models typically requires a considerable number of keyword examples. In this work, we consider a scenario where very few keyword examples are available for training. The availability of a limited number of training examples results in a PPM with poorly learnt parameters. We propose an unsupervised online learning algorithm that starts from a poor PPM model, updates the PPM parameters using newly detected samples of the keyword in a corpus under consideration, and uses the updated model for further keyword detection. We test our algorithm on eight keywords taken from the TIMIT database, the training set of which, on average, has 469 samples of each keyword. With an initial set of only five samples of a keyword (corresponding to approximately 1% of the total number of samples), followed by the proposed online parameter updating throughout the entire TIMIT train set, the performance on the TIMIT test set using the final model is found to be comparable to that of a PPM trained with all the samples of the respective keyword available from the entire TIMIT train set.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Residual neural networks for speech recognition.\n \n \n \n \n\n\n \n Vydana, H. K.; and Vuppala, A. K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 543-547, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ResidualPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081266,\n  author = {H. K. Vydana and A. K. Vuppala},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Residual neural networks for speech recognition},\n  year = {2017},\n  pages = {543-547},\n  abstract = {Recent developments in deep learning methods have greatly influenced the performances of speech recognition systems. In a Hidden Markov model-Deep neural network (HMM-DNN) based speech recognition system, DNNs have been employed to model senones (context dependent states of HMM), where HMMs capture the temporal relations among senones. Due to the use of more deeper networks significant improvement in the performances has been observed and developing deep learning methods to train more deeper architectures has gained a lot of scientific interest. Optimizing a deeper network is more complex task than to optimize a less deeper network, but recently residual network have exhibited a capability to train a very deep neural network architectures and are not prone to vanishing/exploding gradient problems. In this work, the effectiveness of residual networks have been explored for of speech recognition. Along with the depth of the residual network, the criticality of width of the residual network has also been studied. It has been observed that at higher depth, width of the networks is also a crucial parameter for attaining significant improvements. A 14-hour subset of WSJ corpus is used for training the speech recognition systems, it has been observed that the residual networks have shown much ease in convergence even with a depth much higher than the deep neural network. In this work, using residual networks an absolute reduction of 0.4 in WER error rates (8% reduction in the relative error) is attained compared to the best performing deep neural network.},\n  keywords = {gradient methods;hidden Markov models;learning (artificial intelligence);recurrent neural nets;speech recognition;residual neural networks;deep learning methods;Hidden Markov model-Deep neural network based speech recognition system;deep neural network architectures;WSJ corpus;convergence;WER error rates;Speech recognition;Hidden Markov models;Neural networks;Training;Speech;Error analysis},\n  doi = {10.23919/EUSIPCO.2017.8081266},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347223.pdf},\n}\n\n
\n
\n\n\n
\n Recent developments in deep learning methods have greatly influenced the performance of speech recognition systems. In a Hidden Markov model-Deep neural network (HMM-DNN) based speech recognition system, DNNs are employed to model senones (context-dependent states of the HMM), while HMMs capture the temporal relations among senones. Significant improvements in performance have been observed with the use of deeper networks, and developing deep learning methods to train deeper architectures has gained a lot of scientific interest. Optimizing a deeper network is a more complex task than optimizing a shallower one, but residual networks have recently exhibited the capability to train very deep neural network architectures without being prone to vanishing/exploding gradient problems. In this work, the effectiveness of residual networks is explored for speech recognition. Along with the depth of the residual network, the criticality of its width has also been studied. It has been observed that at higher depths, the width of the network is also a crucial parameter for attaining significant improvements. A 14-hour subset of the WSJ corpus is used for training the speech recognition systems, and the residual networks are observed to converge with much more ease, even at depths much higher than the deep neural network. Using residual networks, an absolute reduction of 0.4 in word error rate (WER), an 8% relative reduction, is attained compared to the best performing deep neural network.\n
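The mechanism the paper relies on is the residual block with an identity shortcut; a minimal fully-connected PyTorch sketch follows, with the width, depth and the 42-class output layer as illustrative assumptions (the paper's acoustic models are configured differently).

```python
import torch
import torch.nn as nn

# A basic fully-connected residual block: the identity shortcut lets gradients
# bypass the transformation, which is what makes very deep stacks trainable.
class ResidualBlock(nn.Module):
    def __init__(self, width):
        super().__init__()
        self.body = nn.Sequential(
            nn.Linear(width, width), nn.ReLU(),
            nn.Linear(width, width),
        )
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(x + self.body(x))     # identity shortcut

net = nn.Sequential(*[ResidualBlock(256) for _ in range(20)],
                    nn.Linear(256, 42))        # 42 output classes (assumed)
x = torch.randn(8, 256)                        # batch of acoustic feature vectors
print(net(x).shape)                            # -> torch.Size([8, 42])
```

Because each block computes x + F(x), the gradient of the loss reaches early layers through the sum as well as through F, which is why depth can be increased without the vanishing-gradient behavior of plain stacks.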
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A curriculum learning method for improved noise robustness in automatic speech recognition.\n \n \n \n \n\n\n \n Braun, S.; Neil, D.; and Liu, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 548-552, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081267,\n  author = {S. Braun and D. Neil and S. Liu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A curriculum learning method for improved noise robustness in automatic speech recognition},\n  year = {2017},\n  pages = {548-552},\n  abstract = {The performance of automatic speech recognition systems under noisy environments still leaves room for improvement. Speech enhancement or feature enhancement techniques for increasing noise robustness of these systems usually add components to the recognition system that need careful optimization. In this work, we propose the use of a relatively simple curriculum training strategy called accordion annealing (ACCAN). It uses a multi-stage training schedule where samples at signal-to-noise ratio (SNR) values as low as 0dB are first added and samples at increasing higher SNR values are gradually added up to an SNR value of 50dB. We also use a method called per-epoch noise mixing (PEM) that generates noisy training samples online during training and thus enables dynamically changing the SNR of our training data. Both the ACCAN and the PEM methods are evaluated on a end-to-end speech recognition pipeline on the Wall Street Journal corpus. ACCAN decreases the average word error rate (WER) on the 20dB to -10dB SNR range by up to 31.4% when compared to a conventional multi-condition training method.},\n  keywords = {learning (artificial intelligence);optimisation;speech enhancement;speech recognition;noisy training samples;training data;ACCAN;PEM methods;end-to-end speech recognition pipeline;curriculum learning method;improved noise robustness;automatic speech recognition systems;feature enhancement techniques;accordion annealing;multistage training schedule;signal-to-noise ratio;SNR value;per-epoch noise mixing;speech enhancement;curriculum training strategy;word error rate;noise figure 0.0 dB;noise figure 50.0 dB;noise figure 20.0 dB to -10 dB;Training;Signal to noise ratio;Noise robustness;Training data;Noise measurement;Feature extraction;Neural networks},\n  doi = {10.23919/EUSIPCO.2017.8081267},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341635.pdf},\n}\n\n
\n
\n\n\n
\n The performance of automatic speech recognition systems under noisy environments still leaves room for improvement. Speech enhancement or feature enhancement techniques for increasing the noise robustness of these systems usually add components to the recognition system that need careful optimization. In this work, we propose the use of a relatively simple curriculum training strategy called accordion annealing (ACCAN). It uses a multi-stage training schedule where samples at signal-to-noise ratio (SNR) values as low as 0 dB are added first, and samples at increasingly higher SNR values are gradually added up to an SNR value of 50 dB. We also use a method called per-epoch noise mixing (PEM) that generates noisy training samples online during training and thus enables dynamically changing the SNR of our training data. Both the ACCAN and the PEM methods are evaluated on an end-to-end speech recognition pipeline on the Wall Street Journal corpus. ACCAN decreases the average word error rate (WER) on the 20 dB to -10 dB SNR range by up to 31.4% when compared to a conventional multi-condition training method.\n
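The core of PEM is scaling a random noise crop so that the speech-to-noise power ratio hits a target SNR; a sketch follows, together with an accordion-style schedule that starts at 0 dB and gradually widens the sampled SNR range up to 50 dB. The stage boundaries and test signals are illustrative, not the paper's settings.

```python
import numpy as np

def mix_at_snr(speech, noise, snr_db, rng):
    """Scale a random noise crop so that 10*log10(Ps/Pn) equals snr_db, then
    add it to the speech (the core operation of per-epoch noise mixing)."""
    start = rng.integers(0, len(noise) - len(speech))
    n = noise[start:start + len(speech)]
    ps, pn = np.mean(speech**2), np.mean(n**2)
    n = n * np.sqrt(ps / (pn * 10 ** (snr_db / 10)))
    return speech + n

rng = np.random.default_rng(0)
speech = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 16000))   # stand-in utterance
noise = rng.normal(size=160000)                             # stand-in noise track

# Accordion-style schedule (illustrative): start at 0 dB and widen the sampled
# SNR range upward stage by stage, up to 50 dB.
for stage, top in enumerate([0, 10, 20, 30, 40, 50]):
    snr = rng.uniform(0, top) if top > 0 else 0.0
    noisy = mix_at_snr(speech, noise, snr, rng)
    print(f"stage {stage}: sampled SNR = {snr:5.1f} dB, rms = {noisy.std():.3f}")
```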
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A reassigned front-end for speech recognition.\n \n \n \n \n\n\n \n Tryfou, G.; and Omologo, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 553-557, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081268,\n  author = {G. Tryfou and M. Omologo},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A reassigned front-end for speech recognition},\n  year = {2017},\n  pages = {553-557},\n  abstract = {This paper introduces the use of the TFRCC features, a time-frequency reassigned feature set, as a front-end for speech recognition. Compared to the power spectrogram, the time-frequency reassigned version is particularly helpful in describing simultaneously the temporal and spectral features of speech signals, as it offers an improved visualization of the various components. This powerful attribute is exploited from the cepstral reassigned features, which are incorporated in a state-of-the-art speech recognizer. Experimental activities investigate the proposed features in various scenarios, starting from recognition of close-talk signals and gradually increasing the complexity of the task. The results prove the superiority of these features compared to a MFCC baseline.},\n  keywords = {feature extraction;speech recognition;time-frequency analysis;speech signals;improved visualization;cepstral reassigned features;state-of-the-art speech recognizer;reassigned front-end;speech recognition;TFRCC features;power spectrogram;time-frequency reassigned version;spectral features;Time-frequency analysis;Speech;Feature extraction;Acoustics;Speech recognition;Hidden Markov models;Spectrogram},\n  doi = {10.23919/EUSIPCO.2017.8081268},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347107.pdf},\n}\n\n
\n
\n\n\n
\n This paper introduces the use of TFRCC features, a time-frequency reassigned feature set, as a front-end for speech recognition. Compared to the power spectrogram, the time-frequency reassigned version is particularly helpful in simultaneously describing the temporal and spectral features of speech signals, as it offers an improved visualization of the various components. This powerful attribute is exploited by the reassigned cepstral features, which are incorporated in a state-of-the-art speech recognizer. Experimental activities investigate the proposed features in various scenarios, starting from recognition of close-talk signals and gradually increasing the complexity of the task. The results demonstrate the superiority of these features over an MFCC baseline.\n
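A rough sketch of computing reassigned cepstral features follows, assuming librosa's reassigned_spectrogram and a simple re-binning of the reassigned energy onto a coarse time-frequency grid; the grid sizes (100 frames, 40 linear bands, 13 coefficients) and the test chirp are illustrative, and this only approximates the TFRCC idea, not the authors' exact front-end.

```python
import numpy as np
import librosa
import scipy.fft

# Approximate "reassigned cepstral" features: reassign STFT energy to its
# instantaneous time-frequency locations, re-bin on a coarse grid, log + DCT.
sr = 22050
y = librosa.chirp(fmin=200, fmax=4000, sr=sr, duration=2.0)   # synthetic test signal
freqs, times, mags = librosa.reassigned_spectrogram(y=y, sr=sr, n_fft=512)

mask = np.isfinite(freqs) & np.isfinite(times)                # drop undefined bins
t_edges = np.linspace(0, times[mask].max(), 101)              # ~100 frames (assumed)
f_edges = np.linspace(0, sr / 2, 41)                          # 40 linear bands (assumed)
E, _, _ = np.histogram2d(times[mask], freqs[mask],
                         bins=[t_edges, f_edges], weights=mags[mask])

cepstra = scipy.fft.dct(np.log(E + 1e-10), axis=1, norm='ortho')[:, :13]
print(cepstra.shape)                                          # -> (100, 13)
```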
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the improvement of modulation features using multi-microphone energy tracking for robust distant speech recognition.\n \n \n \n \n\n\n \n Rodomagoulakis, I.; and Maragos, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 558-562, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081269,\n  author = {I. Rodomagoulakis and P. Maragos},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On the improvement of modulation features using multi-microphone energy tracking for robust distant speech recognition},\n  year = {2017},\n  pages = {558-562},\n  abstract = {In this work, we investigate robust speech energy estimation and tracking schemes aiming at improved energy-based multiband speech demodulation and feature extraction for multi-microphone distant speech recognition. Based on the spatial diversity of the speech and noise recordings of a multi-microphone setup, the proposed Multichannel, Multiband Demodulation (MMD) scheme includes: 1) energy selection across the microphones that are less affected by noise and 2) cross-signal energy estimation based on the cross-Teager energy operator. Instantaneous modulations of speech resonances are estimated on the denoised energies. Second-order frequency modulation features are measured and combined with MFCCs achieving improved distant speech recognition on simulated and real data recorded in noisy and reverberant domestic environments.},\n  keywords = {array signal processing;demodulation;feature extraction;microphone arrays;speech recognition;feature extraction;multimicrophone distant speech recognition;cross-signal energy estimation;cross-Teager energy operator;second-order frequency modulation features;multimicrophone energy tracking;robust distant speech recognition;robust speech energy estimation;energy based multiband speech demodulation;speech resonances estimation;energy denoising;Speech;Feature extraction;Microphones;Noise measurement;Demodulation;Robustness},\n  doi = {10.23919/EUSIPCO.2017.8081269},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347597.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we investigate robust speech energy estimation and tracking schemes aiming at improved energy-based multiband speech demodulation and feature extraction for multi-microphone distant speech recognition. Based on the spatial diversity of the speech and noise recordings of a multi-microphone setup, the proposed Multichannel, Multiband Demodulation (MMD) scheme includes: 1) energy selection across the microphones that are less affected by noise, and 2) cross-signal energy estimation based on the cross-Teager energy operator. Instantaneous modulations of speech resonances are estimated on the denoised energies. Second-order frequency modulation features are measured and combined with MFCCs, achieving improved distant speech recognition on simulated and real data recorded in noisy and reverberant domestic environments.\n
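The energy tracking here rests on the Teager-Kaiser energy operator; a minimal sketch follows, with the standard single-signal operator and one common symmetrized cross-signal variant (the paper's exact cross-Teager definition may differ).

```python
import numpy as np

def teager_energy(x):
    """Discrete Teager-Kaiser energy operator:
    Psi[x][n] = x[n]^2 - x[n-1]*x[n+1]."""
    psi = np.empty_like(x)
    psi[1:-1] = x[1:-1]**2 - x[:-2] * x[2:]
    psi[0], psi[-1] = psi[1], psi[-2]          # simple edge handling
    return psi

def cross_teager_energy(x, y):
    """One common symmetrized cross-Teager operator (assumed form):
    Psi_c[n] = x[n]*y[n] - 0.5*(x[n-1]*y[n+1] + x[n+1]*y[n-1])."""
    psi = np.empty_like(x)
    psi[1:-1] = x[1:-1] * y[1:-1] - 0.5 * (x[:-2] * y[2:] + x[2:] * y[:-2])
    psi[0], psi[-1] = psi[1], psi[-2]
    return psi

# For a pure tone A*cos(w*n + phi), Psi is approximately A^2 * sin(w)^2,
# a constant tracking the "energy" of the oscillation:
n = np.arange(1000)
tone = 0.7 * np.cos(0.2 * n + 0.3)
print(teager_energy(tone)[10:13], (0.7 * np.sin(0.2))**2)
```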
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n VTLN-warped Gaussian posteriorgram for QbE-STD.\n \n \n \n \n\n\n \n Madhavi, M. C.; and Patil, H. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 563-567, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"VTLN-warpedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081270,\n  author = {M. C. Madhavi and H. A. Patil},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {VTLN-warped Gaussian posteriorgram for QbE-STD},\n  year = {2017},\n  pages = {563-567},\n  abstract = {Vocal Tract Length Normalization (VTLN) is a very important speaker normalization technique for speech recognition tasks. In this paper, we propose the use of Gaussian posteriorgram of VTLN-warped spectral features for a Query-by-Example Spoken Term Detection (QbE-STD). This paper presents the use of a Gaussian Mixture Model (GMM) framework for estimation of VTLN warping factor. This GMM framework does not require phoneme-level transcription and hence, it can be useful for unsupervised tasks. We propose the iterative approach for VTLN warping factor estimation with two GMM training approaches, namely, Expectation-Maximization (EM) and Deterministic Annealing-Expectation Maximization (DAEM). The VTLN-warped Gaussian posteriorgram gave the better QbE-STD performance. The performance of TIMIT QbE-STD was investigated with different evaluation factors, such as a number of Gaussian components in GMM, various local constraints, and a number of iterations in VTLN warping factor estimation. VTLN-warped Gaussian posteriorgram reduces the speaker-specific variation in Gaussian posteriorgram and hence, it is expected to give better performance than Gaussian posteriorgram.},\n  keywords = {expectation-maximisation algorithm;query processing;speaker recognition;speech recognition;voice activity detection;Gaussian posteriorgram;VTLN warping factor estimation;TIMIT QbE-STD;Gaussian components;vocal tract length normalization;query-by-example spoken term detection;GMM;QbE-STD;speaker normalization technique;Gaussian mixture model framework;Hidden Markov models;Feature extraction;Computational modeling;Training;Maximum likelihood estimation},\n  doi = {10.23919/EUSIPCO.2017.8081270},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347463.pdf},\n}\n\n
\n
\n\n\n
\n Vocal Tract Length Normalization (VTLN) is a very important speaker normalization technique for speech recognition tasks. In this paper, we propose the use of a Gaussian posteriorgram of VTLN-warped spectral features for Query-by-Example Spoken Term Detection (QbE-STD). This paper presents the use of a Gaussian Mixture Model (GMM) framework for estimation of the VTLN warping factor. This GMM framework does not require phoneme-level transcription and hence can be useful for unsupervised tasks. We propose an iterative approach for VTLN warping factor estimation with two GMM training approaches, namely, Expectation-Maximization (EM) and Deterministic Annealing-Expectation Maximization (DAEM). The VTLN-warped Gaussian posteriorgram gave better QbE-STD performance. The performance of TIMIT QbE-STD was investigated with different evaluation factors, such as the number of Gaussian components in the GMM, various local constraints, and the number of iterations in VTLN warping factor estimation. The VTLN-warped Gaussian posteriorgram reduces the speaker-specific variation in the Gaussian posteriorgram and hence is expected to give better performance than the standard Gaussian posteriorgram.\n
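The Gaussian posteriorgram itself is easy to sketch with scikit-learn: fit a GMM on (in the paper, VTLN-warped) spectral features and take per-frame component posteriors; the warping-factor estimation loop is omitted here and all sizes are illustrative. In QbE-STD, the query and test posteriorgrams would then typically be matched with dynamic time warping.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

# A Gaussian posteriorgram: per frame, the posterior probability of each GMM
# component. (The VTLN step would warp the spectral features before this.)
rng = np.random.default_rng(0)
train_frames = rng.normal(size=(2000, 13))            # unlabeled training features
gmm = GaussianMixture(n_components=64, covariance_type='diag',
                      random_state=0).fit(train_frames)

utterance = rng.normal(size=(120, 13))                # frames of one utterance
posteriorgram = gmm.predict_proba(utterance)          # shape (120, 64)
print(posteriorgram.shape, posteriorgram.sum(axis=1)[:3])   # rows sum to 1
```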
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Context incorporation using context-aware language features.\n \n \n \n\n\n \n Vlachostergiou, A.; Marandianos, G.; and Kollias, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 568-572, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081271,\n  author = {A. Vlachostergiou and G. Marandianos and S. Kollias},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Context incorporation using context — aware language features},\n  year = {2017},\n  pages = {568-572},\n  abstract = {This paper investigates the problem of context incorporation into human language systems and particular in Sentiment Analysis (SA) systems. So far, the analysis of how different features, when incorporated into such systems, improve their performance, has been discussed in a number of studies. However, a complete picture of their effectiveness remains unexplored. With this work, we attempt to extend the pool of the context - aware language features at the sentence level and to provide the foundations for a concise analysis of the importance of the various types of contextual features, using data from two different in type and size datasets: the Movie Review Dataset (MR) and the Finegrained Sentiment Dataset (FSD).},\n  keywords = {data analysis;sentiment analysis;social networking (online);ubiquitous computing;context incorporation;human language systems;Sentiment Analysis systems;concise analysis;contextual features;context aware language features;SA systems;Movie Review Dataset;MR dataset;Finegrained Sentiment Dataset;FSD;Motion pictures;Feature extraction;Sentiment analysis;Europe;Signal processing;Frequency measurement;Size measurement;Human language technology;Sentiment Analysis;Context — aware language features;CRF;discourse RST},\n  doi = {10.23919/EUSIPCO.2017.8081271},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
@InProceedings{8081272,
  author = {M. H. Soni and H. A. Patil},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Effectiveness of ideal ratio mask for non-intrusive quality assessment of noise suppressed speech},
  year = {2017},
  pages = {573-577},
  abstract = {The Ideal Ratio Mask (IRM) has proven to be a very effective tool in many applications, such as speech segregation, speech enhancement for hearing aid design, and noise-robust speech recognition. The IRM provides information regarding the amount of signal power at each Time-Frequency (T-F) unit in a given signal-plus-noise mixture. In this paper, we propose to use the IRM for non-intrusive quality assessment of noise suppressed speech. Since the quality of noise suppressed speech depends on the residual noise present in the speech, the IRM can be extremely useful for its quality assessment. The quality assessment problem is posed as a regression problem, and the mapping between statistics of acoustic features, namely, Mel Filterbank Energies (FBEs) plus IRM features, and the subjective score of the corresponding utterances was found using a single-layer Artificial Neural Network (ANN). The results of our experiments suggest that by using the mean of FBEs and IRM features as the input, the quality prediction accuracy was significantly increased.},
  keywords = {acoustic noise;acoustic signal processing;channel bank filters;feature extraction;neural nets;regression analysis;speech enhancement;time-frequency analysis;signal-plus-noise mixture;time-frequency unit;T-F unit;acoustic features;mel filterbank energies;FBE;artificial neural network;ANN;regression problem;nonintrusive quality assessment;ideal ratio mask;quality assessment problem;IRM;residual noise;noise suppressed speech;Speech;Feature extraction;Quality assessment;Noise measurement;Acoustics;Reactive power},
  doi = {10.23919/EUSIPCO.2017.8081272},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570339953.pdf},
}
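The IRM itself has a simple closed form per T-F unit. A short sketch under the common definition IRM = (|S|^2 / (|S|^2 + |N|^2))^beta; variable names are illustrative, and the paper's ANN regressor mapping feature statistics to subjective scores is not reproduced here:

```python
import numpy as np

def ideal_ratio_mask(speech_spec, noise_spec, beta=0.5):
    """IRM per T-F unit: (|S|^2 / (|S|^2 + |N|^2)) ** beta.

    speech_spec, noise_spec: complex STFTs of clean speech and noise with
    equal shape. beta = 0.5 is a common choice in the masking literature.
    """
    s_pow = np.abs(speech_spec) ** 2
    n_pow = np.abs(noise_spec) ** 2
    return (s_pow / (s_pow + n_pow + 1e-12)) ** beta   # epsilon avoids /0
```

The mask is near 1 where speech dominates and near 0 where noise dominates, so its per-utterance statistics carry information about residual noise, which is what the quality predictor exploits.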
@InProceedings{8081273,
  author = {Y. Nagano-Madsen and X. Wan},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Perception and production of L2 Mandarin tones by Swedish learners},
  year = {2017},
  pages = {578-582},
  abstract = {This study presents the results of perception and production of L2 Mandarin tones in mono- and di-syllabic words by Swedish learners at the beginner level. Although there are many studies of the perception and production of Mandarin tones, those by speakers of a lexical-pitch accent language such as Swedish are still very limited. The results reveal both discrepancy and agreement between perception and production. Swedish learners perform best in discriminating a level tone (T1) from contour tones (T2, T3, T4) in both perception and production. A discrepancy between perception and production was noted for T3. In perception, the identification of T3 was second best after the level tone (T1), but the production of T3 was found to be difficult.},
  keywords = {linguistics;natural language processing;natural languages;perception;L2 Mandarin tones;Swedish learners;level tone;contour tones;monosyllabic words;disyllabic words;lexical-pitch accent language;identification;Production;Europe;Registers;Signal processing;Acoustics;Pragmatics;Oral communication;perception;production;F0 manifestation;L2 Mandarin tones;lexical-pitch accent},
  doi = {10.23919/EUSIPCO.2017.8081273},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347554.pdf},
}
@InProceedings{8081274,
  author = {H. Puder and F. Strasser},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Decorrelation measures for stabilizing adaptive feedback cancellation in hearing aids},
  year = {2017},
  pages = {583-587},
  abstract = {In this contribution we describe an adaptive feedback cancellation (FBC) system realized with 48 sub-band filters. As the core procedure, we propose a combination of two decorrelation measures to stabilize and optimally control the adaptation. We show that especially this combination of pre-whitening and frequency shifting makes it possible to realize three major steps for fast and reliable FBC in real hearing aids. First, the adaptation bias is removed. Second, an optimal adaptation control can be realized, and third, we show that a differentiation between feedback and tonal input signals is possible. The latter can be used for an additional improvement of the adaptation control.},
  keywords = {acoustic signal processing;adaptive control;adaptive filters;decorrelation;feedback;hearing aids;medical computing;optimal control;signal denoising;decorrelation measures;hearing aids;adaptive feedback cancellation system;sub-band filters;pre-whitening;frequency shift;fast FBC;reliable FBC;optimal adaptation control;Decorrelation;Hearing aids;Frequency measurement;Stability analysis;Correlation;Europe},
  doi = {10.23919/EUSIPCO.2017.8081274},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341766.pdf},
}
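Of the two decorrelation measures, the frequency shift is the easier one to illustrate: a single-sideband modulation of the loudspeaker signal by a few hertz, which decorrelates it from the microphone input and removes the adaptation bias. A sketch using the analytic signal; the shift value is an illustrative assumption:

```python
import numpy as np
from scipy.signal import hilbert

def frequency_shift(x, shift_hz, fs):
    """Shift the spectrum of x by shift_hz via single-sideband modulation.

    A small shift (a few Hz) decorrelates the loudspeaker output from the
    feedback-path input, reducing the bias of adaptive feedback cancellers
    while remaining perceptually inconspicuous.
    """
    analytic = hilbert(x)                        # x + j * Hilbert{x}
    t = np.arange(len(x)) / fs
    return np.real(analytic * np.exp(2j * np.pi * shift_hz * t))
```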
@InProceedings{8081275,
  author = {P. C. Yong and S. Nordholm},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Real time noise suppression in social settings comprising a mixture of non-stationary and transient noise},
  year = {2017},
  pages = {588-592},
  abstract = {Hearable is a recently emerging term that describes a wireless earpiece that enhances the user's listening experience in various acoustic environments. Another important feature of hearable devices is their capability to improve speech communication in difficult social settings, which usually comprise a mixture of different non-stationary noises. In this paper, we present techniques to suppress a combination of non-stationary noise and transient noise. This is achieved by employing a combined noise suppression filter based on prediction and masking to achieve impulsive noise suppression. Experimental results highlight the robustness of the proposed algorithm in suppressing the transient noise while maintaining the speech components, without requiring any prior information about the noise.},
  keywords = {filtering theory;impulse noise;interference suppression;signal denoising;speech enhancement;speech processing;speech recognition;transient noise suppression;speech communication improvement;acoustic environment;nonstationary noise suppression;social settings;wireless earpiece;impulsive noise suppression;combined noise suppression filter;hearable devices;Transient analysis;Speech;Speech enhancement;Noise measurement;Time-frequency analysis;Signal to noise ratio;Estimation},
  doi = {10.23919/EUSIPCO.2017.8081275},
  issn = {2076-1465},
  month = {Aug},
}
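As a rough illustration of combining two suppression stages, one possible rule (an assumption for illustration, not necessarily the paper's exact combination) takes the element-wise minimum of a stationary-noise gain and a transient gain per time-frequency unit, floored to limit speech distortion:

```python
import numpy as np

def combined_suppression_gain(g_stationary, g_transient, g_min=0.1):
    """Combine two per-T-F suppression gains conservatively.

    g_stationary: gain from a non-stationary noise estimator;
    g_transient: gain from a transient/impulse detector. The element-wise
    minimum attenuates a unit if either stage flags noise; g_min bounds the
    attenuation so speech components are not fully removed.
    """
    return np.maximum(np.minimum(g_stationary, g_transient), g_min)
```

The combined gain is then applied to the noisy STFT before synthesis, exactly as with any single-stage spectral gain.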
@InProceedings{8081276,
  author = {S. Liebich and J. Fabry and P. Jax and P. Vary},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Time-domain Kalman filter for active noise cancellation headphones},
  year = {2017},
  pages = {593-597},
  abstract = {Noise pollution has a large negative influence on the health of humans, especially in the case of long-term exposure. Various passive hearing protection approaches are available. However, they often lack good protection against low frequency noise. For these applications, the principle of Active Noise Cancellation (ANC) offers a promising supplement. It relies on anti-phase compensation of the noise signal. Within the area of ANC, only a few publications deal with the Kalman filter approach. The state of the art in the literature is briefly reviewed. The algorithm presented in this contribution is inspired by the time-domain Kalman filter. The Kalman filter has the favorable property of fast convergence as well as good tracking properties. Especially the tracking of time-varying noise conditions is often a drawback of least-mean-square (LMS) and recursive-least-square (RLS) approaches. The proposed algorithm uses the Kalman equations, which are extended by online model parameter estimation based on observable signals. This results in faster convergence and higher robustness against dynamically changing noise conditions. The performance of the algorithm is evaluated by means of convergence, tracking and stability with measured acoustic paths from a real-time system.},
  keywords = {active noise control;headphones;hearing;Kalman filters;least mean squares methods;least squares approximations;parameter estimation;time-domain Kalman filter;active noise cancellation headphones;passive hearing protection approaches;low frequency noise;ANC;anti-phase compensation;noise signal;Kalman filter approach;time-varying noise conditions;Kalman equations;dynamically changing noise conditions;real-time system;noise pollution;least-mean-square approaches;recursive-least-square approaches;online model parameter estimation;Kalman filters;Estimation;Convergence;Noise measurement;Signal processing algorithms;Headphones;Mathematical model},
  doi = {10.23919/EUSIPCO.2017.8081276},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346966.pdf},
}
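A bare-bones time-domain Kalman adaptive filter of the kind this work builds on fits in a few lines. The sketch assumes a random-walk model for the cancelling filter weights with fixed process and measurement noise levels q and r; the paper's actual contribution, estimating these model parameters online from observable signals, is not reproduced:

```python
import numpy as np

def kalman_adaptive_filter(x, d, L=64, q=1e-4, r=1e-2):
    """Time-domain Kalman estimation of an L-tap noise-cancelling filter.

    Random-walk state model for the weights w (process noise q), scalar
    observation d[n] = x_n^T w + v[n] (measurement noise r), where x_n
    holds the last L reference samples. Returns weights and the residual.
    """
    w = np.zeros(L)
    P = np.eye(L)
    e = np.zeros(len(d))
    for n in range(L, len(d)):
        xn = x[n - L:n][::-1]                  # tap vector, newest first
        P = P + q * np.eye(L)                  # predict state covariance
        s = xn @ P @ xn + r                    # innovation variance
        k = P @ xn / s                         # Kalman gain
        e[n] = d[n] - xn @ w                   # innovation (residual noise)
        w = w + k * e[n]                       # weight update
        P = P - np.outer(k, xn) @ P            # covariance update
    return w, e
```

Compared with NLMS, the gain k adapts automatically to the uncertainty in P, which is where the fast convergence and tracking behaviour come from.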
@InProceedings{8081277,
  author = {A. I. Koutrouvelis and J. Jensen and M. Guo and R. C. Hendriks and R. Heusdens},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Binaural speech enhancement with spatial cue preservation utilising simultaneous masking},
  year = {2017},
  pages = {598-602},
  abstract = {Binaural multi-microphone noise reduction methods aim at noise suppression while preserving the spatial impression of the acoustic scene. Recently, a new binaural speech enhancement method was proposed which chooses per time-frequency (TF) tile either the enhanced target or a suppressed noisy version. The selection between the two is based on the input SNR per TF tile. In this paper we modify this method such that the selection mechanism is based on the output SNR. The proposed modification of deciding which TF tile is target- or noise-dominated leads to choices which are better aligned with the simultaneous masking properties of the auditory system and, hence, improves the performance over the initial version of the algorithm.},
  keywords = {acoustic signal processing;microphones;signal denoising;speech enhancement;time-frequency analysis;binaural multimicrophone noise reduction methods;noise suppression;spatial impression;acoustic scene;binaural speech enhancement method;time-frequency tile;spatial cue preservation;simultaneous masking;Microphones;Noise measurement;Noise reduction;Time-frequency analysis;Signal to noise ratio;Acoustic distortion;Binaural hearing aids;noise reduction;simultaneous masking},
  doi = {10.23919/EUSIPCO.2017.8081277},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347090.pdf},
}
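The selection mechanism is straightforward to sketch once an output-SNR estimate per tile is available. Threshold and attenuation values below are illustrative assumptions, not the paper's tuned settings:

```python
import numpy as np

def select_tf_tiles(enhanced, noisy, out_snr_db, thresh_db=0.0, atten_db=10.0):
    """Per-T-F selection between enhanced target and attenuated noisy input.

    Tiles whose estimated output SNR exceeds thresh_db are taken from the
    enhanced signal; the remaining tiles keep the noisy input, attenuated
    by atten_db, so the binaural cues of the residual scene are preserved.
    All inputs are STFT arrays of equal shape.
    """
    keep = out_snr_db > thresh_db
    g = 10 ** (-atten_db / 20)
    return np.where(keep, enhanced, g * noisy)
```

Basing `keep` on output rather than input SNR is exactly the paper's modification: it flags a tile as target-dominated only if the beamformer actually left the target dominant there.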
@InProceedings{8081278,
  author = {D. Fischer and S. Doclo},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Sensitivity analysis of the multi-frame MVDR filter for single-microphone speech enhancement},
  year = {2017},
  pages = {603-607},
  abstract = {Recently, a multi-frame minimum variance distortionless response (MFMVDR) filter for single-microphone noise reduction has been proposed, which exploits speech correlation across consecutive time frames. It has been shown that the MFMVDR filter achieves impressive results when the speech interframe correlation vector can be accurately estimated. In this paper, we analyze the influence of estimation errors for all required parameters, i.e., the speech interframe correlation vector and the undesired correlation matrix, on the performance of the MFMVDR filter. We compare the performance difference between oracle estimators and practically feasible blind estimators. Experimental results show that even small estimation errors substantially degrade the speech quality, where the most critical parameter is the speech interframe correlation vector.},
  keywords = {correlation methods;filtering theory;matrix algebra;microphones;sensitivity analysis;speech enhancement;vectors;single-microphone speech enhancement;multiframe minimum variance distortionless response filter;single-microphone noise reduction;speech correlation;consecutive time frames;MFMVDR filter;speech interframe correlation vector;estimation errors;undesired correlation matrix;practically feasible blind estimators;speech quality;multiframe MVDR filter;Speech;Correlation;Noise measurement;Noise reduction;Estimation error;Speech processing;Time-frequency analysis},
  doi = {10.23919/EUSIPCO.2017.8081278},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347585.pdf},
}
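For reference, the MFMVDR weights take the familiar MVDR closed form, with the speech interframe correlation vector playing the role of the steering vector; a minimal sketch:

```python
import numpy as np

def mfmvdr_weights(R_u, gamma):
    """Multi-frame MVDR: w = R_u^{-1} gamma / (gamma^H R_u^{-1} gamma).

    R_u: (N, N) undesired (noise plus interference) correlation matrix over
    N consecutive frames of one frequency bin; gamma: (N,) speech interframe
    correlation vector. The enhanced output is w^H y for the stacked frames y.
    """
    r = np.linalg.solve(R_u, gamma)     # avoids forming the explicit inverse
    return r / (gamma.conj() @ r)
```

The sensitivity result of the paper can be read off this formula: gamma enters both the numerator and the distortionless constraint, so errors in gamma are the most damaging input.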
@InProceedings{8081279,
  author = {J. Liski and V. Välimäki and S. Vesa and R. Väänänen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Real-time adaptive equalization for headphone listening},
  year = {2017},
  pages = {608-612},
  abstract = {The experienced sound quality produced by headphones varies between individuals. Especially with insert headphones, a constant equalization may not work properly when a specific perceived frequency response is desired. Instead, adaptive individualized equalization can be used. Previously this required multiple sensors in a headphone earpiece. This paper proposes a signal processing algorithm for continuous on-line equalization of a headset with a single microphone. The magnitude response of the headphones is estimated using arbitrary reproduced sounds. Then, the headphone response is equalized to a user-selected target response with a graphical equalizer. Measurements show that the proposed algorithm produces accurate estimates with different sound materials and the equalization produces results that closely match the target response. The algorithm can be implemented for multiple applications to obtain accurate and quick personalization, since the target response can be set arbitrarily.},
  keywords = {acoustic signal processing;adaptive equalisers;audio signal processing;frequency response;headphones;microphones;headphone listening;experienced sound quality;adaptive individualized equalization;required multiple sensors;headphone earpiece;signal processing algorithm;on-line equalization;magnitude response;arbitrary reproduced sounds;headphone response;target response;graphical equalizer;real-time adaptive equalization;frequency response;sound materials;Headphones;Signal processing algorithms;Ear;Irrigation;Microphones;Frequency response;Prototypes},
  doi = {10.23919/EUSIPCO.2017.8081279},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347830.pdf},
}
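One simple way to realize the blind magnitude-response estimate described above is a ratio of averaged power spectra between the played-back signal and the in-ear microphone signal. A sketch under that assumption; the paper's exact smoothing and equalizer design are not reproduced:

```python
import numpy as np
from scipy.signal import welch

def estimate_headphone_response(playback, mic, fs, nperseg=2048):
    """Blind magnitude-response estimate from arbitrary programme material.

    Divides the Welch PSD of the in-ear microphone signal by that of the
    played-back signal; averaged over enough material, the square root of
    the ratio approaches the magnitude response of the headphone path.
    """
    f, p_play = welch(playback, fs=fs, nperseg=nperseg)
    _, p_mic = welch(mic, fs=fs, nperseg=nperseg)
    return f, np.sqrt(p_mic / (p_play + 1e-12))

# The equalizer gain in dB then follows as target_db - 20*log10(mag),
# realized in the paper with a graphical equalizer.
```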
@InProceedings{8081280,
  author = {T. Domínguez-Bolaño and J. Rodríguez-Piñeiro and J. A. García-Naya and L. Castedo},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Throughput-based performance evaluation of 5G-candidate waveforms in high speed scenarios},
  year = {2017},
  pages = {613-617},
  abstract = {The radio access technology for railway communications is expected to migrate from GSM for Railways (GSM-R) to a more suitable generation of communication systems for the services offered nowadays, like the fourth generation (4G) or the fifth generation (5G). Recently, considerable attention has been devoted to high-speed trains since this particular environment poses challenging problems in terms of performance simulation and measurement. In order to considerably decrease the cost and complexity of high-speed measurement campaigns, we proposed in the past a technique to induce the effects caused by highly time-varying channels on multicarrier signals while conducting measurements at low speeds. This technique has been proven to be accurate for Orthogonal Frequency-Division Multiplexing (OFDM) signals, as well as for the waveforms proposed for 5G systems, such as Filter Bank Multicarrier (FBMC). In this work, we employ the technique to estimate experimentally the throughput of modulation schemes proposed for 5G (Cyclic Prefix Orthogonal Frequency-Division Multiplexing (CP-OFDM) and FBMC) at high speeds.},
  keywords = {5G mobile communication;channel bank filters;OFDM modulation;radio access networks;railway communication;time-varying channels;radio access technology;railway communications;GSM-R;high-speed trains;orthogonal frequency-division multiplexing signals;filter bank multicarrier;cyclic prefix orthogonal frequency-division multiplexing;CP-OFDM;Signal to noise ratio;Encoding;Throughput;Gain;Time measurement;OFDM;Velocity measurement},
  doi = {10.23919/EUSIPCO.2017.8081280},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570344790.pdf},
}
@InProceedings{8081281,
  author = {V. M. Baeza and A. G. Armada},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Analysis of the performance of a non-coherent large scale SIMO system based on M-DPSK under Rician fading},
  year = {2017},
  pages = {618-622},
  abstract = {In this paper we extend the performance analysis of a non-coherent large-scale single input multiple output (LS-SIMO) uplink system based on M-DPSK to consider Rician-K fading channel conditions. The interference is analyzed for a generalized K-factor and we provide analytical expressions of the signal to interference plus noise ratio (SINR) for a single user. We demonstrate that for Rician fading our proposed system is independent of the channel statistics, which simplifies the receiver design. The performance is evaluated in terms of the required number of antennas and the error probability. Finally, we provide numerical results showing that our proposal requires a lower number of receive antennas to achieve a given error probability than other non-coherent benchmark schemes available in the literature. The results show an increase in spectral efficiency when DPSK modulation is combined with NC LS-SIMO, which makes it a good candidate for 5G and beyond.},
  keywords = {differential phase shift keying;error statistics;receiving antennas;Rician channels;M-DPSK;Rician fading;large-scale single input multiple output;noise ratio;receiver design;receive antennas;DPSK modulation;NC LS-SIMO;SIMO system;error probability;Rician-K fading channel;Signal to noise ratio;Interference;Receiving antennas;Rician channels;Differential phase shift keying;Europe},
  doi = {10.23919/EUSIPCO.2017.8081281},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346299.pdf},
}
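Non-coherent M-DPSK detection with receive-antenna combining needs no channel estimate, which is the property the analysis above exploits. A generic sketch of the detector; the paper's exact LS-SIMO statistic may differ in detail:

```python
import numpy as np

def mdpsk_noncoherent_detect(Y, M):
    """Non-coherent M-DPSK detection with R-antenna combining.

    Y: (R, N) received baseband samples. The statistic
    sum_r y_r[n] * conj(y_r[n-1]) requires no channel estimate; its phase
    is sliced to the nearest of the M differential phases 2*pi*m/M.
    """
    z = np.sum(Y[:, 1:] * np.conj(Y[:, :-1]), axis=0)   # (N-1,) statistics
    m = np.round(np.angle(z) * M / (2 * np.pi)) % M     # phase slicing
    return m.astype(int)                                # detected symbols
```

Averaging the statistic over many antennas is what drives the error probability down as the array grows, without ever estimating the Rician channel.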
@InProceedings{8081282,
  author = {S. Pratschner and S. Caban and S. Schwarz and M. Rupp},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A mutual coupling model for massive MIMO applied to the 3GPP 3D channel model},
  year = {2017},
  pages = {623-627},
  abstract = {Massive Multiple-Input Multiple-Output (MIMO) has become one of the key technologies for future mobile communication systems. Although there is a variety of potential benefits, the unresolved implementation issues of massive MIMO are manifold. In this work, we consider the issue of mutual coupling in large antenna arrays. A known matrix model that describes array coupling effects is considered. This coupling model is augmented by a matching network in order to provide a universal coupling model that is applicable to any channel model. The impact of array coupling is then shown by applying the coupling matrix to the 3rd Generation Partnership Project 3D channel model. We show that the matching network leads to decorrelation and significantly reduces capacity losses due to mutual coupling in the context of massive MIMO.},
  keywords = {antenna arrays;matrix algebra;MIMO communication;mutual coupling model;massive MIMO;Massive Multiple-Input Multiple-Output;future mobile communication systems;unresolved implementation issues;antenna arrays;array coupling effects;matching network;universal coupling model;coupling matrix;3rd Generation Partnership Project 3D;3GPP 3D channel model;matrix model;Antenna arrays;Couplings;Mutual coupling;Impedance;MIMO;Channel models;massive MIMO;mutual coupling;coupling model;large arrays},
  doi = {10.23919/EUSIPCO.2017.8081282},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346708.pdf},
}
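The effect of a coupling matrix on capacity can be checked directly: apply it to the channel and compare log-det capacities. The impedance-based construction of the coupling and matching matrices from the paper is not built here; C is taken as a given input:

```python
import numpy as np

def capacity_with_coupling(H, C, snr_lin):
    """Compare log-det capacity of a channel with and without coupling.

    H: (M, K) channel matrix, e.g. drawn from the 3GPP 3D model;
    C: (M, M) coupling (or coupling-plus-matching) matrix applied at the
    array, so the effective channel becomes C @ H. snr_lin is linear SNR.
    """
    def cap(G):
        K = G.shape[1]
        A = np.eye(K) + (snr_lin / K) * G.conj().T @ G
        return np.linalg.slogdet(A)[1] / np.log(2)    # bits/s/Hz
    return cap(H), cap(C @ H)
```

Running this over many channel realizations reproduces the kind of comparison the paper makes: coupling alone shrinks the capacity, while a well-chosen matching network recovers much of the loss.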
@InProceedings{8081283,
  author = {A. Decurninge and M. Guillaud},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Covariance estimation with projected data: Applications to CSI covariance acquisition and tracking},
  year = {2017},
  pages = {628-632},
  abstract = {We consider the problem of covariance estimation with projected or missing data, and in particular the application to spatial channel covariance estimation in a multi-user Massive MIMO wireless communication system with arbitrary (possibly time-varying and/or non-orthogonal) pilot sequences. We introduce batch and online estimators based on the expectation-maximization (EM) approach, and provide sufficient conditions for their asymptotic (for large sample sizes) unbiasedness. We analyze their application to both uplink and downlink Massive MIMO, and provide numerical performance benchmarks.},
  keywords = {covariance matrices;expectation-maximisation algorithm;MIMO communication;CSI covariance acquisition;spatial channel covariance estimation;multiuser Massive MIMO wireless communication system;online estimators;expectation-maximization approach;Signal processing algorithms;MIMO;Maximum likelihood estimation;Channel estimation;Correlation;Signal processing},
  doi = {10.23919/EUSIPCO.2017.8081283},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347536.pdf},
}
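The E-step of such an EM estimator has a closed form for Gaussian data observed through linear projections. A batch sketch assuming zero-mean data and noiseless projections; the paper's online variants and noise handling are not reproduced:

```python
import numpy as np

def em_covariance_projected(Y, A, n_iter=50):
    """EM estimate of Cov(x) from projections y_t = A_t x_t, x_t ~ N(0, C).

    Y: list of observed vectors; A: list of matching projection matrices
    (e.g., pilot matrices in the massive MIMO application). Noiseless
    observations are assumed to keep the sketch short.
    """
    d = A[0].shape[1]
    C = np.eye(d, dtype=complex)
    for _ in range(n_iter):
        S = np.zeros((d, d), dtype=complex)
        for y, At in zip(Y, A):
            # E-step: posterior mean and second moment of x given y
            G = C @ At.conj().T @ np.linalg.inv(At @ C @ At.conj().T)
            x_hat = G @ y
            S += C - G @ At @ C + np.outer(x_hat, x_hat.conj())
        C = S / len(Y)                     # M-step: average second moments
    return C
```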
@InProceedings{8081284,
  author = {R. Alieiev and J. Blumenstein and R. Maršalek and T. Hehn and A. Kwoczek and T. Kürner},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Sensor-based predictive communication for highly dynamic multi-hop vehicular networks},
  year = {2017},
  pages = {633-637},
  abstract = {We introduce a sensor-aided predictive algorithm for multi-hop link quality estimation. The proposed concept uses vehicle sensor data to improve link adaptation and end-to-end path selection for vehicular multi-hop data transmission. The obtained results show that the proposed concept allows better multi-hop link quality estimation and significantly improves end-to-end transmission characteristics in dynamic vehicular environments.},
  keywords = {radio networks;vehicular ad hoc networks;multihop vehicular networks;multihop link quality estimation;vehicle sensor data;dynamic vehicular environments;sensor-based predictive communication;sensor-aided predictive algorithm;vehicular multihop data transmission;Spread spectrum communication;Vehicle dynamics;Channel estimation;Delays;Estimation;Decision making;Interference},
  doi = {10.23919/EUSIPCO.2017.8081284},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347649.pdf},
}
@InProceedings{8081285,
  author = {A. Nimr and D. Zhang and A. Martinez and G. Fettweis},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A study on the physical layer performance of GFDM for high throughput wireless communication},
  year = {2017},
  pages = {638-642},
  abstract = {In this paper, we investigate the physical layer (PHY) performance of generalized frequency division multiplexing (GFDM) for high throughput wireless communication. For comparison purposes, the orthogonal frequency division multiplexing (OFDM)-based IEEE 802.11ac PHY is used as a benchmark. Harnessing the flexibility of GFDM, we propose a novel configuration that is compliant with the IEEE 802.11ac PHY for data transmission. With that configuration, we can achieve not only lower out-of-band (OOB) emission but also higher spectral efficiency. By further deriving the corresponding receiver, the overall GFDM-based PHY implementation is shown to attain better frame error rates (FERs) under various modulation and coding schemes (MCSs). Moreover, at the signal to noise ratios (SNRs) where the target FER of 10% is fulfilled, GFDM can also provide higher throughput than OFDM.},
  keywords = {error statistics;frequency division multiplexing;OFDM modulation;wireless LAN;physical layer performance;out-of-band emission performance;generalized frequency division multiplexing;orthogonal frequency division multiplexing;high throughput wireless communication;OFDM-based IEEE 802.11ac PHY;GFDM-based PHY implementation;spectral efficiency;frame error rates;modulation and coding schemes;MCS;signal to noise ratios;OFDM;Payloads;Throughput;Modulation;Europe;Receivers},
  doi = {10.23919/EUSIPCO.2017.8081285},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347688.pdf},
}
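GFDM transmission can be written as x = A d with a modulation matrix built from one circularly shifted and modulated prototype filter, which is where the configuration flexibility above comes from. A textbook-style sketch; this is the generic construction, not the paper's specific 802.11ac-compliant configuration:

```python
import numpy as np

def gfdm_modulation_matrix(g, K, M):
    """Build the N x N GFDM modulation matrix (N = K*M) from prototype g.

    Column (m, k) is the prototype filter circularly shifted by m*K samples
    and modulated to subcarrier k:
        a[n] = g[(n - m*K) mod N] * exp(2j*pi*k*n/K).
    """
    N = K * M
    assert len(g) == N, "prototype must span one GFDM block"
    n = np.arange(N)
    A = np.empty((N, N), dtype=complex)
    for m in range(M):
        for k in range(K):
            A[:, m * K + k] = np.roll(g, m * K) * np.exp(2j * np.pi * k * n / K)
    return A

# Transmit samples for a data block d of length N: x = A @ d.
# CP-OFDM is recovered as the special case of a rectangular prototype
# with M = 1; shaping g is what lowers the OOB emission.
```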
@InProceedings{8081286,
  author = {E. Acar and Y. Levin-Schwartz and V. D. Calhoun and T. Adali},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {ACMTF for fusion of multi-modal neuroimaging data and identification of biomarkers},
  year = {2017},
  pages = {643-647},
  abstract = {Joint analysis of neuroimaging data from multiple modalities has the potential to improve our understanding of brain function since each modality provides complementary information. In this paper, we address the problem of jointly analyzing functional magnetic resonance imaging (fMRI), structural MRI (sMRI) and electroencephalography (EEG) data collected during an auditory oddball (AOD) task with the goal of capturing neural patterns that differ between patients with schizophrenia and healthy controls. Traditionally, fusion methods such as joint independent component analysis (jICA) have been used to jointly analyze such multi-modal neuroimaging data. However, previous jICA analyses typically analyze the EEG signal from a single electrode or concatenate signals from multiple electrodes, thus ignoring the potential multilinear structure of the EEG data, and model the data using a common mixing matrix for both modalities. In this paper, we arrange the multi-channel EEG signals as a third-order tensor with modes: subjects, time samples and electrodes, and jointly analyze the tensor with the fMRI and sMRI data, both in the form of subjects by voxels matrices, using a structure-revealing coupled matrix and tensor factorization (CMTF) model. Through this modeling approach, we (i) exploit the multilinear structure of multi-channel EEG data and (ii) capture weights for components indicative of the level of contribution from each modality. We compare the results of the structure-revealing CMTF model with those of jICA and demonstrate that, while both models capture significant distinguishing patterns between patients and controls, the structure-revealing CMTF model provides more robust activation.},
  keywords = {biomedical electrodes;biomedical MRI;electroencephalography;image fusion;independent component analysis;matrix decomposition;medical image processing;neurophysiology;physiological models;functional magnetic resonance imaging;auditory oddball task;joint independent component analysis;multiple electrodes;multichannel EEG signals;structure-revealing CMTF model;structure-revealing coupled matrix and tensor factorization model;multimodal neuroimaging data fusion;brain function;structural MRI;electroencephalography data;neural patterns;schizophrenia;single electrode;mixing matrix;third-order tensor;voxels matrices;Brain modeling;Electroencephalography;Data models;Tensile stress;Electrodes;Neuroimaging},
  doi = {10.23919/EUSIPCO.2017.8081286},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347290.pdf},
}
@InProceedings{8081287,
  author = {K. Liu and F. Roemer and J. P. C. L. {da Costa} and J. Xiong and Y. Yan and W. Wang and G. {Del Galdo}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Tensor-based sparsity order estimation for big data applications},
  year = {2017},
  pages = {648-652},
  abstract = {In Big Data processing we typically face very large data sets that are highly structured. To save computation and storage costs, it is desirable to extract the essence of the data from a reduced number of observations. One example of such a structural constraint is sparsity. If the data possesses a sparse representation in a suitable domain, it can be recovered from a small number of linear projections into a low-dimensional space. In this case, the degree of sparsity, referred to as the sparsity order, is of high interest. It has recently been shown that if the measurement matrix obeys certain structural constraints, one can estimate the sparsity order directly from the compressed data. The rich structure of the measurement matrix allows one to rearrange the multiple-snapshot measurement vectors into a fourth-order tensor with rank equal to the desired sparsity order. In this paper, we exploit the multilinear structure of the data for accurate sparsity order estimation with improved identifiability. We discuss the choice of the parameters, i.e., the block size, block offset, and number of blocks, to maximize the sparsity order that can be inferred from a certain number of observations, and compare state-of-the-art order selection algorithms for sparsity order estimation under the chosen parameter settings. By performing an extensive campaign of simulations, we show that the discriminant function based method and the random matrix theory algorithm outperform other approaches in small and large snapshot-number scenarios, respectively.},
  keywords = {Big Data;compressed sensing;matrix algebra;tensors;structural constraint;measurement matrix;compressed data;multiple-snapshot measurement vectors;reduced number;Big Data applications;Big Data processing;sparsity order;sparsity order estimation;order selection algorithms;discriminant function based method;random matrix theory algorithm;Tensile stress;Estimation;Eigenvalues and eigenfunctions;Signal processing algorithms;Signal processing;Sensors;Europe;Compressed sensing;sparsity order;order selection;tensor decomposition},
  doi = {10.23919/EUSIPCO.2017.8081287},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347330.pdf},
}
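Once the measurements are rearranged into the fourth-order tensor, sparsity order estimation becomes a rank (model order) selection problem on its singular spectrum. As a simple stand-in for the order-selection rules compared in the paper (the discriminant-function method and the random-matrix-theory algorithm), here is a crude largest-gap heuristic:

```python
import numpy as np

def estimate_order_by_gap(singular_values):
    """Crude order estimate from the largest relative gap in the spectrum.

    The sparsity order appears as the rank of the rearranged tensor, i.e.
    the number of singular values standing above the noise level; the
    largest ratio between consecutive singular values marks that edge.
    """
    s = np.sort(np.asarray(singular_values))[::-1]
    gaps = s[:-1] / np.maximum(s[1:], 1e-15)
    return int(np.argmax(gaps)) + 1
```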
@InProceedings{8081288,
  author = {J. H. {de Morais Goulart} and P. Comon},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A novel non-iterative algorithm for low-multilinear-rank tensor approximation},
  year = {2017},
  pages = {653-657},
  abstract = {Low-rank tensor approximation algorithms are building blocks in tensor methods for signal processing. In particular, approximations of low multilinear rank (mrank) are of central importance in tensor subspace analysis. This paper proposes a novel non-iterative algorithm for computing a low-mrank approximation, termed sequential low-rank approximation and projection (SeLRAP). Our algorithm generalizes sequential rank-one approximation and projection (SeROAP), which aims at the rank-one case. For third-order mrank-(1,R,R) approximations, SeLRAP's outputs are always at least as accurate as those of previously proposed methods. Our simulation results suggest that this is actually the case for the overwhelming majority of random third- and fourth-order tensors and several different mranks. Though the accuracy improvement is often small, we show it can make a large difference when repeatedly computing approximations, as happens, e.g., in an iterative hard thresholding algorithm for tensor completion.},
  keywords = {approximation theory;iterative methods;signal processing;tensors;tensor subspace analysis;low-mrank approximation;fourth-order tensors;iterative hard thresholding algorithm;tensor completion;noniterative algorithm;low-multilinear-rank tensor approximation algorithm;signal processing;SeLRAP;sequential rank-one approximation and projection;sequential low-rank approximation and projection;third-order mrank-(1,R,R) approximations;Tensile stress;Approximation algorithms;Signal processing algorithms;Europe;Signal processing;Algorithm design and analysis;Iterative methods;Multilinear rank;low-rank approximation;tensor;tensor completion},
  doi = {10.23919/EUSIPCO.2017.8081288},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347531.pdf},
}
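For orientation, the classical non-iterative baseline that SeLRAP-type methods are measured against is the truncated HOSVD; a compact sketch of that reference point (this is not SeLRAP itself):

```python
import numpy as np

def truncated_hosvd(T, ranks):
    """Truncated HOSVD: the classical low-mrank approximation baseline.

    T: numpy ndarray; ranks: target multilinear ranks, one per mode.
    Truncates the left singular basis of every mode-n unfolding, forms the
    core tensor, and expands it back to the original shape.
    """
    U = []
    for n, r in enumerate(ranks):
        Tn = np.moveaxis(T, n, 0).reshape(T.shape[n], -1)   # mode-n unfolding
        u, _, _ = np.linalg.svd(Tn, full_matrices=False)
        U.append(u[:, :r])
    G = T
    for n, u in enumerate(U):                               # core tensor
        G = np.moveaxis(np.tensordot(u.T, np.moveaxis(G, n, 0), axes=1), 0, n)
    for n, u in enumerate(U):                               # reconstruct
        G = np.moveaxis(np.tensordot(u, np.moveaxis(G, n, 0), axes=1), 0, n)
    return G
```

The paper's point is that when such an approximation is recomputed many times, as inside iterative hard thresholding for tensor completion, even small per-call accuracy gains over this baseline compound noticeably.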
\n
\n\n\n
\n Low-rank tensor approximation algorithms are building blocks in tensor methods for signal processing. In particular, approximations of low multilinear rank (mrank) are of central importance in tensor subspace analysis. This paper proposes a novel non-iterative algorithm for computing a low-mrank approximation, termed sequential low-rank approximation and projection (SeLRAP). Our algorithm generalizes sequential rank-one approximation and projection (SeROAP), which aims at the rank-one case. For third-order mrank-(1,R,R) approximations, SeLRAP's outputs are always at least as accurate as those of previously proposed methods. Our simulation results suggest that this is actually the case for the overwhelming majority of random third- and fourth-order tensors and several different mranks. Though the accuracy improvement is often small, we show it can make a large difference when repeatedly computing approximations, as happens, e.g., in an iterative hard thresholding algorithm for tensor completion.\n
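For reference, the sketch below implements a standard baseline for this problem, the sequentially truncated HOSVD, computing an mrank-(1,R,R) approximation of a third-order tensor. It is a minimal implementation of the classical method, not of SeLRAP; the tensor sizes are illustrative.

```python
import numpy as np

def st_hosvd(T, ranks):
    """Sequentially truncated HOSVD of a 3rd-order tensor T."""
    core, factors = T, []
    for mode, r in enumerate(ranks):
        M = np.moveaxis(core, mode, 0).reshape(core.shape[mode], -1)
        U = np.linalg.svd(M, full_matrices=False)[0][:, :r]   # leading basis
        factors.append(U)
        rest = [s for i, s in enumerate(core.shape) if i != mode]
        core = np.moveaxis((U.T @ M).reshape([r] + rest), 0, mode)
    return core, factors

def tucker_to_full(core, factors):
    """Expand a Tucker representation back to a full tensor."""
    T = core
    for mode, U in enumerate(factors):
        M = np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)
        rest = [s for i, s in enumerate(T.shape) if i != mode]
        T = np.moveaxis((U @ M).reshape([U.shape[0]] + rest), 0, mode)
    return T

rng = np.random.default_rng(1)
T = rng.standard_normal((10, 10, 10))
core, factors = st_hosvd(T, (1, 4, 4))        # mrank-(1,R,R) with R = 4
T_hat = tucker_to_full(core, factors)
print(np.linalg.norm(T - T_hat) / np.linalg.norm(T))
```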
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Large deviation analysis of the CPD detection problem based on random tensor theory.\n \n \n \n \n\n\n \n Boyer, R.; and Loubaton, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 658-662, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LargePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081289,\n  author = {R. Boyer and P. Loubaton},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Large deviation analysis of the CPD detection problem based on random tensor theory},\n  year = {2017},\n  pages = {658-662},\n  abstract = {The performance in terms of minimal Bayes' error probability for detection of a random tensor is a fundamental understudied difficult problem. In this work, we assume that we observe under the alternative hypothesis a noisy rank-ñ tensor admitting a Q-order Canonical Polyadic Decomposition (CPD) with large factors of size Nq × R, i.e., for 1 ≤ q ≤ Q, R,Nq → ∞ with R1/q/Nq converges to a finite constant. The detection of the random entries of the core tensor is hard to study since an analytic expression of the error probability is not easily tractable. To mitigate this technical difficulty, the Chernoff Upper Bound (CUB) and the error exponent on the error probability are derived and studied for the considered tensor-based detection problem. These two quantities are relied to a key quantity for the considered detection problem due to its strong link with the moment generating function of the log-likelihood test. However, the tightest CUB is reached for the value, denoted by s*, which minimizes the error exponent. To solve this step, two methodologies are standard in the literature. The first one is based on the use of a costly numerical optimization algorithm. An alternative strategy is to consider the Bhattacharyya Upper Bound (BUB) for s* = 1/2. In this last scenario, the costly numerical optimization step is avoided but no guaranty exists on the optimality of the BUB. Based on powerful random matrix theory tools, a simple analytical expression of s* is provided with respect to the Signal to Noise Ratio (SNR) and for low rank CPD. Associated to a compact expression of the CUB, an easily tractable expression of the tightest CUB and the error exponent are provided and analyzed. A main conclusion of this work is that the BUB is the tightest bound at low SNRs. At contrary, this property is no longer true for higher SNRs.},\n  keywords = {Bayes methods;eigenvalues and eigenfunctions;error statistics;matrix algebra;maximum likelihood estimation;optimisation;random processes;tensors;deviation analysis;CPD detection problem;random tensor theory;noisy rank-ñ tensor;core tensor;error exponent;key quantity;moment generating function;log-likelihood test;powerful random matrix theory tools;low rank CPD;error probability;Q-order canonical polyadic decomposition;Chernoff upper bound;Signal to noise ratio;Tensile stress;Error probability;Upper bound;Europe;Noise measurement},\n  doi = {10.23919/EUSIPCO.2017.8081289},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347534.pdf},\n}\n\n
\n
\n\n\n
\n The performance in terms of minimal Bayes' error probability for detection of a random tensor is a fundamental, difficult, and understudied problem. In this work, we assume that we observe under the alternative hypothesis a noisy rank-R tensor admitting a Q-order Canonical Polyadic Decomposition (CPD) with large factors of size Nq × R, i.e., for 1 ≤ q ≤ Q, R, Nq → ∞ with R^(1/q)/Nq converging to a finite constant. The detection of the random entries of the core tensor is hard to study since an analytic expression of the error probability is not easily tractable. To mitigate this technical difficulty, the Chernoff Upper Bound (CUB) and the error exponent on the error probability are derived and studied for the considered tensor-based detection problem. These two quantities are related to a key quantity for the considered detection problem through its strong link with the moment generating function of the log-likelihood test. However, the tightest CUB is reached for the value, denoted by s*, which minimizes the error exponent. To solve this step, two methodologies are standard in the literature. The first is based on a costly numerical optimization algorithm. An alternative strategy is to consider the Bhattacharyya Upper Bound (BUB), obtained for s* = 1/2. In this last scenario, the costly numerical optimization step is avoided, but no guarantee exists on the optimality of the BUB. Based on powerful random matrix theory tools, a simple analytical expression of s* is provided with respect to the Signal to Noise Ratio (SNR) and for low rank CPD. Together with a compact expression of the CUB, an easily tractable expression of the tightest CUB and the error exponent are provided and analyzed. A main conclusion of this work is that the BUB is the tightest bound at low SNRs. On the contrary, this property no longer holds at higher SNRs.\n
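The Chernoff/Bhattacharyya machinery referred to above can be illustrated numerically. The sketch below computes mu(s) = -log ∫ p0(x)^(1-s) p1(x)^s dx for two simple hypotheses and optimizes over s, with s = 1/2 giving the Bhattacharyya point; the zero-mean Gaussians with different variances are an illustrative stand-in for the tensor model, not the paper's setting.

```python
import numpy as np
from scipy.integrate import quad
from scipy.optimize import minimize_scalar
from scipy.stats import norm

p0 = norm(0, 1.0)      # null hypothesis: noise only
p1 = norm(0, 1.5)      # alternative: larger variance (signal present)

def mu(s):
    """Chernoff exponent mu(s) = -log of the integral of p0^(1-s) * p1^s."""
    val, _ = quad(lambda x: p0.pdf(x) ** (1 - s) * p1.pdf(x) ** s, -20, 20)
    return -np.log(val)

res = minimize_scalar(lambda s: -mu(s), bounds=(1e-3, 1 - 1e-3),
                      method="bounded")
print(f"s* = {res.x:.3f}")
print(f"Chernoff exponent   mu(s*)  = {mu(res.x):.4f}")
print(f"Bhattacharyya point mu(1/2) = {mu(0.5):.4f}")
```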
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Nonlinear least squares updating of the canonical polyadic decomposition.\n \n \n \n \n\n\n \n Vandecappelle, M.; Vervliet, N.; and De Lathauwer, L.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 663-667, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NonlinearPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081290,\n  author = {M. Vandecappelle and N. Vervliet and L. {De Lathauwer}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Nonlinear least squares updating of the canonical polyadic decomposition},\n  year = {2017},\n  pages = {663-667},\n  abstract = {Current batch tensor methods often struggle to keep up with fast-arriving data. Even storing the full tensors that have to be decomposed can be problematic. To alleviate these limitations, tensor updating methods modify a tensor decomposition using efficient updates instead of recomputing the entire decomposition when new data becomes available. In this paper, the structure of the decomposition is exploited to achieve fast updates for the canonical polyadic decomposition whenever new slices are added to the tensor in a certain mode. A batch NLS-algorithm is adapted so that it can be used in an updating context. By only storing the old decomposition and the new slice of the tensor, the algorithm is both time- and memory efficient. Experimental results show that the proposed method is faster than batch ALS and NLS methods, while maintaining a good accuracy for the decomposition.},\n  keywords = {least squares approximations;matrix decomposition;tensors;canonical polyadic decomposition;tensor updating methods;tensor decomposition;updating context;old decomposition;nonlinear least square updating;batch tensor methods;ALS;NLS-algorithm;Tensile stress;Matrix decomposition;Europe;Signal processing;Signal processing algorithms;Algorithm design and analysis;Linear programming},\n  doi = {10.23919/EUSIPCO.2017.8081290},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347577.pdf},\n}\n\n
\n
\n\n\n
\n Current batch tensor methods often struggle to keep up with fast-arriving data. Even storing the full tensors that have to be decomposed can be problematic. To alleviate these limitations, tensor updating methods modify a tensor decomposition using efficient updates instead of recomputing the entire decomposition when new data becomes available. In this paper, the structure of the decomposition is exploited to achieve fast updates for the canonical polyadic decomposition whenever new slices are added to the tensor in a certain mode. A batch NLS algorithm is adapted so that it can be used in an updating context. By storing only the old decomposition and the new slice of the tensor, the algorithm is both time- and memory-efficient. Experimental results show that the proposed method is faster than batch ALS and NLS methods, while maintaining good accuracy for the decomposition.\n
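To make the baseline concrete, the sketch below performs the kind of batch recomputation the updating method avoids: full ALS sweeps over a third-order CPD. It is a minimal ALS reference under assumed sizes, not the proposed NLS update.

```python
import numpy as np

def khatri_rao(A, B):
    """Column-wise Kronecker product of A (I x R) and B (J x R)."""
    return (A[:, None, :] * B[None, :, :]).reshape(-1, A.shape[1])

def als_sweep(T, A, B, C):
    """One ALS pass over the factors of T ~ [[A, B, C]]."""
    T1 = T.reshape(T.shape[0], -1)                     # mode-1 unfolding
    A = T1 @ np.linalg.pinv(khatri_rao(B, C)).T
    T2 = np.moveaxis(T, 1, 0).reshape(T.shape[1], -1)  # mode-2 unfolding
    B = T2 @ np.linalg.pinv(khatri_rao(A, C)).T
    T3 = np.moveaxis(T, 2, 0).reshape(T.shape[2], -1)  # mode-3 unfolding
    C = T3 @ np.linalg.pinv(khatri_rao(A, B)).T
    return A, B, C

rng = np.random.default_rng(2)
I, J, K, R = 8, 9, 10, 3
A0, B0, C0 = (rng.standard_normal((n, R)) for n in (I, J, K))
T = np.einsum('ir,jr,kr->ijk', A0, B0, C0)
A, B, C = (rng.standard_normal((n, R)) for n in (I, J, K))
for _ in range(50):
    A, B, C = als_sweep(T, A, B, C)
err = np.linalg.norm(T - np.einsum('ir,jr,kr->ijk', A, B, C))
print(err / np.linalg.norm(T))       # typically near 0 after convergence
```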
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low rank canonical polyadic decomposition of tensors based on group sparsity.\n \n \n \n \n\n\n \n Han, X.; Albera, L.; Kachenoura, A.; Senhadji, L.; and Shu, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 668-672, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LowPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081291,\n  author = {X. Han and L. Albera and A. Kachenoura and L. Senhadji and H. Shu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low rank canonical polyadic decomposition of tensors based on group sparsity},\n  year = {2017},\n  pages = {668-672},\n  abstract = {A new and robust method for low rank Canonical Polyadic (CP) decomposition of tensors is introduced in this paper. The proposed method imposes the Group Sparsity of the coefficients of each Loading (GSL) matrix under orthonormal subspace. By this way, the low rank CP decomposition problem is solved without any knowledge of the true rank and without using any nuclear norm regularization term, which generally leads to computationally prohibitive iterative optimization for large-scale data. Our GSL-CP technique can be then implemented using only an upper bound of the rank. It is compared in terms of performance with classical methods, which require to know exactly the rank of the tensor. Numerical simulated experiments with noisy tensors and results on fluorescence data show the advantages of the proposed GSL-CP method in comparison with classical algorithms.},\n  keywords = {iterative methods;matrix algebra;optimisation;tensors;nuclear norm regularization term;GSL-CP technique;noisy tensors;robust method;low-rank CP decomposition problem;group sparsity-of-the-coefficient-of-each-loading matrix;low-rank Canonical Polyadic decomposition;computationally prohibitive iterative optimization;CP;Tensile stress;Loading;Matrix decomposition;Linear programming;Signal processing algorithms;Signal processing;Minimization},\n  doi = {10.23919/EUSIPCO.2017.8081291},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347658.pdf},\n}\n\n
\n
\n\n\n
\n A new and robust method for low rank Canonical Polyadic (CP) decomposition of tensors is introduced in this paper. The proposed method imposes the Group Sparsity of the coefficients of each Loading (GSL) matrix under an orthonormal subspace. In this way, the low rank CP decomposition problem is solved without any knowledge of the true rank and without using any nuclear norm regularization term, which generally leads to computationally prohibitive iterative optimization for large-scale data. Our GSL-CP technique can then be implemented using only an upper bound of the rank. It is compared in terms of performance with classical methods, which require exact knowledge of the tensor rank. Numerical experiments with simulated noisy tensors and results on fluorescence data show the advantages of the proposed GSL-CP method in comparison with classical algorithms.\n
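The standard building block behind this kind of group-sparsity penalty is the proximal operator of the l2,1 norm, which zeroes entire rows of a loading matrix and thereby prunes superfluous rank-one terms. The sketch below shows that operator in isolation; the threshold and matrix are illustrative, and this is not the GSL-CP algorithm itself.

```python
import numpy as np

def group_soft_threshold(W, tau):
    """Prox of tau * (sum of row l2-norms): shrinks or zeroes whole rows."""
    norms = np.linalg.norm(W, axis=1, keepdims=True)
    return W * np.maximum(1.0 - tau / np.maximum(norms, 1e-12), 0.0)

W = np.vstack([np.ones((2, 4)), 0.05 * np.ones((3, 4))])
print(group_soft_threshold(W, 0.5))   # the three small rows are zeroed
```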
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Synthesis of a spatially band-limited plane wave in the time-domain using wave field synthesis.\n \n \n \n \n\n\n \n Hahn, N.; Winter, F.; and Spors, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 673-677, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SynthesisPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081292,\n  author = {N. Hahn and F. Winter and S. Spors},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Synthesis of a spatially band-limited plane wave in the time-domain using wave field synthesis},\n  year = {2017},\n  pages = {673-677},\n  abstract = {Wave Field Synthesis (WFS) is a spatial sound reproduction technique aiming at a physically accurate reconstruction of a desired sound field within an extended listening area. It was shown in a recent study that the accuracy of the synthesized sound field can be improved in a local area by applying a spatial band-limitation to the driving function. However, the computational complexity of the frequency-domain driving function is demanding because of the involved Bessel functions. In this paper, a time-domain WFS driving function is introduced for the synthesis of a spatially band-limited plane wave. The driving function is obtained based on a time-domain representation of the sound field which is given as a superposition of plane waves with time-varying direction and amplitude. The performance of the proposed approach is evaluated by numerical simulations. Practical issues regarding the discretization of the analytic driving function and dynamic range control are discussed.},\n  keywords = {acoustic field;acoustic signal processing;Bessel functions;signal synthesis;sound reproduction;time-domain analysis;dynamic range control;time-domain WFS driving function;Bessel functions;analytic driving function;time-varying direction;plane waves;time-domain representation;frequency-domain driving function;spatial band-limitation;synthesized sound field;extended listening area;desired sound field;spatial sound reproduction technique;wave field synthesis;spatially band-limited plane wave;Time-domain analysis;Harmonic analysis;Europe;Bandwidth;Computational complexity;Numerical simulation},\n  doi = {10.23919/EUSIPCO.2017.8081292},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347337.pdf},\n}\n\n
\n
\n\n\n
\n Wave Field Synthesis (WFS) is a spatial sound reproduction technique aiming at a physically accurate reconstruction of a desired sound field within an extended listening area. It was shown in a recent study that the accuracy of the synthesized sound field can be improved in a local area by applying a spatial band-limitation to the driving function. However, the computational complexity of the frequency-domain driving function is demanding because of the involved Bessel functions. In this paper, a time-domain WFS driving function is introduced for the synthesis of a spatially band-limited plane wave. The driving function is obtained based on a time-domain representation of the sound field which is given as a superposition of plane waves with time-varying direction and amplitude. The performance of the proposed approach is evaluated by numerical simulations. Practical issues regarding the discretization of the analytic driving function and dynamic range control are discussed.\n
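A heavily simplified time-domain picture of plane-wave WFS is sketched below: each loudspeaker of a linear array receives a delayed, weighted copy of the source signal. The spatial band-limitation that is the subject of the paper and the usual sqrt(jw) WFS pre-equalization filter are both omitted; array geometry and parameters are illustrative assumptions.

```python
import numpy as np

c, fs = 343.0, 44100                              # speed of sound, sample rate
x0 = np.stack([np.linspace(-2.0, 2.0, 32),        # loudspeaker positions (m)
               np.zeros(32)], axis=1)
normal = np.array([0.0, 1.0])                     # array normal
phi = np.pi / 6                                   # plane-wave incidence angle
n_pw = np.array([np.sin(phi), np.cos(phi)])       # propagation direction

src = np.zeros(1024)
src[0] = 1.0                                      # test signal: unit impulse

delays = x0 @ n_pw / c                            # per-loudspeaker delay (s)
delays -= delays.min()                            # shift to causal delays
weight = max(n_pw @ normal, 0.0)                  # plane-wave gain term

driving = np.zeros((len(x0), len(src)))
for i, tau in enumerate(delays):
    h = np.sinc(np.arange(len(src)) - tau * fs)   # fractional-delay filter
    driving[i] = weight * np.convolve(src, h)[:len(src)]
```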
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse parametric modeling of the early part of acoustic impulse responses.\n \n \n \n \n\n\n \n Papayiannis, C.; Evers, C.; and Naylor, P. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 678-682, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SparsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081293,\n  author = {C. Papayiannis and C. Evers and P. A. Naylor},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Sparse parametric modeling of the early part of acoustic impulse responses},\n  year = {2017},\n  pages = {678-682},\n  abstract = {Acoustic channels are typically described by their Acoustic Impulse Response (AIR) as a Moving Average (MA) process. Such AIRs are often considered in terms of their early and late parts, describing discrete reflections and the diffuse reverberation tail respectively. We propose an approach for constructing a sparse parametric model for the early part. The model aims at reducing the number of parameters needed to represent it and subsequently reconstruct from the representation the MA coefficients that describe it. It consists of a representation of the reflections arriving at the receiver as delayed copies of an excitation signal. The Time-Of-Arrivals of reflections are not restricted to integer sample instances and a dynamically estimated model for the excitation sound is used. We also present a corresponding parameter estimation method, which is based on regularized-regression and nonlinear optimization. The proposed method also serves as an analysis tool, since estimated parameters can be used for the estimation of room geometry, the mixing time and other channel properties. Experiments involving simulated and measured AIRs are presented, in which the AIR coefficient reconstruction-error energy does not exceed 11.4% of the energy of the original AIR coefficients. The results also indicate dimensionality reduction figures exceeding 90% when compared to a MA process representation.},\n  keywords = {acoustic signal processing;architectural acoustics;parameter estimation;regression analysis;reverberation;transient response;sparse parametric modeling;acoustic impulse responses;discrete reflections;diffuse reverberation tail;sparse parametric model;excitation signal;dynamically estimated model;excitation sound;channel properties;AIR coefficient reconstruction-error energy;original AIR coefficients;MA process representation;acoustic channels;moving average process;parameter estimation method;time-of-arrivals;Atmospheric modeling;Autoregressive processes;Reverberation;Receivers;Optimization;Europe;Sparse Modeling;Reverberation;Acoustic Environments;Reflection TOA Estimation},\n  doi = {10.23919/EUSIPCO.2017.8081293},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343110.pdf},\n}\n\n
\n
\n\n\n
\n Acoustic channels are typically described by their Acoustic Impulse Response (AIR) as a Moving Average (MA) process. Such AIRs are often considered in terms of their early and late parts, describing discrete reflections and the diffuse reverberation tail respectively. We propose an approach for constructing a sparse parametric model for the early part. The model aims at reducing the number of parameters needed to represent it and subsequently reconstruct from the representation the MA coefficients that describe it. It consists of a representation of the reflections arriving at the receiver as delayed copies of an excitation signal. The Times-Of-Arrival of reflections are not restricted to integer sample instants, and a dynamically estimated model for the excitation sound is used. We also present a corresponding parameter estimation method, which is based on regularized regression and nonlinear optimization. The proposed method also serves as an analysis tool, since estimated parameters can be used for the estimation of room geometry, the mixing time and other channel properties. Experiments involving simulated and measured AIRs are presented, in which the AIR coefficient reconstruction-error energy does not exceed 11.4% of the energy of the original AIR coefficients. The results also indicate dimensionality reduction figures exceeding 90% when compared to an MA process representation.\n
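The modeling idea lends itself to a compact sketch: resynthesize the early AIR as a sum of delayed copies of an excitation pulse, with TOAs at fractional sample positions. The band-limited sinc excitation, TOAs and amplitudes below are assumed placeholders, not the paper's dynamically estimated excitation model; note that 4 reflections are encoded by 8 numbers instead of 512 MA coefficients.

```python
import numpy as np

fs, L = 16000, 512                                  # sample rate, AIR length
toas = np.array([0.0031, 0.0058, 0.0102, 0.0147])   # fractional TOAs (s)
amps = np.array([1.0, -0.6, 0.45, -0.3])            # reflection amplitudes

n = np.arange(L)
air_early = np.zeros(L)
for tau, a in zip(toas, amps):
    # Band-limited impulse centred at the fractional position tau * fs.
    air_early += a * np.sinc(n - tau * fs)
```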
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An image-inspired audio sharpness index.\n \n \n \n \n\n\n \n Mahé, G.; Moisan, L.; and Mitrea, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 683-687, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081294,\n  author = {G. Mahé and L. Moisan and M. Mitrea},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An image-inspired audio sharpness index},\n  year = {2017},\n  pages = {683-687},\n  abstract = {We propose a new non-intrusive (reference-free) objective measure of speech intelligibility that is inspired from previous works on image sharpness. We define the audio Sharpness Index (aSI) as the sensitivity of the spectrogram sparsity to the convolution of the signal with a white noise, and we calculate a closed-form formula of the aSI. Experiments with various speakers, noise and reverberation conditions show a high correlation between the aSI and the well-established Speech Transmission Index (STI), which is intrusive (full-reference). Additionally, the aSI can be used as an intelligibility or clarity criterion to drive sound enhancement algorithms. Experimental results on stereo mixtures of two sounds show that blind source separation based on aSI maximization performs well for speech and for music.},\n  keywords = {blind source separation;convolution;image processing;optimisation;reverberation;speech enhancement;speech intelligibility;white noise;nonintrusive objective measure;speech intelligibility;white noise;closed-form formula;reverberation conditions;Speech Transmission Index;aSI maximization;image-inspired audio sharpness index;reference-free objective measure;spectrogram sparsity sensitivity;signal convolution;STI;stereo mixtures;blind source separation;sound enhancement algorithms;clarity criterion;Spectrogram;Indexes;Speech;White noise;Convolution;Noise measurement;Reverberation},\n  doi = {10.23919/EUSIPCO.2017.8081294},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347471.pdf},\n}\n\n
\n
\n\n\n
\n We propose a new non-intrusive (reference-free) objective measure of speech intelligibility that is inspired by previous work on image sharpness. We define the audio Sharpness Index (aSI) as the sensitivity of the spectrogram sparsity to the convolution of the signal with white noise, and we calculate a closed-form formula for the aSI. Experiments with various speakers, noise and reverberation conditions show a high correlation between the aSI and the well-established Speech Transmission Index (STI), which is intrusive (full-reference). Additionally, the aSI can be used as an intelligibility or clarity criterion to drive sound enhancement algorithms. Experimental results on stereo mixtures of two sounds show that blind source separation based on aSI maximization performs well for speech and for music.\n
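The underlying idea can be probed numerically: measure how much the sparsity of a spectrogram degrades when the signal is convolved with a white-noise kernel. The sketch below uses a crude l1/l2 sparsity measure and a chirp as a stand-in for a clean signal; it is only a proxy for the intuition, not the paper's closed-form index.

```python
import numpy as np
from scipy.signal import chirp, fftconvolve, stft

def spectrogram_sparsity(x, fs):
    """l1/l2 sparsity measure of the spectrogram (lower = sparser)."""
    _, _, Z = stft(x, fs=fs, nperseg=256)
    mag = np.abs(Z).ravel()
    return np.linalg.norm(mag, 1) / (np.sqrt(mag.size) * np.linalg.norm(mag, 2))

fs = 16000
t = np.arange(0, 1.0, 1.0 / fs)
x = chirp(t, f0=200, t1=1.0, f1=3000)     # clean, spectrally sharp signal
rng = np.random.default_rng(3)
h = rng.standard_normal(256) / 16          # white-noise convolution kernel
x_blur = fftconvolve(x, h)[:len(x)]
print(spectrogram_sparsity(x, fs), spectrogram_sparsity(x_blur, fs))
```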
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Time-domain realisation of model-based rendering for 2.5D local wave field synthesis using spatial bandwidth-limitation.\n \n \n \n \n\n\n \n Winter, F.; Hahn, N.; and Spors, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 688-692, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Time-domainPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081295,\n  author = {F. Winter and N. Hahn and S. Spors},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Time-domain realisation of model-based rendering for 2.5D local wave field synthesis using spatial bandwidth-limitation},\n  year = {2017},\n  pages = {688-692},\n  abstract = {Wave Field Synthesis aims at a physically accurate synthesis of a desired sound field inside an extended listening area. This area is surrounded by loudspeakers individually driven by their respective driving signals. Recently, the authors have published an approach for so-called Local Wave Field Synthesis which enhances the reproduction accuracy in a limited region by applying a spatial bandwidth limitation in the circular/spherical harmonics domain to the desired sound field. This paper presents an efficient time-domain realisation of the mentioned approach for 2.5-dimensional synthesis scenarios. It focuses on the model-based rendering of virtual plane waves and point sources. As an outcome, the parametric representation of the driving signals for both source types allows for the reproduction of time-varying acoustic scenarios. This also includes an adaptation to the tracked position of a moving listener. The realisation is compared with conventional Wave Field Synthesis regarding the spatial structure and spectral properties of the reproduced sound field. The results confirm the findings of the prior publication, that the reproduction accuracy can be locally improved with Local Wave Field Synthesis.},\n  keywords = {acoustic signal processing;loudspeakers;rendering (computer graphics);sound reproduction;spatial bandwidth-limitation;spatial bandwidth limitation;circular/spherical harmonics domain;local wave field synthesis;2.5-dimensional synthesis;Time-domain analysis;Rendering (computer graphics);Loudspeakers;Europe;Signal processing;Acoustics;Mathematical model;Circular Harmonics;Sound Field Synthesis;Wave Field Synthesis;Local Wave Field Synthesis},\n  doi = {10.23919/EUSIPCO.2017.8081295},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346165.pdf},\n}\n\n
\n
\n\n\n
\n Wave Field Synthesis aims at a physically accurate synthesis of a desired sound field inside an extended listening area. This area is surrounded by loudspeakers individually driven by their respective driving signals. Recently, the authors have published an approach for so-called Local Wave Field Synthesis which enhances the reproduction accuracy in a limited region by applying a spatial bandwidth limitation in the circular/spherical harmonics domain to the desired sound field. This paper presents an efficient time-domain realisation of the mentioned approach for 2.5-dimensional synthesis scenarios. It focuses on the model-based rendering of virtual plane waves and point sources. As an outcome, the parametric representation of the driving signals for both source types allows for the reproduction of time-varying acoustic scenarios. This also includes an adaptation to the tracked position of a moving listener. The realisation is compared with conventional Wave Field Synthesis regarding the spatial structure and spectral properties of the reproduced sound field. The results confirm the findings of the prior publication, that the reproduction accuracy can be locally improved with Local Wave Field Synthesis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Investigation of electric network frequency for synchronization of low cost and wireless sound cards.\n \n \n \n \n\n\n \n Golokolenko, O.; and Schuller, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 693-697, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"InvestigationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081296,\n  author = {O. Golokolenko and G. Schuller},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Investigation of electric network frequency for synchronization of low cost and wireless sound cards},\n  year = {2017},\n  pages = {693-697},\n  abstract = {An electric network frequency (ENF) signal can be found in multimedia recordings due to propagation from the power grid. There a variety of applications based on ENF signal use, such as video and audio stream synchronization, and origin determination of multimedia recordings. In this paper, we propose the use of ENF to synchronize real-time audio streams from different, non-synchronized sound devices, for instance from wireless sound cards or low cost USB sound cards. Synchronization of separate audio streams can be achieved by aligning their embedded ENF signals. Our goal is to find out how accurate a synchronization using ENF at different SNR levels can be. We show simulation and real-time experimental results of audio stream synchronization. We found that with sufficient ENF level we can achieve an accuracy of the estimated delay difference of four samples at 44.1 kHz sampling rate.},\n  keywords = {audio signal processing;audio streaming;synchronisation;audio stream synchronization;multimedia recordings;real-time audio streams;nonsynchronized sound devices;wireless sound cards;electric network frequency signal;low cost sound card;wirelesssound card;audio stream separation;frequency 44.1 kHz;Synchronization;Delays;Microphones;Signal to noise ratio;Real-time systems;Wireless sensor networks;Wireless communication;ENF;audio streams synchronization;low cost and wireless sound cards},\n  doi = {10.23919/EUSIPCO.2017.8081296},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346436.pdf},\n}\n\n
\n
\n\n\n
\n An electric network frequency (ENF) signal can be found in multimedia recordings due to propagation from the power grid. There are a variety of applications based on the ENF signal, such as video and audio stream synchronization, and determination of the origin of multimedia recordings. In this paper, we propose the use of ENF to synchronize real-time audio streams from different, non-synchronized sound devices, for instance wireless sound cards or low cost USB sound cards. Synchronization of separate audio streams can be achieved by aligning their embedded ENF signals. Our goal is to find out how accurate a synchronization using ENF at different SNR levels can be. We show simulation and real-time experimental results of audio stream synchronization. We found that with a sufficient ENF level we can estimate the delay difference to within four samples at a 44.1 kHz sampling rate.\n
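A minimal sketch of the alignment principle: estimate each stream's ENF trace as the dominant STFT frequency near the nominal mains frequency, then cross-correlate the two traces to find the offset. Real recordings need band-pass filtering, harmonic handling and sub-bin interpolation; the synthetic wandering tone below is only an illustration.

```python
import numpy as np
from scipy.signal import stft

def enf_trace(x, fs, nominal=50.0, span=3.0):
    """Per-frame ENF estimate: dominant STFT bin near the nominal frequency."""
    f, _, Z = stft(x, fs=fs, nperseg=2 * fs, noverlap=fs)  # 2 s frames, 1 s hop
    band = (f >= nominal - span) & (f <= nominal + span)
    return f[band][np.argmax(np.abs(Z[band]), axis=0)]

def align(enf_a, enf_b):
    """Offset (in frames) of stream b relative to stream a."""
    a, b = enf_a - enf_a.mean(), enf_b - enf_b.mean()
    xc = np.correlate(a, b, mode="full")
    return np.argmax(xc) - (len(b) - 1)

fs = 400
t = np.arange(120 * fs) / fs
f_inst = 50 + 1.5 * np.sin(2 * np.pi * t / 30)        # slowly wandering ENF
x = np.sin(2 * np.pi * np.cumsum(f_inst) / fs)
y = x[5 * fs:]                                        # stream starting 5 s later
print(align(enf_trace(x, fs), enf_trace(y, fs)))      # ~5 (with a 1 s frame hop)
```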
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimizing acoustic feature extractor for anomalous sound detection based on Neyman-Pearson lemma.\n \n \n \n \n\n\n \n Koizumi, Y.; Saito, S.; Uematsu, H.; and Harada, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 698-702, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OptimizingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081297,\n  author = {Y. Koizumi and S. Saito and H. Uematsu and N. Harada},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimizing acoustic feature extractor for anomalous sound detection based on Neyman-Pearson lemma},\n  year = {2017},\n  pages = {698-702},\n  abstract = {We propose a method for optimizing an acoustic feature extractor for anomalous sound detection (ASD). Most ASD systems adopt outlier-detection techniques because it is difficult to collect a massive amount of anomalous sound data. To improve the performance of such outlier-detection-based ASD, it is essential to extract a set of efficient acoustic features that is suitable for identifying anomalous sounds. However, the ideal property of a set of acoustic features that maximizes ASD performance has not been clarified. By considering outlier-detection-based ASD as a statistical hypothesis test, we defined optimality as an objective function that adopts Neyman-Pearson lemma; the acoustic feature extractor is optimized to extract a set of acoustic features which maximize the true positive rate under an arbitrary false positive rate. The variational auto-encoder is applied as an acoustic feature extractor and optimized to maximize the objective function. We confirmed that the proposed method improved the F-measure score from 0.02 to 0.06 points compared to those of conventional methods, and ASD results of a stereolithography 3D-printer in a real-environment show that the proposed method is effective in identifying anomalous sounds.},\n  keywords = {acoustic signal processing;encoding;feature extraction;statistical testing;stereolithography;three-dimensional printing;acoustic feature extractor;anomalous sound detection;Neyman-Pearson lemma;statistical hypothesis test;variational auto-encoder;stereolithography 3D-printer;outlier-detection-based ASD system;Feature extraction;Acoustics;Linear programming;Training data;Training;Europe;Signal processing;Anomalous sound detection;acoustic feature;objective function;deep neural network;Gaussian mixture model},\n  doi = {10.23919/EUSIPCO.2017.8081297},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341166.pdf},\n}\n\n
\n
\n\n\n
\n We propose a method for optimizing an acoustic feature extractor for anomalous sound detection (ASD). Most ASD systems adopt outlier-detection techniques because it is difficult to collect a massive amount of anomalous sound data. To improve the performance of such outlier-detection-based ASD, it is essential to extract a set of efficient acoustic features that is suitable for identifying anomalous sounds. However, the ideal property of a set of acoustic features that maximizes ASD performance has not been clarified. By considering outlier-detection-based ASD as a statistical hypothesis test, we defined optimality as an objective function based on the Neyman-Pearson lemma; the acoustic feature extractor is optimized to extract a set of acoustic features which maximize the true positive rate under an arbitrary false positive rate. The variational auto-encoder is applied as the acoustic feature extractor and optimized to maximize the objective function. We confirmed that the proposed method improved the F-measure score by 0.02 to 0.06 points compared to conventional methods, and ASD results for a stereolithography 3D printer in a real environment show that the proposed method is effective in identifying anomalous sounds.\n
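The Neyman-Pearson-style target quantity is easy to state in code: the true-positive rate when the detection threshold is set to fix a false-positive rate on normal data. The sketch below evaluates that quantity for synthetic anomaly scores; the score distributions are placeholders, not outputs of the paper's variational auto-encoder.

```python
import numpy as np

def tpr_at_fpr(scores_normal, scores_anom, fpr=0.1):
    """True-positive rate when the threshold fixes the false-positive rate."""
    thr = np.quantile(scores_normal, 1.0 - fpr)   # (1 - fpr) quantile
    return np.mean(scores_anom > thr)

rng = np.random.default_rng(4)
s_norm = rng.normal(0.0, 1.0, 5000)   # anomaly scores of normal sounds
s_anom = rng.normal(2.0, 1.0, 500)    # anomaly scores of anomalous sounds
print(tpr_at_fpr(s_norm, s_anom, fpr=0.05))
```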
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint active device identification and symbol detection using sparse constraints in massive MIMO systems.\n \n \n \n \n\n\n \n Hegde, G.; Pesavento, M.; and Pfetsch, M. E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 703-707, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081298,\n  author = {G. Hegde and M. Pesavento and M. E. Pfetsch},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint active device identification and symbol detection using sparse constraints in massive MIMO systems},\n  year = {2017},\n  pages = {703-707},\n  abstract = {In this paper, we consider a wireless system with a central station equipped with a large number of antennas surveilling a multitude of single antenna devices. The devices become active and transmit blocks of symbols sporadically. Our objective is to blindly identify the active devices and detect the transmit symbols. To this end, we exploit the sporadic nature of the device to station communication and formulate a sparse optimization problem as an integer program. Furthermore, we employ the convex relaxation of the discrete optimization variables in the problem in order reduce its computational complexity. A procedure to further lower the symbol detection errors is also discussed. Finally, the influence of system parameters on the performance of the proposed techniques is analysed using simulation results.},\n  keywords = {computational complexity;convex programming;integer programming;MIMO communication;wireless system;central station;single antenna devices;active devices;station communication;symbol detection errors;massive MIMO systems;sparse optimization;MIMO;Optimization;Antennas;Performance evaluation;Search problems;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081298},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346805.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider a wireless system with a central station equipped with a large number of antennas surveilling a multitude of single-antenna devices. The devices become active and transmit blocks of symbols sporadically. Our objective is to blindly identify the active devices and detect the transmitted symbols. To this end, we exploit the sporadic nature of the device-to-station communication and formulate a sparse optimization problem as an integer program. Furthermore, we employ the convex relaxation of the discrete optimization variables in the problem in order to reduce its computational complexity. A procedure to further lower the symbol detection errors is also discussed. Finally, the influence of system parameters on the performance of the proposed techniques is analysed using simulation results.\n
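The convex-relaxation idea can be illustrated with a generic row-sparse recovery: the symbol matrix S has one row per device and is nonzero only for active devices, so a group-lasso objective solved by proximal gradient reveals the active set. This sketch is a standard ISTA baseline under assumed dimensions, not the paper's integer-programming formulation with discrete symbol constraints.

```python
import numpy as np

def ista_group(Y, H, lam, iters=300):
    """Proximal gradient for 0.5*||H S - Y||_F^2 + lam * sum_i ||S[i,:]||_2."""
    S = np.zeros((H.shape[1], Y.shape[1]))
    step = 1.0 / np.linalg.norm(H, 2) ** 2
    for _ in range(iters):
        G = S - step * H.T @ (H @ S - Y)                 # gradient step
        norms = np.linalg.norm(G, axis=1, keepdims=True)
        S = G * np.maximum(1 - step * lam / np.maximum(norms, 1e-12), 0)
    return S

rng = np.random.default_rng(5)
H = rng.standard_normal((64, 100))          # 64 antennas, 100 devices
S_true = np.zeros((100, 20))                # 20 symbols per block
S_true[[3, 17, 42]] = rng.choice([-1.0, 1.0], size=(3, 20))
Y = H @ S_true + 0.05 * rng.standard_normal((64, 20))
S_hat = ista_group(Y, H, lam=2.0)
print(np.nonzero(np.linalg.norm(S_hat, axis=1) > 0.5)[0])   # active devices
```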
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Restoration of multilayered single-photon 3D Lidar images.\n \n \n \n \n\n\n \n Halimi, A.; Tobin, R.; McCarthy, A.; McLaughlin, S.; and Buller, G. S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 708-712, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RestorationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081299,\n  author = {A. Halimi and R. Tobin and A. McCarthy and S. McLaughlin and G. S. Buller},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Restoration of multilayered single-photon 3D Lidar images},\n  year = {2017},\n  pages = {708-712},\n  abstract = {This paper presents a new algorithm for the restoration of multilayered three-dimensional laser detection and ranging (3D Lidar) images. For multilayered targets such as semitransparent surfaces or when the transmitted light of the laser beam is incident on multiple surfaces at different depths, the returned signal may contain multiple peaks. Considering the Poisson statistics of these observations leads to a convex data fidelity term that is regularized using appropriate functions accounting for the spatial correlation between pixels and the sparse depth repartition of targets. More precisely, the spatial correlation is introduced using a convex total variation (TV) regularizer, and a collaborative sparse prior is used to introduce the depth prior knowledge. The resulting minimization problem is solved using the alternating direction method of multipliers (ADMM) that offers good convergence properties. The algorithm was validated using field data representing a man standing 1 meter behind camouflage, at an approximate stand-off distance of 230m from the system. The results show the benefit of the proposed strategy in that it improves the quality of the imaged objects at different depths and under reduced acquisition times.},\n  keywords = {correlation methods;image restoration;minimisation;optical radar;stochastic processes;convex total variation regularizer;Poisson statistics;convex data fidelity term;spatial correlation;minimization problem;multilayered single-photon 3D Lidar image restoration;multilayered 3D laser detection and ranging images;sparse depth repartition;collaborative sparse prior;alternating direction method of multipliers;Signal processing algorithms;Surface emitting lasers;Photonics;Optimization;Laser beams;Three-dimensional displays;Laser radar;Lidar waveform;Poisson statistics;image restoration;ADMM;total variation;collaborative sparsity},\n  doi = {10.23919/EUSIPCO.2017.8081299},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343751.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a new algorithm for the restoration of multilayered three-dimensional laser detection and ranging (3D Lidar) images. For multilayered targets such as semitransparent surfaces or when the transmitted light of the laser beam is incident on multiple surfaces at different depths, the returned signal may contain multiple peaks. Considering the Poisson statistics of these observations leads to a convex data fidelity term that is regularized using appropriate functions accounting for the spatial correlation between pixels and the sparse depth repartition of targets. More precisely, the spatial correlation is introduced using a convex total variation (TV) regularizer, and a collaborative sparse prior is used to introduce the depth prior knowledge. The resulting minimization problem is solved using the alternating direction method of multipliers (ADMM), which offers good convergence properties. The algorithm was validated using field data representing a man standing 1 meter behind camouflage, at an approximate stand-off distance of 230 m from the system. The results show the benefit of the proposed strategy in that it improves the quality of the imaged objects at different depths and under reduced acquisition times.\n
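The ADMM-plus-TV machinery is shown below in its simplest form: a 1-D TV-regularized denoiser with a Gaussian data term. The paper's model additionally uses Poisson statistics, 3-D data and a collaborative sparsity prior, so this is only a minimal sketch of the solver structure.

```python
import numpy as np

def tv_admm(y, lam, rho=1.0, iters=200):
    """ADMM for min 0.5*||x - y||^2 + lam * ||Dx||_1 (D: finite differences)."""
    n = len(y)
    D = np.diff(np.eye(n), axis=0)
    x, z, u = y.copy(), np.zeros(n - 1), np.zeros(n - 1)
    A = np.eye(n) + rho * D.T @ D
    for _ in range(iters):
        x = np.linalg.solve(A, y + rho * D.T @ (z - u))   # x-update
        Dx = D @ x
        z = np.sign(Dx + u) * np.maximum(np.abs(Dx + u) - lam / rho, 0)
        u += Dx - z                                       # dual update
    return x

rng = np.random.default_rng(6)
truth = np.concatenate([np.zeros(50), np.ones(30), np.zeros(70)])  # two layers
y = truth + 0.2 * rng.standard_normal(len(truth))
x_hat = tv_admm(y, lam=0.5)    # piecewise-constant estimate of the profile
```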
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse reconstruction algorithms for nonlinear microwave imaging.\n \n \n \n \n\n\n \n Zaimaga, H.; Fraysse, A.; and Lambert, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 713-717, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SparsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081300,\n  author = {H. Zaimaga and A. Fraysse and M. Lambert},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Sparse reconstruction algorithms for nonlinear microwave imaging},\n  year = {2017},\n  pages = {713-717},\n  abstract = {This paper presents a two-step inverse process which allows sparse recovery of the unknown (complex) dielectric profiles of scatterers for nonlinear microwave imaging. The proposed approach is applied to a nonlinear inverse scattering problem arising in microwave imaging and correlated with joint sparsity which gives multiple sparse solutions that share a common nonzero support. Numerical results demonstrate the potential of the proposed two step inversion approach when compared to existing sparse recovery algorithm for the case of small scatterers.},\n  keywords = {image reconstruction;inverse problems;microwave imaging;nonlinear microwave imaging;nonlinear inverse scattering problem;multiple sparse solutions;step inversion approach;sparse reconstruction algorithms;unknown dielectric profiles;sparse recovery algorithm;Inverse problems;Receivers;Microwave imaging;Microwave theory and techniques;Microwave FET integrated circuits;Microwave integrated circuits},\n  doi = {10.23919/EUSIPCO.2017.8081300},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341812.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a two-step inverse process which allows sparse recovery of the unknown (complex) dielectric profiles of scatterers for nonlinear microwave imaging. The proposed approach is applied to a nonlinear inverse scattering problem arising in microwave imaging and combined with joint sparsity, which gives multiple sparse solutions that share a common nonzero support. Numerical results demonstrate the potential of the proposed two-step inversion approach when compared to existing sparse recovery algorithms for the case of small scatterers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Complex domain nonlocal block-matching denoising based on high-order singular value decomposition (HOSVD).\n \n \n \n \n\n\n \n Katkovnik, V.; Ponomarenko, M.; and Egiazarian, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 718-722, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ComplexPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081301,\n  author = {V. Katkovnik and M. Ponomarenko and K. Egiazarian},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Complex domain nonlocal block-matching denoising based on high-order singular value decomposition (HOSVD)},\n  year = {2017},\n  pages = {718-722},\n  abstract = {Block matching 3D collaborative filtering (BM3D) is one of the most popular denoising technique based on data sparsity concept applied to specially structured data. In this paper we develop this technique for complex domain, i.e. for application to complex-valued data. Sparsity as an approximation technique can be addressed directly to complex-valued variables or to real-valued pairs phase/amplitude and real/imaginary parts of complex-valued variables. As a result we arrive to various ways of development and obtain a set of quite different algorithms. The algorithms proposed in this paper are composed from two components: nonlocal patch-wise grouping and high-order singular value decomposition (HOSVD) for grouped data processing. The latter gives data adaptive complex-valued bases for complex-valued data or real-valued bases for joint processing of the pairs phase/amplitude, real/imaginary parts of complex-valued variables. Comparative study of the developed algorithms is produced in order to select the most efficient ones.},\n  keywords = {collaborative filtering;frequency-domain analysis;image denoising;image filtering;image matching;singular value decomposition;real/imaginary parts;complex-valued variables;complex domain nonlocal block-matching;high-order singular value decomposition;HOSVD;BM3D;data sparsity concept;block matching 3D collaborative filtering;denoising technique;complex-valued data;grouped data processing;nonlocal patch-wise grouping;approximation technique;Algorithm design and analysis;Signal processing algorithms;Transforms;Three-dimensional displays;Noise reduction;Image reconstruction;Extraterrestrial measurements},\n  doi = {10.23919/EUSIPCO.2017.8081301},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570337362.pdf},\n}\n\n
\n
\n\n\n
\n Block matching 3D collaborative filtering (BM3D) is one of the most popular denoising techniques, based on the concept of data sparsity applied to specially structured data. In this paper we develop this technique for the complex domain, i.e. for application to complex-valued data. Sparsity as an approximation technique can be applied directly to complex-valued variables or to the real-valued pairs phase/amplitude and real/imaginary parts of complex-valued variables. As a result, we arrive at various lines of development and obtain a set of quite different algorithms. The algorithms proposed in this paper are composed of two components: nonlocal patch-wise grouping and high-order singular value decomposition (HOSVD) for grouped data processing. The latter gives data-adaptive complex-valued bases for complex-valued data, or real-valued bases for joint processing of the pairs phase/amplitude and real/imaginary parts of complex-valued variables. A comparative study of the developed algorithms is carried out in order to select the most efficient ones.\n
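The HOSVD filtering step applied to each group of matched patches can be sketched as follows: compute orthonormal bases for every mode of the stacked patch tensor, hard-threshold the core, and transform back. The sketch is real-valued and single-group for brevity (the paper studies the complex-valued variants), and the group of repeated patches is a synthetic assumption.

```python
import numpy as np

def mode_product(T, U, mode):
    """Multiply tensor T by matrix U along the given mode."""
    M = np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)
    rest = [s for i, s in enumerate(T.shape) if i != mode]
    return np.moveaxis((U @ M).reshape([U.shape[0]] + rest), 0, mode)

def hosvd_denoise(G, thr):
    """Hard-threshold the HOSVD core of a patch group G (h x w x n_patches)."""
    Us = []
    for mode in range(3):
        M = np.moveaxis(G, mode, 0).reshape(G.shape[mode], -1)
        Us.append(np.linalg.svd(M, full_matrices=False)[0])
    core = G
    for mode, U in enumerate(Us):
        core = mode_product(core, U.T, mode)     # transform to the HOSVD core
    core = core * (np.abs(core) >= thr)          # hard thresholding
    out = core
    for mode, U in enumerate(Us):
        out = mode_product(out, U, mode)         # transform back
    return out

rng = np.random.default_rng(7)
clean = np.tile(rng.standard_normal((8, 8))[:, :, None], (1, 1, 16))
noisy = clean + 0.3 * rng.standard_normal(clean.shape)
den = hosvd_denoise(noisy, thr=0.8)
print(np.linalg.norm(den - clean) / np.linalg.norm(clean))
```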
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning fast sparsifying overcomplete dictionaries.\n \n \n \n \n\n\n \n Rusu, C.; and Thompson, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 723-727, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081302,\n  author = {C. Rusu and J. Thompson},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Learning fast sparsifying overcomplete dictionaries},\n  year = {2017},\n  pages = {723-727},\n  abstract = {In this paper we propose a dictionary learning method that builds an over complete dictionary that is computationally efficient to manipulate, i.e., sparse approximation algorithms have sub-quadratic computationally complexity. To achieve this we consider two factors (both to be learned from data) in order to design the dictionary: an orthonormal component made up of a fixed number of fast fundamental orthonormal transforms and a sparse component that builds linear combinations of elements from the first, orthonormal component. We show how effective the proposed technique is to encode image data and compare against a previously proposed method from the literature. We expect the current work to contribute to the spread of sparsity and dictionary learning techniques to hardware scenarios where there are hard limits on the computational capabilities and energy consumption of the computer systems.},\n  keywords = {approximation theory;computational complexity;iterative methods;learning (artificial intelligence);sparse matrices;image data;sparse component;fast fundamental orthonormal transforms;orthonormal component;sub-quadratic computationally complexity;sparse approximation algorithms;dictionary learning method;overcomplete dictionaries;Dictionaries;Sparse matrices;Signal processing algorithms;Machine learning;Transforms;Linear programming;Approximation algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081302},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346733.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose a dictionary learning method that builds an overcomplete dictionary that is computationally efficient to manipulate, i.e., sparse approximation algorithms have sub-quadratic computational complexity. To achieve this we consider two factors (both to be learned from data) in order to design the dictionary: an orthonormal component made up of a fixed number of fast fundamental orthonormal transforms, and a sparse component that builds linear combinations of elements from the first, orthonormal component. We show how effective the proposed technique is at encoding image data and compare against a previously proposed method from the literature. We expect the current work to contribute to the spread of sparsity and dictionary learning techniques to hardware scenarios where there are hard limits on the computational capabilities and energy consumption of the computer systems.\n
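The dictionary structure advocated above can be sketched as a fast orthonormal transform composed with a sparse mixing matrix, so that applying the dictionary costs a sparse multiply plus an O(n log n) transform instead of a dense matrix-vector product. The DCT choice, sizes and density below are illustrative assumptions, not the learned factors.

```python
import numpy as np
from scipy.fft import idct
from scipy.sparse import random as sparse_random

n, n_atoms = 256, 512
S = sparse_random(n, n_atoms, density=0.02, random_state=0, format="csc")

def dict_apply(coeffs):
    """D @ coeffs with D = B S: sparse multiply, then a fast IDCT for B."""
    return idct(S @ coeffs, norm="ortho")

atom = dict_apply(np.eye(n_atoms)[:, 7])     # materialize the 8th atom
print(atom.shape)                            # (256,)
```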
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spatio-spectro-temporal coded aperture design for multiresolution compressive spectral video sensing.\n \n \n \n \n\n\n \n López, K. L.; Galvis, L.; and Fuentes, H. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 728-732, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Spatio-spectro-temporalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081303,\n  author = {K. L. López and L. Galvis and H. A. Fuentes},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Spatio-spectro-temporal coded aperture design for multiresolution compressive spectral video sensing},\n  year = {2017},\n  pages = {728-732},\n  abstract = {Colored coded apertures (CCA) have been introduced in compressive spectral imaging (CSI) systems entailing richer coding strategies. CCA incorporate a wavelength-dependent coding procedure, which not only achieves spatial but also spectral coding in a single step. Even though the use of the CCA offers significant advantages and could also be applied to compressive spectral video sensing, this later application still exhibits diverse challenges originated by the temporal variable. The scene motion during the acquisition yields to motion artifacts such that these artifacts get aliased during the video reconstruction, damaging the entire data. As a result, multiresolution approaches have been proposed in order to alleviate the aliasing and enhance the video reconstruction. In this paper, it is proposed an algorithm to generate temporal colored coded aperture patterns that allow to sense the spatial, spectral and temporal information in an uniform way such that each spectral frame is spatially sensed at least once. In addition, it is proposed a multiresolution approach in the spectral multiplexing system allowing to extract optical flow estimates to address a higher quality reconstruction. Simulation results show an improvement up to 6 dB in terms of peak-signal to noise ratio (PSNR) in the reconstruction quality with the multiresolution approach using the designed patterns with respect to traditional random structures.},\n  keywords = {compressed sensing;image colour analysis;image reconstruction;image resolution;image sequences;multiplexing;video coding;video signal processing;spatio-spectro-temporal coded aperture design;multiresolution compressive spectral video sensing;CCA;compressive spectral imaging systems;wavelength-dependent coding procedure;spectral coding;scene motion;motion artifacts;video reconstruction;temporal colored coded aperture patterns;spatial information;spectral information;temporal information;spectral frame;spectral multiplexing system;reconstruction quality;colored coded apertures;spatial coding;optical flow estimates;Apertures;Optical sensors;Image reconstruction;Optical imaging;Optical filters;Correlation;Spatial resolution;Spectral video;compressive spectral video;coded aperture design;optical flow;optical filter},\n  doi = {10.23919/EUSIPCO.2017.8081303},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347306.pdf},\n}\n\n
\n
\n\n\n
\n Colored coded apertures (CCA) have been introduced in compressive spectral imaging (CSI) systems, enabling richer coding strategies. CCA incorporate a wavelength-dependent coding procedure, which achieves not only spatial but also spectral coding in a single step. Even though the use of CCA offers significant advantages and could also be applied to compressive spectral video sensing, this latter application still exhibits diverse challenges originating from the temporal variable. Scene motion during the acquisition leads to motion artifacts that get aliased during the video reconstruction, damaging the entire data. As a result, multiresolution approaches have been proposed in order to alleviate the aliasing and enhance the video reconstruction. In this paper, an algorithm is proposed to generate temporal colored coded aperture patterns that sense the spatial, spectral and temporal information in a uniform way, such that each spectral frame is spatially sensed at least once. In addition, a multiresolution approach in the spectral multiplexing system is proposed, allowing optical flow estimates to be extracted to achieve a higher quality reconstruction. Simulation results show an improvement of up to 6 dB in peak signal-to-noise ratio (PSNR) in the reconstruction quality with the multiresolution approach using the designed patterns, with respect to traditional random structures.\n
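The coverage constraint described above is easy to illustrate: generate binary aperture patterns over T frames and force every spatial position to be open in at least one of them. The sketch below enforces only this spatial constraint; the real design additionally assigns spectral filters per open position, and the sizes are illustrative.

```python
import numpy as np

def coverage_patterns(h, w, T, open_frac=0.3, seed=0):
    """T random binary apertures; every pixel is open in at least one frame."""
    rng = np.random.default_rng(seed)
    pats = rng.random((T, h, w)) < open_frac
    r, c = np.nonzero(~pats.any(axis=0))        # pixels never sensed
    pats[rng.integers(0, T, size=r.size), r, c] = True
    return pats

pats = coverage_patterns(64, 64, T=8)
print(pats.any(axis=0).all())                   # True: full spatial coverage
```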
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Weighted MSE based spatially adaptive BM3D.\n \n \n \n \n\n\n \n Ponomarenko, M.; Pismenskova, M.; and Egiazarian, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 733-737, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"WeightedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081304,\n  author = {M. Ponomarenko and M. Pismenskova and K. Egiazarian},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Weighted MSE based spatially adaptive BM3D},\n  year = {2017},\n  pages = {733-737},\n  abstract = {Weighted MSE (wMSE), recently introduced modification of MSE, is an image quality metric used to estimate visual quality of filtered images. It provides better than MSE correspondence to a human perception in consideration of distortions introduced by image filters. In this paper, wMSE is used both as a criterion to evaluate filtering efficiency of the modification of BM3D filter with spatially varying parameters, as well as to train a specially designed neural network to predict filters' parameters. Extensive analysis on three image datasets demonstrates that the proposed modification of BM3D provides lower values of wMSE than those of BM3D, both effectively suppressing noise in homogeneous regions as well as preserving fine details and texture.},\n  keywords = {image denoising;image filtering;mean square error methods;neural nets;weighted MSE;spatially adaptive BM3D;wMSE;image quality metric;visual quality;filtered images;human perception;image filters;filtering efficiency;image datasets;neural network;MSE modification;BM3D filter modification;noise suppression;homogeneous regions;Noise measurement;Biological neural networks;Filtering;Signal processing;Visualization;image denoising;image visual quality assessment;neural networks;BM3D},\n  doi = {10.23919/EUSIPCO.2017.8081304},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347801.pdf},\n}\n\n
Weighted MSE (wMSE), a recently introduced modification of MSE, is an image quality metric used to estimate the visual quality of filtered images. It corresponds better to human perception than MSE when considering distortions introduced by image filters. In this paper, wMSE is used both as a criterion to evaluate the filtering efficiency of a modification of the BM3D filter with spatially varying parameters, and to train a specially designed neural network to predict the filter's parameters. Extensive analysis on three image datasets demonstrates that the proposed modification of BM3D provides lower values of wMSE than BM3D, both effectively suppressing noise in homogeneous regions and preserving fine details and texture.

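As a rough illustration of what a weighted MSE can look like, the sketch below weights squared errors by the inverse local variance of the reference, so distortions in homogeneous regions are penalized more strongly. The paper defines its own weighting, so this particular form is only an assumption:

```python
import numpy as np
from scipy.signal import convolve2d

def wmse(reference, filtered, win=7, eps=1e-6):
    """Weighted MSE sketch: errors in flat (low-variance) regions of the
    reference get larger weights, mimicking the perceptual emphasis that
    wMSE places on distortions visible in homogeneous areas."""
    ref = reference.astype(float)
    err2 = (ref - filtered.astype(float)) ** 2
    k = np.ones((win, win)) / win ** 2
    mu = convolve2d(ref, k, mode="same", boundary="symm")
    var = convolve2d(ref ** 2, k, mode="same", boundary="symm") - mu ** 2
    w = 1.0 / (np.maximum(var, 0) + eps)   # clip tiny negatives from rounding
    return float((w * err2).sum() / w.sum())
```
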
EMOEEG: A new multimodal dataset for dynamic EEG-based emotion recognition with audiovisual elicitation.
Conneau, A.; Hajlaoui, A.; Chetouani, M.; and Essid, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 738-742, Aug 2017.

@InProceedings{8081305,
  author = {A. Conneau and A. Hajlaoui and M. Chetouani and S. Essid},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {EMOEEG: A new multimodal dataset for dynamic EEG-based emotion recognition with audiovisual elicitation},
  year = {2017},
  pages = {738-742},
  keywords = {electroencephalography;emotion recognition;medical signal processing;physiological sensors;electroencephalographic sensors;static images;IAPS dataset;short video;negative fear-type emotions;video stimuli;inter-annotator agreement;EMOEEG;multimodal dataset;audiovisual elicitation;physiological responses;visual stimuli;audiovisual stimuli;affective computing systems;automatic emotion recognition systems;simplified dynamic annotation strategy;Videos;Calibration;Electroencephalography;Physiology;Emotion recognition;Visualization;Synchronization;Electroencephalography (EEG);Multimodal Data;Affective Computing;Fear-type Emotions;Valence;Arousal;Annotation;Inter-annotator agreement},
  doi = {10.23919/EUSIPCO.2017.8081305},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347279.pdf},
}

EMOEEG is a multimodal dataset in which physiological responses to both visual and audiovisual stimuli were recorded, along with videos of the subjects, with a view to developing affective computing systems, especially automatic emotion recognition systems. The experimental setup involves various physiological sensors, among them electroencephalographic sensors. The experiment was performed with 8 participants, 4 of each gender. The stimuli include both sequences of static images from the IAPS dataset and short video excerpts focusing on negative fear-type emotions. The annotation is obtained by participant self-assessment, after a calibration phase. In the case of video stimuli, a novel simplified dynamic annotation strategy is used to enhance the quality and consistency of the self-assessments. This paper also analyses the annotation results and provides a statistical study of inter-annotator agreement. The dataset will continue to grow and will be made publicly available.

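The inter-annotator study calls for a chance-corrected agreement statistic; a minimal sketch of one common choice, Cohen's kappa between two annotators' discrete self-assessment labels, is given below (the paper does not state which coefficient it uses, so this is only an illustrative option):

```python
import numpy as np

def cohens_kappa(a, b):
    """Chance-corrected agreement between two label sequences a and b."""
    a, b = np.asarray(a), np.asarray(b)
    labels = np.unique(np.concatenate([a, b]))
    p_obs = np.mean(a == b)                       # observed agreement
    p_exp = sum(np.mean(a == c) * np.mean(b == c) for c in labels)
    return (p_obs - p_exp) / (1.0 - p_exp)

print(cohens_kappa([1, 2, 2, 3], [1, 2, 3, 3]))   # ~0.64
```
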
Human crowd detection for drone flight safety using convolutional neural networks.
Tzelepi, M.; and Tefas, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 743-747, Aug 2017.

@InProceedings{8081306,
  author = {M. Tzelepi and A. Tefas},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Human crowd detection for drone flight safety using convolutional neural networks},
  year = {2017},
  pages = {743-747},
  keywords = {convolution;neural nets;object detection;robot vision;human crowd detection;drone flight safety purposes;noncrowded scenes;crowd heatmaps;flight maps;pre-trained CNN;fully-convolutional network;drone dataset;deep convolutional neural networks;Drones;Convolution;Computational modeling;Training;Safety;Europe;Crowd detection;Drones;Safety;Convolutional Neural Networks;Deep Learning},
  doi = {10.23919/EUSIPCO.2017.8081306},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347263.pdf},
}

In this paper, a novel human crowd detection method that utilizes deep Convolutional Neural Networks (CNN) for drone flight safety purposes is proposed. The aim of our work is to provide light architectures, as imposed by the computational restrictions of the application, that can effectively distinguish between crowded and non-crowded scenes captured from drones, and provide crowd heatmaps that can be used to semantically enhance the flight maps by defining no-fly zones. To this end, we first propose to adapt a pre-trained CNN to our task by totally discarding the fully-connected layers and attaching an additional convolutional layer, transforming it into a fast fully-convolutional network that is able to produce crowd heatmaps. Second, we propose a two-loss training model, which aims to enhance the separability of the crowd and non-crowd classes. The experimental validation is performed on a new drone dataset that has been created for this specific task, and indicates the effectiveness of the proposed detector.

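A minimal sketch of the conversion step described above, using PyTorch: keep only the convolutional part of a pre-trained network and attach one extra convolutional layer with two output maps. The VGG backbone and the 1x1 kernel are assumptions, not the authors' exact architecture:

```python
import torch
import torch.nn as nn
from torchvision import models

backbone = models.vgg16(weights=None).features   # drop fully-connected layers
head = nn.Conv2d(512, 2, kernel_size=1)          # crowd vs. non-crowd maps
model = nn.Sequential(backbone, head)            # fast fully-convolutional net

x = torch.randn(1, 3, 512, 512)                  # a frame captured by the drone
heatmap = model(x).softmax(dim=1)[:, 1]          # crowd-probability heatmap
print(heatmap.shape)                             # torch.Size([1, 16, 16])
```
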
Learning local feature aggregation functions with backpropagation.
Katharopoulos, A.; Paschalidou, D.; Diou, C.; and Delopoulos, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 748-752, Aug 2017.

@InProceedings{8081307,
  author = {A. Katharopoulos and D. Paschalidou and C. Diou and A. Delopoulos},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Learning local feature aggregation functions with backpropagation},
  year = {2017},
  pages = {748-752},
  keywords = {backpropagation;feature extraction;image classification;image representation;learning (artificial intelligence);cost function minimization problem;classifier cost function;local feature aggregation function parameters;local feature space;Training;Covariance matrices;Cost function;Feature extraction;Noise measurement;Europe},
  doi = {10.23919/EUSIPCO.2017.8081307},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347199.pdf},
}

This paper introduces a family of local feature aggregation functions and a novel method to estimate their parameters, such that they generate optimal representations for classification (or any task that can be expressed as a cost function minimization problem). To achieve this, we compose the local feature aggregation function with the classifier cost function and backpropagate the gradient of this cost function in order to update the local feature aggregation function parameters. Experiments on synthetic datasets indicate that our method discovers parameters that model the class-relevant information in addition to the local feature space. Further experiments on a variety of motion and visual descriptors, on both image and video datasets, show that our method outperforms other state-of-the-art local feature aggregation functions, such as Bag of Words, Fisher Vectors and VLAD, by a large margin.

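The recipe of composing the aggregation with the classifier cost and backpropagating through both can be sketched in a few lines of PyTorch; the softmax-gated weighted average below is an assumed member of such a family, not the paper's exact parameterization:

```python
import torch
import torch.nn as nn

class LearnedAggregation(nn.Module):
    """Sketch: a learned gating weights each local descriptor, the weighted
    average is fed to a linear classifier, and the classifier loss updates
    the aggregation parameters via backpropagation."""
    def __init__(self, dim, n_classes):
        super().__init__()
        self.gate = nn.Linear(dim, 1)        # aggregation parameters
        self.clf = nn.Linear(dim, n_classes)

    def forward(self, bags):                 # bags: (batch, n_local, dim)
        a = torch.softmax(self.gate(bags), dim=1)
        pooled = (a * bags).sum(dim=1)       # aggregated representation
        return self.clf(pooled)

model = LearnedAggregation(dim=64, n_classes=5)
x = torch.randn(8, 100, 64)                  # 8 items, 100 local descriptors each
loss = nn.functional.cross_entropy(model(x), torch.randint(0, 5, (8,)))
loss.backward()                              # gradients reach the aggregation too
```
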
User adaptation of convolutional neural network for human activity recognition.
Matsui, S.; Inoue, N.; Akagi, Y.; Nagino, G.; and Shinoda, K.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 753-757, Aug 2017.

@InProceedings{8081308,
  author = {S. Matsui and N. Inoue and Y. Akagi and G. Nagino and K. Shinoda},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {User adaptation of convolutional neural network for human activity recognition},
  year = {2017},
  pages = {753-757},
  keywords = {data analysis;feedforward neural nets;learning (artificial intelligence);mobile computing;sensors;smart phones;Learning Hidden Unit Contributions;user adaptation method;gyro-scopes;smart-phone sensors;human activity recognition;convolutional neural network;user-independent model;recognition performance;smartphone data;CNN layers;time 43.0 hour;Adaptation models;Convolution;Neural networks;Data models;Magnetic sensors;Hidden Markov models;Human activity recognition;User adaptation;Convolutional neural network;Learning hidden unit contributions},
  doi = {10.23919/EUSIPCO.2017.8081308},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346392.pdf},
}

Recently, monitoring human activities using smartphone sensors, such as accelerometers, magnetometers, and gyroscopes, has proved effective for improving productivity in daily work. Since human activities differ largely among individuals, it is important to adapt the model to each individual using a small amount of his/her data. In this paper, we propose a user adaptation method using Learning Hidden Unit Contributions (LHUC) for Convolutional Neural Networks (CNN). It inserts a special layer with a small number of free parameters between every two CNN layers and estimates the free parameters using a small amount of data. We collected 43 hours of smartphone data from 9 users and utilized them to evaluate our method. It improved the recognition performance by 3.0% over a user-independent model on average. The largest improvement among users was 13.6%.

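A minimal PyTorch sketch of the adaptation layer: LHUC rescales every channel by 2*sigmoid(r), and during user adaptation only the r vectors are trained. The small 1-D convolutional backbone here is an assumed stand-in for the paper's network:

```python
import torch
import torch.nn as nn

class LHUC(nn.Module):
    """Learning Hidden Unit Contributions: per-channel rescaling by
    2*sigmoid(r); r holds the only free parameters updated per user."""
    def __init__(self, n_channels):
        super().__init__()
        self.r = nn.Parameter(torch.zeros(n_channels))

    def forward(self, x):                        # x: (batch, channels, time)
        return x * (2.0 * torch.sigmoid(self.r)).view(1, -1, 1)

net = nn.Sequential(
    nn.Conv1d(3, 16, 5), nn.ReLU(), LHUC(16),    # LHUC between CNN layers
    nn.Conv1d(16, 32, 5), nn.ReLU(), LHUC(32),
)
# user adaptation: freeze everything except the LHUC scaling parameters
for name, p in net.named_parameters():
    p.requires_grad = name.endswith(".r")
```
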
Semi-unsupervised Bayesian convex image restoration with location mixture of Gaussian.
Orieux, F.; and Chinchilla, R.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 758-762, Aug 2017.

@InProceedings{8081309,
  author = {F. Orieux and R. Chinchilla},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Semi-unsupervised Bayesian convex image restoration with location mixture of Gaussian},
  year = {2017},
  pages = {758-762},
  keywords = {Bayes methods;Gaussian processes;image restoration;image sampling;inverse problems;Markov processes;Monte Carlo methods;fallback sampling algorithm;semiunsupervised Bayesian convex image restoration;inverse problems;Bayesian approach;MCMC algorithm;Location Mixture of Gaussian;Signal processing algorithms;Bayes methods;Convolution;Europe;Image restoration;Uncertainty},
  doi = {10.23919/EUSIPCO.2017.8081309},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342516.pdf},
}

Convex image restoration is a major field in inverse problems. The problem is often addressed by hand-tuning hyperparameters. We propose an incremental contribution to a Bayesian approach in which a convex field is constructed via a Location Mixture of Gaussian and the estimator is computed with a fast MCMC algorithm. The main contributions are a new field with several operators avoiding cross-like artifacts, and a fallback sampling algorithm that prevents numerical errors. Results, in comparison to standard supervised results, show equivalent quality in a quasi-unsupervised approach and come with uncertainty quantification.

Graph-based interpolation for zooming in 3D scenes.
Akyazi, P.; and Frossard, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 763-767, Aug 2017.

@InProceedings{8081310,
  author = {P. Akyazi and P. Frossard},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Graph-based interpolation for zooming in 3D scenes},
  year = {2017},
  pages = {763-767},
  keywords = {graph theory;image reconstruction;image representation;image resolution;interpolation;rendering (computer graphics);three-dimensional displays;color plus depth format;view synthesis;depth-image-based-rendering;missing image pixels;sparsity priors;reconstructed views;analytical dictionaries;linear interpolation;graphs;interpolation methods;TV regularization;Interpolation;Dictionaries;Three-dimensional displays;Image reconstruction;Navigation;Signal processing algorithms;Signal processing;Graph signal processing (GSP);depth-image-based-rendering (DIBR);free viewpoint navigation;interpolation},
  doi = {10.23919/EUSIPCO.2017.8081310},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342186.pdf},
}

In multiview systems, the color plus depth format builds 3D representations of scenes within which users can freely navigate by changing their viewpoints. In this paper we present a framework for view synthesis when the user requests an arbitrary viewpoint that is closer to the 3D scene than the reference image. On the target image plane, the requested view obtained via depth-image-based rendering (DIBR) is irregularly structured and has missing information due to the expansion of objects. We propose a novel framework that adopts a graph-based representation of the target view in order to interpolate the missing image pixels under sparsity priors. More specifically, we impose that the target image is reconstructed with a few atoms of a graph-based dictionary. Experimental results show that the reconstructed views have better PSNR and MSSIM quality than those generated within the same framework with analytical dictionaries, and are comparable to those reconstructed with TV regularization and linear interpolation on graphs. Visual results, however, show that our method better preserves details and produces fewer disturbing artifacts than the other interpolation methods.

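The sparse-reconstruction step can be sketched with a greedy orthogonal matching pursuit restricted to the observed pixels; in the paper the atoms come from a graph-based dictionary, which would simply replace the generic dictionary D assumed below:

```python
import numpy as np

def inpaint_with_dictionary(D, y, known, n_atoms=5):
    """Sketch: approximate the target view y (boolean mask `known` marks
    observed pixels) with a few atoms of dictionary D, via greedy OMP on
    the known pixels only; the fit then extrapolates to missing pixels."""
    Dk, yk, support = D[known], y[known], []
    residual = yk.copy()
    for _ in range(n_atoms):
        support.append(int(np.argmax(np.abs(Dk.T @ residual))))
        coef, *_ = np.linalg.lstsq(Dk[:, support], yk, rcond=None)
        residual = yk - Dk[:, support] @ coef
    return D[:, support] @ coef   # estimate at all pixels, missing ones included
```
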
A multi-objective optimization for video orchestration.
Colangelo, F.; Battisti, F.; Carli, M.; and Neri, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 768-772, Aug 2017.

@InProceedings{8081311,
  author = {F. Colangelo and F. Battisti and M. Carli and A. Neri},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A multi-objective optimization for video orchestration},
  year = {2017},
  pages = {768-772},
  keywords = {cinematography;feature extraction;image sequences;optimisation;video signal processing;video orchestration;multiple video sequences;cinematographic composition rules;aesthetic feature extraction;Cameras;Feature extraction;Optimization;Image color analysis;Signal processing;Europe;Guidelines;Data analysis;multimodal signal processing;aesthetics},
  doi = {10.23919/EUSIPCO.2017.8081311},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347073.pdf},
}

In this work, the problem of video orchestration, performed by combining information extracted from multiple video sequences, is considered. The novelty of the proposed approach lies in the use of aesthetic features and cinematographic composition rules for automatically aggregating the inputs from different cameras into a single video. While prior methodologies have separately addressed the issues of aesthetic feature extraction from videos and video orchestration, in this work we exploit a set of scene features for automatically selecting the shots characterized by the best aesthetic score. In order to evaluate the effectiveness of the proposed method, a preliminary subjective experiment has been carried out with experts from the audiovisual field. The achieved results are encouraging and show that there is room for improving performance.

Person identity recognition on motion capture data using label propagation.
Symeonidis, N. N. C.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 773-777, Aug 2017.

@InProceedings{8081312,
  author = {N. N. C. Symeonidis},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Person identity recognition on motion capture data using label propagation},
  year = {2017},
  pages = {773-777},
  keywords = {biometrics (access control);feature extraction;image motion analysis;image recognition;image representation;motion capture data;activity-based person identity recognition methods;video data;gait recognition;motion capture sequences;label propagation algorithm;person identity labels;labeled sequences;Histograms;Training;Europe;Signal processing;Gait recognition;Feature extraction;Signal processing algorithms},
  doi = {10.23919/EUSIPCO.2017.8081312},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347832.pdf},
}

Most activity-based person identity recognition methods operate on video data. Moreover, the vast majority of these methods focus on gait recognition. Obviously, recognizing a subject's identity using only gait limits the applicability of the corresponding methods, whereas a method capable of recognizing the subject's identity from various activities would be much more widely applicable. In this paper, a new method for activity-based identity recognition operating on motion capture data, which can recognize the subject's identity from a variety of activities, is proposed. The method combines an existing approach for feature extraction from motion capture sequences with a label propagation algorithm for classification. The method and its variants (including a novel one that takes advantage of the fact that, in certain cases, both activity and person identity labels might exist for the labeled sequences) have been tested on two different datasets. Experimental analysis shows that the proposed approach provides very good person identity recognition results, surpassing those obtained by two other methods.

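A minimal sketch of the classification stage, using a standard label propagation scheme (Zhou et al. style) over a similarity matrix between sequence descriptors; the descriptor and similarity choices are assumptions left open here:

```python
import numpy as np

def propagate_labels(K, y, alpha=0.9, n_iter=100):
    """Sketch: graph-based label propagation over a nonnegative similarity
    matrix K between motion-capture sequence descriptors; y holds identity
    labels for labeled sequences and -1 for unlabeled ones."""
    classes = np.unique(y[y >= 0])
    Y = np.zeros((len(y), classes.size))
    for j, c in enumerate(classes):
        Y[y == c, j] = 1.0
    d = K.sum(axis=1)
    S = K / np.sqrt(np.outer(d, d))            # symmetric normalization
    F = Y.copy()
    for _ in range(n_iter):
        F = alpha * S @ F + (1 - alpha) * Y    # diffuse, keep seed labels
    return classes[F.argmax(axis=1)]           # predicted identity per sequence
```
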
Oriented asymmetric kernels for corner detection.
Abdulrahman, H.; Magnier, B.; and Montesinos, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 778-782, Aug 2017.

@InProceedings{8081313,
  author = {H. Abdulrahman and B. Magnier and P. Montesinos},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Oriented asymmetric kernels for corner detection},
  year = {2017},
  pages = {778-782},
  keywords = {edge detection;feature extraction;image colour analysis;directional kernels;corner detection algorithms;synthetic images;oriented asymmetric kernels;image analysis applications;grayscale variations;Kernel;Image edge detection;Iron;Junctions;Tensile stress;Feature extraction;Oriented filters;detection of corners},
  doi = {10.23919/EUSIPCO.2017.8081313},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347010.pdf},
}

Corners and junctions play an important role in many image analysis applications. Nevertheless, the features extracted by the majority of the algorithms proposed in the literature do not correspond to the exact position of the corners. In this paper, an approach for corner detection based on the combination of different asymmetric kernels is proposed. The information captured by the directional kernels makes it possible to describe precisely all the grayscale variations and the directions of the crossing edges around the considered pixel. Compared to other corner detection algorithms on synthetic and real images, the proposed approach remains more stable and robust to noise than the comparative methods.

Expression recognition for severely demented patients in music reminiscence-therapy.
Dantcheva, A.; Bilinski, P.; Nguyen, H. T.; Broutart, J.; and Bremond, F.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 783-787, Aug 2017.

@InProceedings{8081314,
  author = {A. Dantcheva and P. Bilinski and H. T. Nguyen and J. Broutart and F. Bremond},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Expression recognition for severely demented patients in music reminiscence-therapy},
  year = {2017},
  pages = {783-787},
  keywords = {cognition;diseases;face recognition;feature extraction;geriatrics;health care;medical disorders;patient treatment;expression recognition;severely demented patients;music reminiscence-therapy;severely demented Alzheimer's disease;verbal communication ability;clinical staff;expression monitoring;facial activities;expression states;music-therapy sessions;camera-artifacts;talking;singing;neutral;smiling;Feature extraction;Trajectory;Medical treatment;Dementia;Face recognition;Histograms;Videos},
  doi = {10.23919/EUSIPCO.2017.8081314},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346757.pdf},
}

Recognizing expressions in severely demented Alzheimer's disease (AD) patients is essential, since such patients have lost a substantial amount of their cognitive capacity, and some even their verbal communication ability (e.g., aphasia). This leaves patients dependent on clinical staff to assess their verbal and non-verbal language in order to communicate important messages, such as the discomfort associated with potential complications of AD. Such assessment classically requires the patient's presence in a clinic and a time-consuming examination involving medical personnel. Thus, expression monitoring is costly and logistically inconvenient for patients and clinical staff, which hinders, among other things, large-scale monitoring. In this work we present a novel approach for automated recognition of facial activities and expressions of severely demented patients, where we distinguish between four activity and expression states, namely talking, singing, neutral and smiling. Our approach caters to the challenging setting of current medical recordings of music-therapy sessions, which include continuous pose variations, occlusions, camera movements, camera artifacts, as well as changing illumination. Additionally and importantly, the (elderly) patients generally exhibit less pronounced facial activities and expressions, in a range of intensities and predominantly occurring in combinations (e.g., talking and smiling). Our proposed approach is based on an extension of Improved Fisher Vectors (IFV) for videos, representing a video sequence using both local and related spatio-temporal features. We test our algorithm on a dataset of over 229 video sequences acquired from 10 AD patients, with promising results that have sparked substantial interest in the medical community. The proposed approach can play a key role in the assessment of different therapy treatments, as well as in remote large-scale healthcare frameworks.

Local frame match distance: A novel approach for exemplar gesture recognition.
Ionescu, R. T.; Popescu, M.; Conly, C.; and Athitsos, V.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 788-792, Aug 2017.

@InProceedings{8081315,
  author = {R. T. Ionescu and M. Popescu and C. Conly and V. Athitsos},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Local frame match distance: A novel approach for exemplar gesture recognition},
  year = {2017},
  pages = {788-792},
  keywords = {computational complexity;computer vision;feature extraction;sign language recognition;LRD;LFMD;computer vision;exemplar-based gesture recognition;matching gestures;distance measure;dynamic time warping;local frame match distance;kernel discriminant analysis;American sign language recognition;local rank distance;Gesture recognition;Assistive technology;Kernel;Hidden Markov models;Vocabulary;Training},
  doi = {10.23919/EUSIPCO.2017.8081315},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341203.pdf},
}

Gesture recognition using a training set of limited size for a large vocabulary of gestures is a challenging problem in computer vision. With few examples per gesture class, researchers often employ state-of-the-art exemplar-based methods such as Dynamic Time Warping (DTW). This paper makes two contributions in the area of exemplar-based gesture recognition. As an alternative to DTW, we first introduce the Local Frame Match Distance (LFMD), a novel approach for matching gestures inspired by a distance measure for strings, namely the Local Rank Distance (LRD). While LRD efficiently approximates the non-alignment of character n-grams between two strings, we employ LFMD to efficiently measure the non-alignment of hand locations between two video sequences. Second, we transform LFMD into a kernel and use it in combination with Kernel Discriminant Analysis for sign language recognition with exemplars. The empirical results indicate that our method generally yields better performance than a state-of-the-art DTW approach on the challenging task of American Sign Language recognition, while reducing the computational time by 30%.

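Since the full LFMD definition is in the paper, the following sketch only renders the stated idea in the spirit of LRD: for every frame of one sequence, search a local window of the other for a matching hand location and pay the positional offset, or a penalty when no match exists. The matching rule, tolerance and penalty are assumptions:

```python
import numpy as np

def lfmd(a, b, w=10, tol=0.05, penalty=None):
    """Sketch of a local frame match distance between two sequences of
    normalized hand locations a and b, each of shape (n_frames, 2)."""
    penalty = w if penalty is None else penalty
    cost = 0.0
    for i, p in enumerate(a):
        lo, hi = max(0, i - w), min(len(b), i + w + 1)
        d = np.linalg.norm(b[lo:hi] - p, axis=1)   # candidates in the window
        j = int(d.argmin())
        cost += abs(lo + j - i) if d[j] < tol else penalty
    return cost / len(a)
```

A symmetric variant would average lfmd(a, b) and lfmd(b, a), and a kernel can then be obtained, for instance, by exponentiating the negative distance.
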
Graph scaling cut with L1-norm for classification of hyperspectral images.
Mohanty, R.; Happy, S. L.; and Routray, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 793-797, Aug 2017.

@InProceedings{8081316,
  author = {R. Mohanty and S. L. Happy and A. Routray},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Graph scaling cut with L1-norm for classification of hyperspectral images},
  year = {2017},
  pages = {793-797},
  keywords = {feature extraction;graph theory;hyperspectral imaging;image classification;iterative methods;matrix algebra;optimisation;optimal projection matrix;optimization problem;HSI classification;graph scaling cut;hyperspectral images;L1 normalized graph based dimensionality reduction method;L1-Scaling Cut;data distribution;between-class dispersion to within-class dispersion ratio;iterative algorithm;L1-SC method;HSI;SC;Optimization;Signal processing algorithms;Linear programming;Hyperspectral imaging;Iterative methods;Europe;Dimensionality reduction;Hyperspectral classification;L1-norm;L1-SC;scaling cut;Supervised learning},
  doi = {10.23919/EUSIPCO.2017.8081316},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347062.pdf},
}

In this paper, we propose an L1-normalized graph-based dimensionality reduction method for hyperspectral images, called `L1-Scaling Cut' (L1-SC). The underlying idea of this method is to generate the optimal projection matrix while retaining the original distribution of the data. Though the L2-norm is generally preferred for computation, it is sensitive to noise and outliers, whereas the L1-norm is robust to them. Therefore, we obtain the optimal projection matrix by maximizing the ratio of between-class dispersion to within-class dispersion using the L1-norm. Furthermore, an iterative algorithm is described to solve the optimization problem. The experimental results of HSI classification confirm the effectiveness of the proposed L1-SC method on both noisy and noiseless data.

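The L1 dispersion-ratio objective for a single projection direction can be sketched with plain subgradient ascent; the paper derives its own iterative scheme, so the update below only illustrates the objective:

```python
import numpy as np

def l1_dispersion_direction(X, y, n_iter=200, lr=0.01, seed=0):
    """Sketch: maximize sum(|w'(xi - xj)|) over between-class pairs divided
    by the same sum over within-class pairs, via subgradient ascent on w."""
    rng = np.random.default_rng(seed)
    w = rng.standard_normal(X.shape[1])
    w /= np.linalg.norm(w)
    same = y[:, None] == y[None, :]
    D = X[:, None, :] - X[None, :, :]          # all pairwise differences
    for _ in range(n_iter):
        p = D @ w                              # projected pairwise differences
        s = np.sign(p)[..., None] * D          # subgradients of |w'(xi - xj)|
        B, W = np.abs(p)[~same].sum(), np.abs(p)[same].sum()
        grad = s[~same].sum(axis=0) / W - (B / W ** 2) * s[same].sum(axis=0)
        w += lr * grad
        w /= np.linalg.norm(w)                 # keep the direction unit-norm
    return w
```
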
CNN-based transform index prediction in multiple transforms framework to assist entropy coding.
Puri, S.; Lasserre, S.; and Le Callet, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 798-802, Aug 2017.

@InProceedings{8081317,
  author = {S. Puri and S. Lasserre and P. {Le Callet}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {CNN-based transform index prediction in multiple transforms framework to assist entropy coding},
  year = {2017},
  pages = {798-802},
  keywords = {binary codes;data compression;entropy codes;neural nets;probability;rate distortion theory;video coding;rate distortion optimization cost;CABAC context model;CNN-based transform index prediction;2D transforms;HEVC;video encoder;compression efficiency;de-correlate residuals;video compression;entropy coding;index prediction;modified transform index coding scheme;Transforms;Indexes;Encoding;Training;Predictive models;Decoding;Feature extraction},
  doi = {10.23919/EUSIPCO.2017.8081317},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347040.pdf},
}

Recent work in video compression has shown that using multiple 2D transforms instead of a single transform to de-correlate residuals provides better compression efficiency. These transforms are tested competitively inside a video encoder and the optimal transform is selected based on the Rate Distortion Optimization (RDO) cost. However, one needs to encode a syntax element indicating the chosen transform per residual block to the decoder for successful reconstruction of the pixels. Conventionally, the transform index is binarized using fixed-length coding and a CABAC context model is attached to it. In this work, we provide a novel method that utilizes a Convolutional Neural Network to predict the chosen transform index from the quantized coefficient block. The prediction probabilities are used to binarize the index by employing variable-length coding instead of fixed-length coding. Results show that by employing this modified transform index coding scheme inside HEVC, one can achieve up to 0.59% BD-rate gain.

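The probability-driven binarization can be sketched as follows: rank the candidate transforms by the CNN's predicted probabilities and give the chosen index a truncated-unary codeword, so likelier transforms cost fewer bins. The paper's exact binarization may differ; this only shows the variable-length idea:

```python
import numpy as np

def binarize_transform_index(idx, probs):
    """Sketch: truncated-unary codeword for the chosen transform index,
    with codeword length driven by the predicted probability rank."""
    order = np.argsort(-np.asarray(probs))        # most probable first
    rank = int(np.where(order == idx)[0][0])
    last = len(probs) - 1
    return "1" * rank + ("" if rank == last else "0")

print(binarize_transform_index(2, [0.1, 0.2, 0.6, 0.1]))  # '0'  (cheapest)
print(binarize_transform_index(0, [0.1, 0.2, 0.6, 0.1]))  # '110'
```
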
Convolutional neural network-based infrared image super resolution under low light environment.
Han, T. Y.; Kim, Y. J.; and Song, B. C.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 803-807, Aug 2017.

@InProceedings{8081318,
  author = {T. Y. Han and Y. J. Kim and B. C. Song},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Convolutional neural network-based infrared image super resolution under low light environment},
  year = {2017},
  pages = {803-807},
  keywords = {convolution;image resolution;infrared imaging;neural nets;convolutional neural network;image super resolution;low light environment;CNN;near-infrared image;Hafnium;Convolution;Signal processing algorithms;Spatial resolution;Europe;Near-infrared and visible images;super-resolution;convolutional neural networks;low light images},
  doi = {10.23919/EUSIPCO.2017.8081318},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345289.pdf},
}

Convolutional neural networks (CNN) have been successfully applied to visible image super-resolution (SR). In this paper, for up-scaling near-infrared (NIR) images under low light conditions, we propose a CNN-based SR algorithm that uses a corresponding visible image. Our algorithm first extracts high-frequency (HF) components from the low-resolution (LR) NIR image and its corresponding high-resolution (HR) visible image, and then takes them as the multiple inputs of the CNN. Next, the CNN outputs the HR HF component of the input NIR image. Finally, the HR NIR image is synthesized by adding the HR HF component to the up-scaled LR NIR image. Simulation results show that the proposed algorithm outperforms state-of-the-art methods in terms of both qualitative and quantitative metrics.

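A PyTorch sketch of the multi-input idea: the HF parts of the up-scaled LR NIR image and of the registered HR visible image feed a small CNN that predicts the HR NIR HF component, which is added back to the up-scaled image. Layer sizes and the box-filter high-pass are assumptions:

```python
import torch
import torch.nn as nn

def highpass(x, k=9):
    """High-frequency part as the residual of a simple box low-pass."""
    low = nn.functional.avg_pool2d(x, k, stride=1, padding=k // 2)
    return x - low

cnn = nn.Sequential(                       # maps the two HF inputs to HR NIR HF
    nn.Conv2d(2, 32, 3, padding=1), nn.ReLU(),
    nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
    nn.Conv2d(32, 1, 3, padding=1),
)

lr_nir = torch.randn(1, 1, 64, 64)
hr_vis = torch.randn(1, 1, 128, 128)
up_nir = nn.functional.interpolate(lr_nir, scale_factor=2, mode="bicubic")
hf_in = torch.cat([highpass(up_nir), highpass(hr_vis)], dim=1)
hr_nir = up_nir + cnn(hf_in)               # add predicted HF to the up-scaled NIR
```
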
A novel global image description approach for long term vehicle localization.
Bonardi, F.; Ainouz, S.; Boutteau, R.; Dupuis, Y.; Savatier, X.; and Vasseur, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 808-812, Aug 2017.

@InProceedings{8081319,
  author = {F. Bonardi and S. Ainouz and R. Boutteau and Y. Dupuis and X. Savatier and P. Vasseur},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A novel global image description approach for long term vehicle localization},
  year = {2017},
  pages = {808-812},
  keywords = {image recognition;image sensors;object recognition;object tracking;robot vision;global image description approach;visual memory;imaging sources;visual features invariant;data interoperability;visual sensors;long-term place recognition;long term vehicle localization;Visualization;Cameras;Image sensors;Sensor phenomena and characterization;Robustness;Lighting},
  doi = {10.23919/EUSIPCO.2017.8081319},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342020.pdf},
}

Long-term place recognition for vehicles or robots in outdoor environments is still a challenging issue: when using visual sensors, numerous changes in appearance occur, due to illumination variations or weather phenomena for instance. Few methods in the literature try to manage different visual sources, although doing so could favor data interoperability across variable sensors. In this paper, we focus our work on cases where there is a need to associate data from different imaging sources (optics, sensor size and even spectral ranges). We developed a method in which a first camera composes the visual memory. Afterwards, we consider another camera which partially covers the same journey. Our goal is to associate live images with the prior visual memory by means of visual features invariant to sensor changes, with the help of a probabilistic approach for the implementation part.

A normalized mirrored correlation measure for data symmetry detection.
Gnutti, A.; Guerrini, F.; and Leonardi, R.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 813-817, Aug 2017.

@InProceedings{8081320,
  author = {A. Gnutti and F. Guerrini and R. Leonardi},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A normalized mirrored correlation measure for data symmetry detection},
  year = {2017},
  pages = {813-817},
  keywords = {correlation methods;image matching;object detection;symmetry;symmetric object detection system;natural images;local reflection symmetry points;symmetric object axis;normalized mirrored correlation measure;data symmetry detection;computer vision;computer graphics applications;2D correlation;1D sequences;Correlation;Object detection;Feature extraction;Convolution;Europe;Normalized Cross-Correlation;Reflection Symmetry Detection;Content-Based Analysis;Feature Extraction Methods},
  doi = {10.23919/EUSIPCO.2017.8081320},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347823.pdf},
}

Symmetry detection algorithms are enjoying renewed interest in the scientific community, fueled by recent advancements in computer vision and computer graphics applications. This paper is inspired by recent efforts in building a symmetric object detection system for natural images. In particular, it is first shown how correlation can be a core operator for finding local reflection symmetry points in 1-D sequences that are optimal in an energetic sense. Then, the importance of 2-D correlation in natural images for correctly aligning the symmetric object axis is demonstrated. Using correlation as described is crucial in boosting the performance of the system, as proven by the results on a standard dataset.

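For the 1-D case, the measure can be rendered directly: correlate the mean-centered window on one side of a candidate position with the mirrored window on the other side, and normalize. A minimal sketch with a planted symmetry:

```python
import numpy as np

def mirrored_correlation(x, i, r):
    """Normalized correlation between the r samples right of position i and
    the mirrored r samples left of it; values near 1 flag a local reflection
    symmetry point."""
    left = x[i - r:i][::-1]
    right = x[i + 1:i + 1 + r]
    left, right = left - left.mean(), right - right.mean()
    denom = np.linalg.norm(left) * np.linalg.norm(right)
    return float(left @ right / denom) if denom else 0.0

rng = np.random.default_rng(0)
x = rng.random(41)
x[21:31] = x[19:9:-1]                 # plant a reflection symmetry around index 20
scores = [mirrored_correlation(x, i, r=10) for i in range(10, 31)]
print(10 + int(np.argmax(scores)))    # peaks at the planted center, 20
```
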
Variational stabilized linear forgetting in state-space models.
van de Laar, T.; Cox, M.; van Diepen, A.; and de Vries, B.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 818-822, Aug 2017.

@InProceedings{8081321,
  author = {T. {van de Laar} and M. Cox and A. {van Diepen} and B. {de Vries}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Variational stabilized linear forgetting in state-space models},
  year = {2017},
  pages = {818-822},
  keywords = {graph theory;message passing;parameter estimation;probability;signal processing;state-space methods;variational message passing algorithm;generative probabilistic model;inference task;specific model simplifications;estimation tasks;state-space tracking;nonstationary natural signals;state-space models;variational stabilized linear;context-switching data environment;SLF state-space model;complex models;model parameters;Adaptation models;Hidden Markov models;Message passing;Standards;Data models;Probabilistic logic;Signal processing algorithms},
  doi = {10.23919/EUSIPCO.2017.8081321},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340624.pdf},
}

\n
\n\n\n
\n State-space modeling of non-stationary natural signals is a notoriously difficult task. As a result of context switches, the memory depth of the model should ideally be adapted online. Stabilized linear forgetting (SLF) has been proposed as an elegant method for state-space tracking in context-switching environments. In practice, SLF leads to state and parameter estimation tasks for which no analytical solutions exist. In the literature, a few approximate solutions have been derived, making use of specific model simplifications. This paper proposes an alternative approach, in which SLF is described as an inference task on a generative probabilistic model. SLF is then executed by a variational message passing algorithm on a factor graph representation of the generative model. This approach enjoys a number of advantages relative to previous work. First, variational message passing (VMP) is an automatable procedure that adapts appropriately under changing model assumptions. This eases the search process for the best model. Secondly, VMP easily extends to estimate model parameters. Thirdly, the modular make-up of the factor graph framework allows SLF to be used as a click-on feature in a large variety of complex models. The functionality of the proposed method is verified by simulating an SLF state-space model in a context-switching data environment.\n
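For orientation, the sketch below shows plain exponentially-forgetting recursive least squares, the classical forgetting mechanism that SLF stabilizes. It is only a reference point, not the paper's variational message-passing scheme, and the forgetting factor `lam` is an illustrative assumption.

```python
import numpy as np

def rls_exponential_forgetting(X, y, lam=0.99, delta=100.0):
    """Classic exponentially-forgetting RLS: lam < 1 discounts old data so
    the estimate can track context switches.  SLF refines this idea by
    forgetting only in directions excited by the data; that refinement
    (and the paper's factor-graph VMP formulation) is not reproduced."""
    n, d = X.shape
    w = np.zeros(d)
    P = delta * np.eye(d)                     # inverse information matrix
    for t in range(n):
        x = X[t]
        k = P @ x / (lam + x @ P @ x)         # gain vector
        w = w + k * (y[t] - x @ w)            # innovation update
        P = (P - np.outer(k, x @ P)) / lam    # discounted covariance update
    return w
```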
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Probabilistic cross-validation estimators for Gaussian process regression.\n \n \n \n \n\n\n \n Martino, L.; Laparra, V.; and Camps-Valls, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 823-827, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ProbabilisticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081322,\n  author = {L. Martino and V. Laparra and G. Camps-Valls},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Probabilistic cross-validation estimators for Gaussian process regression},\n  year = {2017},\n  pages = {823-827},\n  abstract = {Gaussian Processes (GPs) are state-of-the-art tools for regression. Inference of GP hyperparameters is typically done by maximizing the marginal log-likelihood (ML). If the data truly follows the GP model, using the ML approach is optimal and computationally efficient. Unfortunately very often this is not case and suboptimal results are obtained in terms of prediction error. Alternative procedures such as cross-validation (CV) schemes are often employed instead, but they usually incur in high computational costs. We propose a probabilistic version of CV (PCV) based on two different model pieces in order to reduce the dependence on a specific model choice. PCV presents the benefits from both approaches, and allows us to find the solution for either the maximum a posteriori (MAP) or the Minimum Mean Square Error (MMSE) estimators. Experiments in controlled situations reveal that the PCV solution outperforms ML for both estimators, and that PCV-MMSE results outperforms other traditional approaches.},\n  keywords = {Gaussian processes;least mean squares methods;maximum likelihood estimation;mean square error methods;regression analysis;cross-validation schemes;high computational costs;specific model choice;PCV-MMSE results;probabilistic cross-validation estimators;Gaussian process regression;GP hyperparameters;marginal log-likelihood;GP model;ML approach;minimum mean square error estimators;CV probabilistic version;Probabilistic logic;Computational modeling;Standards;Mathematical model;Robustness;Data models;Optimization;Probabilistic Cross Validation;Marginal Likelihood;MAP estimator;MMSE estimator;Gaussian Processes},\n  doi = {10.23919/EUSIPCO.2017.8081322},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347427.pdf},\n}\n\n
\n
\n\n\n
\n Gaussian Processes (GPs) are state-of-the-art tools for regression. Inference of GP hyperparameters is typically done by maximizing the marginal log-likelihood (ML). If the data truly follow the GP model, the ML approach is optimal and computationally efficient. Unfortunately, very often this is not the case and suboptimal results are obtained in terms of prediction error. Alternative procedures such as cross-validation (CV) schemes are often employed instead, but they usually incur high computational costs. We propose a probabilistic version of CV (PCV) based on two different model pieces in order to reduce the dependence on a specific model choice. PCV combines the benefits of both approaches, and allows us to find the solution for either the maximum a posteriori (MAP) or the minimum mean square error (MMSE) estimator. Experiments in controlled situations reveal that the PCV solution outperforms ML for both estimators, and that PCV-MMSE outperforms other traditional approaches.\n
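As a reference point for the ML objective discussed above, here is a self-contained numpy sketch of the GP log marginal likelihood under a squared-exponential kernel; the kernel choice and zero-mean assumption are illustrative, not taken from the paper.

```python
import numpy as np

def gp_log_marginal_likelihood(X, y, lengthscale, sigma_f, sigma_n):
    """Log marginal likelihood of a zero-mean GP with an RBF kernel:
    -0.5 y^T K^{-1} y - 0.5 log|K| - n/2 log(2 pi), computed stably
    via the Cholesky factor of K."""
    d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
    K = sigma_f**2 * np.exp(-0.5 * d2 / lengthscale**2)
    K += sigma_n**2 * np.eye(len(X))
    L = np.linalg.cholesky(K)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))   # K^{-1} y
    return (-0.5 * y @ alpha
            - np.log(np.diag(L)).sum()                    # -0.5 log|K|
            - 0.5 * len(X) * np.log(2 * np.pi))
```

Maximizing this over (lengthscale, sigma_f, sigma_n), e.g. by minimizing its negative with scipy.optimize.minimize, is the standard ML pipeline the paper's PCV is compared against.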
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluating the RBM without integration using PDF projection.\n \n \n \n \n\n\n \n Baggenstoss, P. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 828-832, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"EvaluatingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081323,\n  author = {P. M. Baggenstoss},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Evaluating the RBM without integration using PDF projection},\n  year = {2017},\n  pages = {828-832},\n  abstract = {In this paper, we apply probability density function (PDF) projection to arrive at an exact closed-form expression for the marginal distribution of the visible data of a restricted Boltzmann machine (RBM) without requiring integrating over the distribution of the hidden variables or needing to know the partition function. We express the visible data marginal as a projected PDF based on a set of sufficient statistics. When a Gaussian mixture model (GMM) is used to estimate the PDF of the sufficient statistics, then we arrive at a combined RBM/GMM model that serves as a general-purpose PDF estimator and Bayesian classifier. The approach extends recusively to compute the input distribution of a multi-layer network. We demonstrate the method using a reduced subset of the MNIST handwritten character data set.},\n  keywords = {Bayes methods;Boltzmann machines;data analysis;Gaussian processes;handwritten character recognition;optical character recognition;probability;PDF projection;probability density function projection;closed-form expression;marginal distribution;visible data;restricted Boltzmann machine;hidden variables;partition function;projected PDF;sufficient statistics;Gaussian mixture model;general-purpose PDF estimator;MNIST handwritten character data set;combined RBM-GMM model;multilayer network;Bayes methods;Europe;Signal processing;Probability density function;Computational modeling;Gaussian mixture model},\n  doi = {10.23919/EUSIPCO.2017.8081323},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342845.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we apply probability density function (PDF) projection to arrive at an exact closed-form expression for the marginal distribution of the visible data of a restricted Boltzmann machine (RBM), without requiring integration over the distribution of the hidden variables or knowledge of the partition function. We express the visible data marginal as a projected PDF based on a set of sufficient statistics. When a Gaussian mixture model (GMM) is used to estimate the PDF of the sufficient statistics, we arrive at a combined RBM/GMM model that serves as a general-purpose PDF estimator and Bayesian classifier. The approach extends recursively to compute the input distribution of a multi-layer network. We demonstrate the method using a reduced subset of the MNIST handwritten character data set.\n
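A rough sketch of the RBM-features-plus-GMM pipeline is given below. Taking the per-hidden-unit softplus terms (the summands of the RBM free energy) as the statistic is an assumption for illustration; the paper defines its own sufficient statistics and, crucially, the PDF-projection correction that this sketch omits.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

def rbm_gmm_log_density(V_train, V_test, W, b_h, n_components=5):
    """Illustrative stand-in for the combined RBM/GMM idea: map visible
    vectors v to per-hidden-unit statistics softplus(W^T v + b_h), fit a
    GMM to those features, and use its log-density as a score.  W is
    (n_visible x n_hidden), b_h is the hidden bias vector."""
    def stats(V):
        return np.logaddexp(0.0, V @ W + b_h)      # elementwise softplus
    gmm = GaussianMixture(n_components=n_components).fit(stats(V_train))
    return gmm.score_samples(stats(V_test))        # log-density per vector
```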
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data subset selection for efficient SVM training.\n \n \n \n \n\n\n \n Mourad, S.; Tewfik, A.; and Vikalo, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 833-837, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DataPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081324,\n  author = {S. Mourad and A. Tewfik and H. Vikalo},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Data subset selection for efficient SVM training},\n  year = {2017},\n  pages = {833-837},\n  abstract = {Training a support vector machine (SVM) on large data sets is a computationally intensive task. In this paper, we study the problem of selecting a subset of data for training the SVM classifier under requirement that the loss of performance due to training data reduction is low. A function quantifying suitability of a selected subset is proposed, and a greedy algorithm for solving the subset selection problem is introduced. The algorithm is evaluated on hand digit recognition and other binary classification tasks, and its performance is compared to stratified sampling methods.},\n  keywords = {data reduction;greedy algorithms;learning (artificial intelligence);pattern classification;sampling methods;support vector machines;binary classification tasks;data subset selection;support vector machine;computationally intensive task;SVM classifier;data reduction;function quantifying suitability;greedy algorithm;subset selection problem;SVM training;stratified sampling methods;Support vector machines;Training;Signal processing algorithms;Linear programming;Approximation algorithms;Nearest neighbor searches},\n  doi = {10.23919/EUSIPCO.2017.8081324},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347647.pdf},\n}\n\n
\n
\n\n\n
\n Training a support vector machine (SVM) on large data sets is a computationally intensive task. In this paper, we study the problem of selecting a subset of data for training the SVM classifier under the requirement that the loss of performance due to training-data reduction is low. A function quantifying the suitability of a selected subset is proposed, and a greedy algorithm for solving the subset selection problem is introduced. The algorithm is evaluated on handwritten digit recognition and other binary classification tasks, and its performance is compared to stratified sampling methods.\n
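The greedy template such methods follow can be sketched in a few lines. The suitability function below is an illustrative assumption (favour points near the opposite class, since those shape the SVM margin), not the function proposed in the paper.

```python
import numpy as np

def greedy_subset(utility, n_total, k):
    """Greedy maximization of a subset-scoring function: repeatedly add the
    index whose inclusion most increases `utility`."""
    chosen, remaining = [], set(range(n_total))
    for _ in range(k):
        best = max(remaining, key=lambda i: utility(chosen + [i]))
        chosen.append(best)
        remaining.remove(best)
    return chosen

def margin_utility(X, y):
    """Toy suitability function (assumption): negated mean distance of the
    selected points to their nearest opposite-class neighbour."""
    def u(S):
        return -sum(np.linalg.norm(X[y != y[i]] - X[i], axis=1).min()
                    for i in S)
    return u
```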
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unsupervised feature extraction, signal labeling, and blind signal separation in a state space world.\n \n \n \n \n\n\n \n Zalmai, N.; Keusch, R.; Malmberg, H.; and Loeliger, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 838-842, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"UnsupervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081325,\n  author = {N. Zalmai and R. Keusch and H. Malmberg and H. Loeliger},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Unsupervised feature extraction, signal labeling, and blind signal separation in a state space world},\n  year = {2017},\n  pages = {838-842},\n  abstract = {The paper addresses the problem of joint signal separation and estimation in a single-channel discrete-time signal composed of a wandering baseline and overlapping repetitions of unknown (or known) signal shapes. All signals are represented by a linear state space model (LSSM). The baseline model is driven by white Gaussian noise, but the other signal models are triggered by sparse inputs. Sparsity is achieved by normal priors with unknown variance (NUV) from sparse Bayesian learning. All signals and system parameters are jointly estimated with an efficient expectation maximization (EM) algorithm based on Gaussian message passing, which works both for known and unknown signal shapes. The proposed method outputs a sparse multi-channel representation of the given signal, which can be interpreted as a signal labeling.},\n  keywords = {Bayes methods;blind source separation;compressed sensing;expectation-maximisation algorithm;feature extraction;Gaussian noise;learning (artificial intelligence);message passing;signal representation;state-space methods;white Gaussian noise;sparse Bayesian learning;sparse multichannel representation;signal labeling;unsupervised feature extraction;blind signal separation;joint signal separation;single-channel discrete-time signal;linear state space model;joint signal estimation;expectation maximization algorithm;Gaussian message passing;Shape;Message passing;Signal processing algorithms;Electrocardiography;Estimation;Gaussian noise;Dictionaries},\n  doi = {10.23919/EUSIPCO.2017.8081325},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341584.pdf},\n}\n\n
\n
\n\n\n
\n The paper addresses the problem of joint signal separation and estimation in a single-channel discrete-time signal composed of a wandering baseline and overlapping repetitions of unknown (or known) signal shapes. All signals are represented by a linear state space model (LSSM). The baseline model is driven by white Gaussian noise, but the other signal models are triggered by sparse inputs. Sparsity is achieved by normal priors with unknown variance (NUV) from sparse Bayesian learning. All signals and system parameters are jointly estimated with an efficient expectation maximization (EM) algorithm based on Gaussian message passing, which works for both known and unknown signal shapes. The proposed method outputs a sparse multi-channel representation of the given signal, which can be interpreted as a signal labeling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Clustering and causality inference using algorithmic complexity.\n \n \n \n \n\n\n \n Revolle, M.; Cayre, F.; and Le Bihan, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 843-847, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ClusteringPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081326,\n  author = {M. Revolle and F. Cayre and N. {Le Bihan}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Clustering and causality inference using algorithmic complexity},\n  year = {2017},\n  pages = {843-847},\n  abstract = {We present a set of algorithmic complexity estimates. We derive a normalized semi-distance that is shown to outperform the state-of-the-art. We also propose estimators for causality inference on directed acyclic graphs. Illustrative applications include clustering of human writing systems and causality assessment on novel drafts.},\n  keywords = {computational complexity;directed graphs;graph theory;pattern clustering;causality inference;algorithmic complexity estimates;estimators;directed acyclic graphs;causality assessment;clustering;human writing systems;Complexity theory;Signal processing algorithms;Inference algorithms;Encoding;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081326},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347023.pdf},\n}\n\n
\n
\n\n\n
\n We present a set of algorithmic complexity estimates. We derive a normalized semi-distance that is shown to outperform the state-of-the-art. We also propose estimators for causality inference on directed acyclic graphs. Illustrative applications include clustering of human writing systems and causality assessment on novel drafts.\n
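The paper's own estimators are not reproducible from the abstract, but the classic normalized compression distance below is the standard computable proxy for algorithmic-complexity distance in this literature, and a natural baseline for the normalized semi-distance the authors improve on.

```python
import zlib

def ncd(x: bytes, y: bytes) -> float:
    """Normalized compression distance: approximate the (uncomputable)
    Kolmogorov complexity K(.) by compressed length C(.), giving
    NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y))."""
    cx, cy = len(zlib.compress(x)), len(zlib.compress(y))
    cxy = len(zlib.compress(x + y))
    return (cxy - min(cx, cy)) / max(cx, cy)

# e.g. clustering writing systems: feed pairwise ncd() values of text
# samples into any distance-based clustering algorithm
```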
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unsupervised time domain nonlinear post-equalization for ACO-OFDM visible light communication systems.\n \n \n \n \n\n\n \n Ávila, F. R.; and Lovisolo, L.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 848-852, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"UnsupervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081327,\n  author = {F. R. Ávila and L. Lovisolo},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Unsupervised time domain nonlinear post-equalization for ACO-OFDM visible light communication systems},\n  year = {2017},\n  pages = {848-852},\n  abstract = {LED nonlinearity is an important issue limiting the performance of Visible Light Communication (VLC) systems. This form of distortion is particularly problematic when the system employs Optical-OFDM because of the high Peak-to-Average Power Ratio (PAPR) of its time domain symbol. This paper proposes using ancillary statistical properties of the O-OFDM signal in order to mitigate LED nonlinearities in an unsupervised fashion. By exploring the Gaussianity of the time domain OFDM signal and the idea of distribution equalization, we propose a semi-parametric approach to blind nonlinear post-equalization for asymmetrically clipped O-OFDM (ACO-OFDM) VLC systems. In addition to not requiring training data, the equalizer is robust to different LED types and it is adaptive to time-varying nonlinearities. Simulations with a realistic LED model show that the developed tool is capable of substantially mitigating the effects of nonlinear distortion on system performance.},\n  keywords = {equalisers;free-space optical communication;interference (signal);light emitting diodes;nonlinear distortion;OFDM modulation;optical modulation;unsupervised time domain nonlinear post-equalization;ACO-OFDM visible light communication systems;LED nonlinearity;time domain symbol;time domain OFDM signal;blind nonlinear post-equalization;asymmetrically clipped O-OFDM VLC systems;peak-to-average power ratio;optical-OFDM;Light emitting diodes;Time-domain analysis;OFDM;Equalizers;Optical distortion;Nonlinear distortion;Nonlinear optics},\n  doi = {10.23919/EUSIPCO.2017.8081327},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347631.pdf},\n}\n\n
\n
\n\n\n
\n LED nonlinearity is an important issue limiting the performance of Visible Light Communication (VLC) systems. This form of distortion is particularly problematic when the system employs Optical-OFDM because of the high Peak-to-Average Power Ratio (PAPR) of its time domain symbol. This paper proposes using ancillary statistical properties of the O-OFDM signal in order to mitigate LED nonlinearities in an unsupervised fashion. By exploring the Gaussianity of the time domain OFDM signal and the idea of distribution equalization, we propose a semi-parametric approach to blind nonlinear post-equalization for asymmetrically clipped O-OFDM (ACO-OFDM) VLC systems. In addition to not requiring training data, the equalizer is robust to different LED types and it is adaptive to time-varying nonlinearities. Simulations with a realistic LED model show that the developed tool is capable of substantially mitigating the effects of nonlinear distortion on system performance.\n
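The distribution-equalization idea can be sketched in a few lines: estimate the empirical CDF of the received samples and map it onto the distribution the undistorted signal should have. The sketch below targets a plain Gaussian, which matches ordinary time-domain OFDM; the ACO-OFDM case involves clipped (half-Gaussian) statistics and the paper's semi-parametric machinery, which this memoryless illustration omits.

```python
import numpy as np
from scipy.stats import norm

def distribution_equalize(r):
    """Memoryless blind post-equalizer sketch: rank-map received samples
    onto standard-normal quantiles (Gaussianization), then restore the
    empirical mean and scale.  A monotone memoryless nonlinearity is
    undone up to an affine ambiguity."""
    r = np.asarray(r, dtype=float)
    ranks = np.argsort(np.argsort(r))      # empirical CDF ordering
    u = (ranks + 0.5) / len(r)             # mid-rank CDF values in (0, 1)
    z = norm.ppf(u)                        # Gaussian quantile transform
    return r.mean() + r.std() * z          # affine re-anchoring (assumption)
```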
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimized block-diagonalization precoding technique using Givens rotations QR decomposition.\n \n \n \n \n\n\n \n Crâşmariu, V.; Arvinte, M.; Enescu, A.; and Ciochină, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 853-857, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OptimizedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081328,\n  author = {V. Crâşmariu and M. Arvinte and A. Enescu and S. Ciochină},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimized block-diagonalization precoding technique using givens rotations QR decomposition},\n  year = {2017},\n  pages = {853-857},\n  abstract = {The emerging 5G mobile communication standard aims to increase the throughput and, in the same time, to considerably increase the number of users serviced concurrently (Internet of Things). The key direction for achieving these requirements is to heavily extend the use of the spatial degrees of freedom, especially in the multi-user scenarios. Multi-User Massive MIMO (MU-MIMO) is one of the key technologies that responds well to the 5G needs. However, the use of MU-MIMO in the downlink direction raises the collaborative detection problem at the user side, thus the elimination of the inter-user interference becomes necessary. The paper presents a reduced complexity linear transmitter precoding technique that cancels the inter-user interference in a downlink MU-MIMO system. The reduced complexity is achieved through re-using as many low-level operations as possible. The method is suitable for implementation on any modern processor and proven to be scalable to a Massive MIMO scenario without any loss in performance.},\n  keywords = {5G mobile communication;Internet of Things;MIMO communication;multiuser detection;precoding;QR codes;radiofrequency interference;Internet of Things;downlink MU-MIMO system;QR decomposition;interuser interference;block-diagonalization precoding technique;5G mobile communication standard;multiuser massive MIMO;linear transmitter precoding technique;Precoding;Signal processing algorithms;MIMO;Matrix decomposition;Complexity theory;Null space;Interference;MU-MIMO;Massive MIMO;Precoding;Reduced complexity;Block-Diagonalization},\n  doi = {10.23919/EUSIPCO.2017.8081328},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347526.pdf},\n}\n\n
\n
\n\n\n
\n The emerging 5G mobile communication standard aims to increase the throughput and, at the same time, to considerably increase the number of users serviced concurrently (Internet of Things). The key direction for achieving these requirements is to heavily extend the use of the spatial degrees of freedom, especially in multi-user scenarios. Multi-User Massive MIMO (MU-MIMO) is one of the key technologies that responds well to the 5G needs. However, the use of MU-MIMO in the downlink direction raises the collaborative detection problem at the user side, so the elimination of inter-user interference becomes necessary. The paper presents a reduced-complexity linear transmitter precoding technique that cancels the inter-user interference in a downlink MU-MIMO system. The reduced complexity is achieved by re-using as many low-level operations as possible. The method is suitable for implementation on any modern processor and is proven to be scalable to a Massive MIMO scenario without any loss in performance.\n
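For reference, the textbook block-diagonalization construction looks as follows; the paper's contribution is a cheaper Givens-rotation QR realization of this step, which the SVD-based sketch below does not attempt to reproduce.

```python
import numpy as np

def bd_precoders(H_list):
    """Textbook block diagonalization: user k's precoder is confined to
    the null space of all other users' stacked channels, cancelling
    inter-user interference exactly.  H_list[k] is user k's (rx x tx)
    channel; requires enough transmit antennas for a non-trivial null
    space.  Returns one precoding matrix per user."""
    precoders = []
    for k, Hk in enumerate(H_list):
        H_bar = np.vstack([H for j, H in enumerate(H_list) if j != k])
        _, _, Vh = np.linalg.svd(H_bar)
        null = Vh[np.linalg.matrix_rank(H_bar):].conj().T  # null-space basis
        _, _, Vk = np.linalg.svd(Hk @ null)                # beamform within it
        precoders.append(null @ Vk.conj().T)
    return precoders
```

Sanity check: `H_list[j] @ precoders[k]` is numerically zero for every j != k.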
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On differential modulation in downlink multiuser MIMO systems.\n \n \n \n \n\n\n \n Alsifiany, F.; Ikhlef, A.; and Chambers, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 558-562, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081329,\n  author = {F. Alsifiany and A. Ikhlef and J. Chambers},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On differential modulation in downlink multiuser MIMO systems},\n  year = {2017},\n  pages = {558-562},\n  abstract = {In this paper, we consider a space time block coded multiuser multiple-input multiple-output (MU-MIMO) system with downlink transmission. Specifically, we propose to use downlink precoding combined with differential modulation (DM) to shift the complexity from the receivers to the transmitter. The block diagonalization (BD) precoding scheme is used to cancel co-channel interference (CCI) in addition to exploiting its advantage of enhancing diversity. Since the BD scheme requires channel knowledge at the transmitter, we propose to use downlink spreading along with DM, which does not require channel knowledge neither at the transmitter nor at the receivers. The orthogonal spreading (OS) scheme is employed in order to separate the data streams of different users. As a space time block code, we use the Alamouti code that can be encoded/decoded using DM thereby eliminating the need for channel knowledge at the receiver. The proposed schemes yield low complexity transceivers while providing good performance. Monte Carlo simulation results demonstrate the effectiveness of the proposed schemes.},\n  keywords = {cochannel interference;decoding;diversity reception;MIMO communication;multiuser channels;precoding;radio transceivers;space-time block codes;differential modulation;downlink multiuser MIMO systems;MU-MIMO;downlink transmission;downlink precoding;block diagonalization precoding scheme;co-channel interference;channel knowledge;downlink spreading;orthogonal spreading scheme;Alamouti code;low complexity transceivers;space time block coded multiuser multiple-input multiple-output system;Downlink;Transmitters;Complexity theory;Precoding;Channel estimation;Receiving antennas;Differential modulation;Alamouti STBC;multiuser MIMO;block diagonalization;orthogonal spreading code},\n  doi = {10.23919/EUSIPCO.2017.8081329},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347515.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we consider a space-time block coded multiuser multiple-input multiple-output (MU-MIMO) system with downlink transmission. Specifically, we propose to use downlink precoding combined with differential modulation (DM) to shift the complexity from the receivers to the transmitter. The block diagonalization (BD) precoding scheme is used to cancel co-channel interference (CCI), in addition to exploiting its advantage of enhancing diversity. Since the BD scheme requires channel knowledge at the transmitter, we propose to use downlink spreading along with DM, which requires channel knowledge at neither the transmitter nor the receivers. The orthogonal spreading (OS) scheme is employed in order to separate the data streams of different users. As a space-time block code, we use the Alamouti code, which can be encoded/decoded using DM, thereby eliminating the need for channel knowledge at the receiver. The proposed schemes yield low-complexity transceivers while providing good performance. Monte Carlo simulation results demonstrate the effectiveness of the proposed schemes.\n
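The differential mechanism at the heart of this can be sketched compactly: each transmitted 2x2 block is the previous block multiplied by a unitary Alamouti matrix, so two consecutive received blocks suffice for detection without channel state. This is the classic differential STBC recursion, not the paper's full BD/spreading system.

```python
import numpy as np

def alamouti_unitary(s1, s2):
    """Unitary 2x2 Alamouti matrix for unit-modulus (PSK) symbols."""
    return np.array([[s1, s2],
                     [-np.conj(s2), np.conj(s1)]]) / np.sqrt(2)

def diff_alamouti_encode(symbol_pairs):
    """Differential Alamouti encoding: X_k = X_{k-1} @ C_k with C_k
    unitary, starting from a known reference block.  Over a quasi-static
    channel H, Y_k = H X_k implies Y_k = Y_{k-1} @ C_k, so C_k (and hence
    the symbol pair) is detectable from received blocks alone."""
    X = np.eye(2, dtype=complex) / np.sqrt(2)    # reference block (assumption)
    blocks = [X]
    for s1, s2 in symbol_pairs:
        X = X @ alamouti_unitary(s1, s2)
        blocks.append(X)
    return blocks
```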
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Precoding under instantaneous per-antenna peak power constraint.\n \n \n \n \n\n\n \n Jedda, H.; Mezghani, A.; Swindlehurst, A. L.; and Nossek, J. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 863-867, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PrecodingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081330,\n  author = {H. Jedda and A. Mezghani and A. L. Swindlehurst and J. A. Nossek},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Precoding under instantaneous per-antenna peak power constraint},\n  year = {2017},\n  pages = {863-867},\n  abstract = {We consider a multi-user (MU) multiple-input-single-output (MISO) downlink system with M single-antenna users and N transmit antennas with a nonlinear power amplifier (PA) at each antenna. Instead of emitting constant envelope (CE) signals from the antennas to have highly power efficient PAs, we relax the CE constraint and allow the transmit signals to have instantaneous power less than or equal to the available power at each PA. The PA power efficiency decreases but simulation results show that the same performance in terms of bit-error-ratio (BER) can be achieved with less transmitted power and less PA power consumption. We propose a linear and a nonlinear precoder design to mitigate the multi-user interference (MUI) under the constraint of a maximal instantaneous per-antenna peak power.},\n  keywords = {MIMO communication;power amplifiers;precoding;radiofrequency interference;transmitting antennas;precoding;per-antenna peak power constraint;multiuser multiple-input-single-output;M single-antenna users;N transmit antennas;nonlinear power amplifier;PA power efficiency;PA power consumption;nonlinear precoder design;multiuser interference;PA;Nonlinear distortion;Signal processing algorithms;Interference;Optimization;Transmitting antennas},\n  doi = {10.23919/EUSIPCO.2017.8081330},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347051.pdf},\n}\n\n
\n
\n\n\n
\n We consider a multi-user (MU) multiple-input-single-output (MISO) downlink system with M single-antenna users and N transmit antennas with a nonlinear power amplifier (PA) at each antenna. Instead of emitting constant envelope (CE) signals from the antennas to have highly power-efficient PAs, we relax the CE constraint and allow the transmit signals to have instantaneous power less than or equal to the available power at each PA. The PA power efficiency decreases, but simulation results show that the same performance in terms of bit-error-ratio (BER) can be achieved with less transmitted power and less PA power consumption. We propose a linear and a nonlinear precoder design to mitigate the multi-user interference (MUI) under the constraint of a maximal instantaneous per-antenna peak power.\n
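A building block that such constrained designs typically need is the projection onto the per-antenna peak-power set; a minimal sketch (illustrative, not the paper's precoder) follows.

```python
import numpy as np

def project_per_antenna_peak(x, p_max):
    """Project a complex transmit vector onto {x : |x_n|^2 <= p_max}:
    phases are preserved, over-budget magnitudes are clipped to
    sqrt(p_max).  The constant-envelope case would instead force
    |x_n|^2 == p_max for every antenna."""
    mag = np.abs(x)
    scale = np.minimum(1.0, np.sqrt(p_max) / np.maximum(mag, 1e-15))
    return x * scale
```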
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint carrier frequency offset and channel impulse response estimation for linear periodic channels.\n \n \n \n \n\n\n \n Shaked, R.; Shlezinger, N.; and Dabora, R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 868-872, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081331,\n  author = {R. Shaked and N. Shlezinger and R. Dabora},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint carrier frequency offset and channel impulse response estimation for linear periodic channels},\n  year = {2017},\n  pages = {868-872},\n  abstract = {We study joint estimation of the channel impulse response (CIR) and of the carrier frequency offset (CFO) for linear channels in which both the CIR and the noise statistics vary periodically in time. This model corresponds to interference-limited communications as well as to power line communication and doubly selective channels. We first consider the joint maximum likelihood estimator (JMLE) for the CIR and the CFO and show it has a high computational complexity and relatively low spectral efficiency. This motivates the derivation of two estimation schemes with higher spectral efficiency and lower computational complexity compared to the JMLE, obtained by exploiting both the periodicity of the channel and the fact that, typically, the delay-Doppler spreading function of the CIR is approximately sparse, without requiring a-priori knowledge of the sparsity pattern. The proposed estimation schemes are numerically tested and the results demonstrate that substantial benefits can be obtained by properly accounting for the approximate sparsity and periodicity in the design of the estimation scheme.},\n  keywords = {carrier transmission on power lines;channel estimation;computational complexity;frequency estimation;interference (signal);maximum likelihood estimation;transient response;linear periodic channels;joint estimation;CFO;noise statistics;interference-limited communications;power line communication;doubly selective channels;joint maximum likelihood estimator;estimation scheme;delay-Doppler spreading function;computational complexity;spectral efficiency;joint carrier frequency offset and channel impulse response estimation;CIR estimation;JMLE;Channel estimation;Computational complexity;Maximum likelihood estimation;Europe;Signal processing;Computational modeling},\n  doi = {10.23919/EUSIPCO.2017.8081331},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342030.pdf},\n}\n\n
\n
\n\n\n
\n We study joint estimation of the channel impulse response (CIR) and of the carrier frequency offset (CFO) for linear channels in which both the CIR and the noise statistics vary periodically in time. This model corresponds to interference-limited communications as well as to power line communication and doubly selective channels. We first consider the joint maximum likelihood estimator (JMLE) for the CIR and the CFO and show it has a high computational complexity and relatively low spectral efficiency. This motivates the derivation of two estimation schemes with higher spectral efficiency and lower computational complexity compared to the JMLE, obtained by exploiting both the periodicity of the channel and the fact that, typically, the delay-Doppler spreading function of the CIR is approximately sparse, without requiring a-priori knowledge of the sparsity pattern. The proposed estimation schemes are numerically tested and the results demonstrate that substantial benefits can be obtained by properly accounting for the approximate sparsity and periodicity in the design of the estimation scheme.\n
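Although the paper's joint ML and sparsity-aware schemes are beyond an abstract-level sketch, the classic repeated-block CFO estimator below shows the basic mechanism such methods build on; it is a standard baseline, not the proposed algorithm.

```python
import numpy as np

def cfo_from_repeated_block(r, N):
    """Moose-type CFO estimator: a CFO of eps (in units of the subcarrier
    spacing) rotates the second copy of an N-sample training block by
    exp(j*2*pi*eps) relative to the first, so its phase reveals eps.
    Requires len(r) >= 2N and |eps| < 0.5 for unambiguous estimation."""
    phase = np.angle(np.vdot(r[:N], r[N:2 * N]))   # sum conj(r1) * r2
    return phase / (2 * np.pi)
```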
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low-complexity detection based on Landweber method in the uplink of Massive MIMO systems.\n \n \n \n \n\n\n \n Zhang, W.; Bao, X.; and Dai, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 873-877, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Low-complexityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081332,\n  author = {W. Zhang and X. Bao and J. Dai},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low-complexity detection based on landweber method in the uplink of Massive MIMO systems},\n  year = {2017},\n  pages = {873-877},\n  abstract = {In this paper, we present low-complexity uplink detection algorithms in Massive MIMO systems. We treat the uplink detection as an ill-posed problem and adopt Landweber Method to solve it. In order to reduce the computational complexity and increase the convergence rate, we optimize the relax factor and propose improved Landweber Method with optimal relax factor (ILM-O) algorithm. We also try to reduce the order of Landweber Method by introducing a set of coefficients and propose reduced order Landweber Method (ROLM) algorithm. A analysis on the convergence and the complexity is provided. Numerical results show that the proposed algorithms outperform the existing algorithm significantly when the system scale is large.},\n  keywords = {computational complexity;MIMO communication;low-complexity uplink detection algorithms;computational complexity;massive MIMO systems;reduced order Landweber method algorithm;ill-posed problem;optimal relax factor;Signal processing algorithms;Complexity theory;Uplink;MIMO;Convergence;Algorithm design and analysis;Detection algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081332},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347651.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present low-complexity uplink detection algorithms for Massive MIMO systems. We treat uplink detection as an ill-posed problem and adopt the Landweber method to solve it. In order to reduce the computational complexity and increase the convergence rate, we optimize the relaxation factor and propose the improved Landweber method with optimal relaxation factor (ILM-O) algorithm. We also reduce the order of the Landweber method by introducing a set of coefficients, yielding the reduced-order Landweber method (ROLM) algorithm. An analysis of the convergence and complexity is provided. Numerical results show that the proposed algorithms significantly outperform the existing algorithm when the system scale is large.\n
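For concreteness, plain Landweber iteration for the linear model y = Ax + n looks as follows; the fixed relaxation factor used here is the classical fastest choice for a fixed step, in the spirit of (but not identical to) the paper's ILM-O optimization.

```python
import numpy as np

def landweber_detect(A, y, n_iter=50, tau=None):
    """Landweber iteration x <- x + tau * A^H (y - A x).  It converges to
    the least-squares solution for 0 < tau < 2 / sigma_max(A)^2; for a
    well-conditioned A the classical fastest fixed step is
    tau = 2 / (sigma_max^2 + sigma_min^2), used below as a default."""
    if tau is None:
        s = np.linalg.svd(A, compute_uv=False)
        tau = 2.0 / (s[0]**2 + s[-1]**2)
    x = np.zeros(A.shape[1], dtype=complex)
    for _ in range(n_iter):
        x = x + tau * (A.conj().T @ (y - A @ x))
    return x
```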
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive zero padded CB-FMT for LTE uplink transmission in the high mobility scenario.\n \n \n \n \n\n\n \n Girotto, M.; and Tonello, A. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 878-882, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081333,\n  author = {M. Girotto and A. M. Tonello},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive zero padded CB-FMT for LTE uplink transmission in the high mobility scenario},\n  year = {2017},\n  pages = {878-882},\n  abstract = {LTE is the most recent standard for the mobile cellular communication. To achieve high speed communications, multi-carrier modulations have been adopted both for the downlink and the uplink. In the LTE downlink, OFDMA (the multi-user version of OFDM) has been chosen. The LTE uplink uses instead SC-FDMA modulation, an OFDM alternative. In this work, the use of CB-FMT, jointly with SC-FDMA, is analyzed in the high mobility scenario. Numerical results show that CB-FMT outperforms SC-FDMA in several cases. Thus, an adaptive implementation architecture that allows to flexibly choose the modulation scheme is proposed to maximize the achievable rate. Furthermore, for compliance with existing LTE parameters, frequency domain zero padded CB-FMT is proposed, and the problem of designing optimal capacity wise waveforms is considered.},\n  keywords = {cellular radio;Long Term Evolution;OFDM modulation;multicarrier modulations;LTE downlink;SC-FDMA modulation;OFDM alternative;high mobility scenario;adaptive implementation architecture;frequency domain zero padded CB-FMT;adaptive zero padded CB-FMT;LTE uplink transmission;mobile cellular communication;high speed communications;LTE parameters;Prototypes;Discrete Fourier transforms;OFDM;Modulation;Long Term Evolution;Uplink;Receivers},\n  doi = {10.23919/EUSIPCO.2017.8081333},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347504.pdf},\n}\n\n
\n
\n\n\n
\n LTE is the most recent standard for mobile cellular communication. To achieve high-speed communication, multi-carrier modulations have been adopted for both the downlink and the uplink. In the LTE downlink, OFDMA (the multi-user version of OFDM) has been chosen. The LTE uplink instead uses SC-FDMA modulation, an OFDM alternative. In this work, the use of CB-FMT, jointly with SC-FDMA, is analyzed in the high-mobility scenario. Numerical results show that CB-FMT outperforms SC-FDMA in several cases. Thus, an adaptive implementation architecture that allows the modulation scheme to be chosen flexibly is proposed to maximize the achievable rate. Furthermore, for compliance with existing LTE parameters, frequency-domain zero-padded CB-FMT is proposed, and the problem of designing capacity-wise optimal waveforms is considered.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low complexity hybrid precoding in finite dimensional channel for massive MIMO systems.\n \n \n \n \n\n\n \n Chen, Y.; Boussakta, S.; Tsimenidis, C.; Chambers, J.; and Jin, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 883-887, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LowPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081334,\n  author = {Y. Chen and S. Boussakta and C. Tsimenidis and J. Chambers and S. Jin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low complexity hybrid precoding in finite dimensional channel for massive MIMO systems},\n  year = {2017},\n  pages = {883-887},\n  abstract = {Massive multiple-input multiple-output (MIMO) is an emerging technology for future wireless networks, scaling up conventional MIMO to an unprecedented number of antennas at base stations. Such a large antenna array has the potential to make the system achieve high channel capacity and spectral efficiency, but it also leads to high cost in terms of hardware complexity. In this paper, we consider a finite dimensional channel model in which finite distinct directions are applied with M angular bins. In massive multi-user MIMO systems, a hybrid precoding method is proposed to reduce the required number of radio frequency (RF) chains at the base station, employing a single antenna per mobile station. The proposed precoder is partitioned into a high-dimensional RF precoder and a low-dimensional baseband precoder. The RF precoder is designed to obtain power gain with phase-only control and the baseband precoder is designed to facilitate multi-stream processing. For realistic scenarios, we consider the situation where the RF phase control is quantized up to B bits of precision. Furthermore, an upper bound on spectral efficiency is derived with the proposed precoding scheme. The simulation results show that hybrid precoding achieves desirable performance in terms of spectral efficiency, which approaches the performance of zero-forcing precoding.},\n  keywords = {antenna arrays;channel capacity;MIMO communication;precoding;spectral efficiency;precoding scheme;low complexity hybrid precoding;massive MIMO systems;base station;antenna array;high channel capacity;hardware complexity;finite dimensional channel model;massive multiuser MIMO systems;hybrid precoding method;single antenna;mobile station;high-dimensional RF precoder;low-dimensional baseband precoder;phase-only control;multistream processing;RF phase control;massive multiple-input multiple-output;wireless networks;Precoding;Radio frequency;Mobile communication;MIMO;Base stations;Baseband;Hardware;Massive MIMO;precoding;finite dimension;RF chain;hardware complexity},\n  doi = {10.23919/EUSIPCO.2017.8081334},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347101.pdf},\n}\n\n
\n
\n\n\n
\n Massive multiple-input multiple-output (MIMO) is an emerging technology for future wireless networks, scaling up conventional MIMO to an unprecedented number of antennas at base stations. Such a large antenna array has the potential to make the system achieve high channel capacity and spectral efficiency, but it also leads to high cost in terms of hardware complexity. In this paper, we consider a finite dimensional channel model in which finite distinct directions are applied with M angular bins. In massive multi-user MIMO systems, a hybrid precoding method is proposed to reduce the required number of radio frequency (RF) chains at the base station, employing a single antenna per mobile station. The proposed precoder is partitioned into a high-dimensional RF precoder and a low-dimensional baseband precoder. The RF precoder is designed to obtain power gain with phase-only control and the baseband precoder is designed to facilitate multi-stream processing. For realistic scenarios, we consider the situation where the RF phase control is quantized up to B bits of precision. Furthermore, an upper bound on spectral efficiency is derived with the proposed precoding scheme. The simulation results show that hybrid precoding achieves desirable performance in terms of spectral efficiency, which approaches the performance of zero-forcing precoding.\n
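The split the abstract describes can be illustrated in a few lines: a unit-modulus RF stage whose phases are quantized to B bits, followed by a zero-forcing baseband stage on the effective channel. Matched-filter phases for the RF stage are an illustrative choice, not necessarily the paper's design.

```python
import numpy as np

def hybrid_precoder(H, B=2):
    """Hybrid precoding sketch for a (users x antennas) channel H: the RF
    stage is phase-only with B-bit quantized phase shifters; the baseband
    stage zero-forces the effective channel seen through the RF stage."""
    phases = np.angle(H.conj().T)                        # matched-filter phases
    step = 2 * np.pi / 2**B
    F_rf = np.exp(1j * np.round(phases / step) * step)   # unit modulus, B bits
    F_bb = np.linalg.pinv(H @ F_rf)                      # zero-forcing baseband
    F = F_rf @ F_bb
    return F / np.linalg.norm(F, 'fro')                  # total-power normalization
```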
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Intercarrier interference of multiple access UFMC with flexible subcarrier spacings.\n \n \n \n\n\n \n Marijanović, L.; Schwarz, S.; and Rupp, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 888-892, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081335,\n  author = {L. Marijanović and S. Schwarz and M. Rupp},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Intercarrier interference of multiple access UFMC with flexible subcarrier spacings},\n  year = {2017},\n  pages = {888-892},\n  abstract = {In this paper we consider multi-user multicarrier transmissions with flexible subcarrier spacings. In a scenario of multiple users access with different subcarrier spacings the orthogonality between subcarriers is disrupted. The paper presents the Intercarrier Interference (ICI) that occurs between two users with different subcarrier spacings in a Universal Filtered Multicarrier (UFMC) system. We investigate the interference induced by one user to another. We propose closed form functions for ICI for both users and validate them with simulation results. Also, we provide simulation results of the ICI power in terms of filter lengths and number of guard subcarriers and we calculate the corresponding achieved time-frequency efficiencies.},\n  keywords = {intercarrier interference;multi-access systems;Intercarrier interference;multiple access UFMC;flexible subcarrier spacings;multiuser multicarrier transmissions;ICI;Universal Filtered Multicarrier system;guard subcarriers;time-frequency efficiencies;OFDM;Receivers;Integrated circuits;Interference;Signal processing;Indexes;Bandwidth;Intercarrier interference;UFMC;guard subcarriers;subcarrier spacing;filter length},\n  doi = {10.23919/EUSIPCO.2017.8081335},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper we consider multi-user multicarrier transmissions with flexible subcarrier spacings. In a scenario where multiple users access the channel with different subcarrier spacings, the orthogonality between subcarriers is lost. The paper presents the Intercarrier Interference (ICI) that occurs between two users with different subcarrier spacings in a Universal Filtered Multicarrier (UFMC) system. We investigate the interference induced by one user on another. We propose closed-form functions for the ICI for both users and validate them with simulation results. We also provide simulation results for the ICI power as a function of filter length and the number of guard subcarriers, and we calculate the corresponding achieved time-frequency efficiencies.\n
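The orthogonality loss is easy to reproduce numerically: over a common receive window, subcarriers of equal spacing are orthogonal, while subcarriers of mixed spacings leak into each other. The sketch below uses plain rectangular-windowed exponentials; UFMC subband filtering changes the exact numbers but not the effect.

```python
import numpy as np

def ici_power(f1, f2, N):
    """Normalized interference power between two subcarriers at
    frequencies f1, f2 (cycles/sample) over an N-sample rectangular
    receive window: |<a, b>/N|^2 for the two complex exponentials."""
    n = np.arange(N)
    a = np.exp(2j * np.pi * f1 * n)
    b = np.exp(2j * np.pi * f2 * n)
    return np.abs(np.vdot(a, b) / N) ** 2

print(ici_power(1/64, 2/64, 64))    # ~0: equal spacing, orthogonal
print(ici_power(1/64, 1/128, 64))   # > 0: mixed spacings leak
```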
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Highly efficient representation of reconfigurable code based on a radio virtual machine: Optimization to any target platform.\n \n \n \n \n\n\n \n Ivanov, V.; Jin, Y.; Choi, S.; Destino, G.; Mueck, M.; and Frascolla, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 893-897, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HighlyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081336,\n  author = {V. Ivanov and Y. Jin and S. Choi and G. Destino and M. Mueck and V. Frascolla},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Highly efficient representation of reconfigurable code based on a radio virtual machine: Optimization to any target platform},\n  year = {2017},\n  pages = {893-897},\n  abstract = {ETSI has developed a novel Software Radio Reconfiguration framework encompassing technical, certification and security solutions. Compared to legacy Software Reconfiguration technology, such as the Software Communications Architecture, the ETSI solution is designed for lowest overall power consumption and efficiency. For this purpose, a novel approach for Code Portability has been developed - a Radio Virtual Machine based mechanism allows converting a given algorithm into a generic representation, which is then, optimized for the specific hardware resources available on a target platform. This contribution explains the basic principles and outlines how Code Portability is achieved while meeting the objectives in terms of power consumption and complexity.},\n  keywords = {optimisation;reconfigurable architectures;security of data;software radio;virtual machines;highly efficient representation;reconfigurable code;certification;security solutions;legacy Software Reconfiguration technology;Software Communications Architecture;ETSI solution;generic representation;power consumption;code portability;radio virtual machine based mechanism;optimization;Software Radio Reconfiguration framework;Software;Computer architecture;Virtual machining;Hardware;Software radio;Mobile handsets;Europe;Software Reconfiguration;Software Defined Radio;Radio Virtual Machine},\n  doi = {10.23919/EUSIPCO.2017.8081336},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347596.pdf},\n}\n\n
\n
\n\n\n
\n ETSI has developed a novel Software Radio Reconfiguration framework encompassing technical, certification and security solutions. Compared to legacy Software Reconfiguration technology, such as the Software Communications Architecture, the ETSI solution is designed for the lowest overall power consumption and highest efficiency. For this purpose, a novel approach to Code Portability has been developed: a Radio Virtual Machine based mechanism allows converting a given algorithm into a generic representation, which is then optimized for the specific hardware resources available on the target platform. This contribution explains the basic principles and outlines how Code Portability is achieved while meeting the objectives in terms of power consumption and complexity.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reconfiguration of 5G radio interface for positioning and communication.\n \n \n \n \n\n\n \n Saloranta, J.; and Destino, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 898-902, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ReconfigurationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081337,\n  author = {J. Saloranta and G. Destino},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Reconfiguration of 5G radio interface for positioning and communication},\n  year = {2017},\n  pages = {898-902},\n  abstract = {In addition to high data rate, millimeter-wave technology has great potential to provide extremely high localization accuracy. In this paper, we outline the benefits of this technology for positioning and their main applications, which are no longer confined to services only but also to improve communication. We shall focus on the trade-off between data communication and positioning looking the reconfiguration mechanisms of the radio interface. Specifically, in this paper we investigate a trade-off between achievable data rate and positioning capability via position and rotation error bound analysis, with the aim of achieving an optimal trade-off.},\n  keywords = {5G mobile communication;data communication;positioning;millimeter-wave technology;reconfiguration mechanisms;rotation error bound analysis;high localization accuracy;data communication;5G radio interface;5G mobile communication;Data communication;Measurement;Receivers;OFDM;Signal to noise ratio;Antennas},\n  doi = {10.23919/EUSIPCO.2017.8081337},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347003.pdf},\n}\n\n
\n
\n\n\n
\n In addition to high data rates, millimeter-wave technology has great potential to provide extremely high localization accuracy. In this paper, we outline the benefits of this technology for positioning and its main applications, which are no longer confined to services but also extend to improving communication itself. We focus on the trade-off between data communication and positioning, looking at the reconfiguration mechanisms of the radio interface. Specifically, we investigate the trade-off between achievable data rate and positioning capability via position and rotation error bound analysis, with the aim of achieving an optimal trade-off.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive automotive communications solutions of 10 years lifetime enabled by ETSI RRS software reconfiguration technology.\n \n \n \n \n\n\n \n Jin, Y.; Ahn, H.; Kim, K.; Choi, S.; Mueck, M.; Frascolla, V.; and Haustein, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 903-906, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081338,\n  author = {Y. Jin and H. Ahn and K. Kim and S. Choi and M. Mueck and V. Frascolla and T. Haustein},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive automotive communications solutions of 10 years lifetime enabled by ETSI RRS software reconfiguration technology},\n  year = {2017},\n  pages = {903-906},\n  abstract = {The vehicles typically have a lifetime of several years, possibly even longer than 10 years, over which communication technology will almost certainly evolve dramatically. The challenge of automotive communication platform is to ensure that a radio communication component remains relevant over the entire lifetime of a vehicle. A highly efficient software reconfiguration solution is introduced in this paper. ETSI Reconfiguration Radio System technology provides a suitable framework for automotive communication platform which allows to either add or replace entire Radio Access Technologies or to upgrade specific components across any of the entire layers.},\n  keywords = {mobile radio;radio access networks;telecommunication standards;radio communication;radio access technologies;adaptive automotive communications;ETSI reconfiguration radio system technology;ETSI RRS software reconfiguration technology;time 10.0 year;Automotive engineering;Software;Standards;Computer architecture;Hardware;Rats;Access control;Automotive Communication;ETSI RRS;Software Reconfiguration;Standard Architecture;Standard Interface},\n  doi = {10.23919/EUSIPCO.2017.8081338},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347589.pdf},\n}\n\n
\n
\n\n\n
\n Vehicles typically have a lifetime of several years, possibly longer than 10 years, over which communication technology will almost certainly evolve dramatically. The challenge for an automotive communication platform is to ensure that the radio communication component remains relevant over the entire lifetime of the vehicle. A highly efficient software reconfiguration solution is introduced in this paper. ETSI Reconfigurable Radio System technology provides a suitable framework for an automotive communication platform, which allows entire Radio Access Technologies to be added or replaced, or specific components to be upgraded, across any of the layers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The scaled reassigned spectrogram adapted for detection and localisation of transient signals.\n \n \n \n \n\n\n \n Reinhold, I.; Starkhammar, J.; and Sandsten, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 907-911, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081339,\n  author = {I. Reinhold and J. Starkhammar and M. Sandsten},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {The scaled reassigned spectrogram adapted for detection and localisation of transient signals},\n  year = {2017},\n  pages = {907-911},\n  abstract = {The reassigned spectrogram can be used to improve the readability of a time-frequency representation of a non-stationary and multi-component signal. However for transient signals the reassignment needs to be adapted in order to achieve good localisation of the signal components. One approach is to scale the reassignment. This paper shows that by adapting the shape of the time window used with the spectrogram and by scaling the reassignment, perfect localisation can be achieved for a transient signal component. It is also shown that without matching the shape of the window, perfect localisation is not achieved. This is used to both identify the time-frequency centres of components in a multi-component signal, and to detect the shapes of the signal components. The scaled reassigned spectrogram with the matching shape window is shown to be able to resolve close components and works well for multi-components signals with noise. An echolocation signal from a beluga whale (Delphinapterus leucas) provides an example of how the method performs on a measured signal.},\n  keywords = {estimation theory;signal detection;signal representation;time-frequency analysis;transient signals;time-frequency representation;multicomponent signal;time window;perfect localisation;transient signal component;time-frequency centres;matching shape window;multicomponents signals;scaled reassigned spectrogram;Spectrogram;Time-frequency analysis;Transient analysis;Shape;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081339},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342827.pdf},\n}\n\n
\n
\n\n\n
\n The reassigned spectrogram can be used to improve the readability of a time-frequency representation of a non-stationary and multi-component signal. However, for transient signals the reassignment needs to be adapted in order to achieve good localisation of the signal components. One approach is to scale the reassignment. This paper shows that by adapting the shape of the time window used with the spectrogram and by scaling the reassignment, perfect localisation can be achieved for a transient signal component. It is also shown that without matching the shape of the window, perfect localisation is not achieved. This is used both to identify the time-frequency centres of components in a multi-component signal and to detect the shapes of the signal components. The scaled reassigned spectrogram with the matched window shape is shown to be able to resolve close components and works well for multi-component signals with noise. An echolocation signal from a beluga whale (Delphinapterus leucas) provides an example of how the method performs on a measured signal.\n
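The reassignment step itself is compact enough to sketch in NumPy. Below is a minimal, illustrative scaled reassigned spectrogram with a Gaussian analysis window; the scaling factor `c`, the sign conventions, and all parameter values are assumptions for illustration, not the authors' exact formulation (which additionally matches the window shape to the component).

```python
import numpy as np

def stft(x, win, hop, nfft):
    # frame the (zero-padded) signal and take an FFT per frame
    L = len(win)
    xp = np.pad(x, (L // 2, L // 2))
    starts = np.arange(0, len(x), hop)
    frames = np.stack([xp[s:s + L] * win for s in starts])
    return np.fft.rfft(frames, nfft, axis=1), starts

# Gaussian analysis window h and its companions t*h(t) and h'(t)
L, sigma = 255, 20.0
t = np.arange(L) - L // 2
h = np.exp(-t**2 / (2 * sigma**2))
th = t * h
dh = -t / sigma**2 * h

hop, nfft = 4, 512
n = np.arange(2048)
x = np.exp(-((n - 1000) / 30.0) ** 2) * np.cos(2 * np.pi * 0.1 * n)  # transient

Sh, starts = stft(x, h, hop, nfft)
Sth, _ = stft(x, th, hop, nfft)
Sdh, _ = stft(x, dh, hop, nfft)

eps = 1e-10
c = 2.0  # c = 1: classical reassignment; c = 2: scaled, for a Gaussian transient
tshift = c * np.real(Sth * np.conj(Sh) / (np.abs(Sh) ** 2 + eps))        # samples
fshift = -c * np.imag(Sdh * np.conj(Sh) / (np.abs(Sh) ** 2 + eps)) \
         * nfft / (2 * np.pi)                                            # bins

# re-deposit each spectrogram value at its reassigned time-frequency point
S = np.abs(Sh) ** 2
RS = np.zeros_like(S)
ti = np.clip(np.round((starts[:, None] + tshift) / hop).astype(int), 0, S.shape[0] - 1)
fi = np.clip(np.round(np.arange(S.shape[1])[None, :] + fshift).astype(int), 0, S.shape[1] - 1)
np.add.at(RS, (ti, fi), S)
```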
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Designing optimal sampling schemes.\n \n \n \n \n\n\n \n Swärd, J.; Elvander, F.; and Jakobsson, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 912-916, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DesigningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081340,\n  author = {J. Swärd and F. Elvander and A. Jakobsson},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Designing optimal sampling schemes},\n  year = {2017},\n  pages = {912-916},\n  abstract = {In this work, we propose a method for finding an optimal, non-uniform, sampling scheme for a general class of signals in which the signal measurements may be non-linear functions of the parameters to be estimated. Formulated as a convex optimization problem reminiscent of the sensor selection problem, the method determines an optimal sampling scheme given a suitable estimation bound on the parameters of interest. The formulation also allows for putting emphasis on a particular set of parameters of interest by scaling the optimization problem in such a way that the bound to be minimized becomes more sensitive to these parameters. For the case of imprecise a priori knowledge of these parameters, we present a framework for customizing the sampling scheme to take such uncertainty into account. Numerical examples illustrate the efficiency of the proposed scheme.},\n  keywords = {convex programming;signal sampling;signal measurements;nonlinear functions;convex optimization problem;sensor selection problem;optimal sampling schemes;Nuclear magnetic resonance;Optimization;Upper bound;Signal processing;Convex functions;Estimation;Uncertainty},\n  doi = {10.23919/EUSIPCO.2017.8081340},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345475.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we propose a method for finding an optimal, non-uniform sampling scheme for a general class of signals in which the signal measurements may be non-linear functions of the parameters to be estimated. Formulated as a convex optimization problem reminiscent of the sensor selection problem, the method determines an optimal sampling scheme given a suitable estimation bound on the parameters of interest. The formulation also allows emphasis to be placed on a particular set of parameters of interest by scaling the optimization problem in such a way that the bound to be minimized becomes more sensitive to these parameters. For the case of imprecise a priori knowledge of these parameters, we present a framework for customizing the sampling scheme to take such uncertainty into account. Numerical examples illustrate the efficiency of the proposed scheme.\n
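As a concrete illustration of this class of design problem, the sketch below solves a relaxed A-optimal sample selection with CVXPY, minimising a Cramér-Rao-type bound trace(FIM⁻¹) over relaxed selection weights. The exponential model, the budget K, and the rounding step are illustrative assumptions, not the paper's exact formulation.

```python
import numpy as np
import cvxpy as cp

# Candidate sample times and a toy two-parameter model y(t) = a * exp(-b*t).
t = np.linspace(0.0, 1.0, 100)
a0, b0 = 1.0, 3.0
# Jacobian of the model w.r.t. (a, b), evaluated at nominal parameter values.
J = np.stack([np.exp(-b0 * t), -a0 * t * np.exp(-b0 * t)], axis=1)

K = 10                       # sampling budget
w = cp.Variable(len(t))      # relaxed 0/1 selection weights
FIM = sum(w[i] * np.outer(J[i], J[i]) for i in range(len(t)))
# A-optimal design: minimise trace(FIM^-1), a Cramer-Rao-type bound.
prob = cp.Problem(cp.Minimize(cp.matrix_frac(np.eye(2), FIM)),
                  [w >= 0, w <= 1, cp.sum(w) <= K])
prob.solve()
chosen = np.sort(t[np.argsort(w.value)[-K:]])   # round relaxation to K times
print(chosen)
```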
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sub-band equalization filter design for improving dynamic range performance of modulated wideband converter.\n \n \n \n \n\n\n \n Alp, Y. K.; Gok, G.; and Korucu, A. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 917-921, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Sub-bandPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081341,\n  author = {Y. K. Alp and G. Gok and A. B. Korucu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Sub-band equalization filter design for improving dynamic range performance of modulated wideband converter},\n  year = {2017},\n  pages = {917-921},\n  abstract = {In this work, we propose an iterative method to improve the dynamic range performance of the Modulated Wideband Converter (MWC), which is multi-channel sampling system for digitizing wideband sparse signals below the Nyquist limit without loss of information by using compressive sensing techniques. Our method jointly designs FIR filters for each sub-band to equalize the frequency response characteristics of the all sub-bands of the MWC. Obtained results from the extensive computer simulations of the MWC system show that the proposed method improves the dynamic range performance of the MWC system significantly.},\n  keywords = {compressed sensing;FIR filters;frequency response;iterative methods;signal sampling;dynamic range performance;modulated wideband converter;iterative method;multichannel sampling system;wideband sparse signals;FIR filters;Nyquist limit;compressive sensing techniques;sub-band equalization filter design;frequency response;MWC system;Frequency response;Dynamic range;Finite impulse response filters;Wideband;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081341},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347402.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we propose an iterative method to improve the dynamic range performance of the Modulated Wideband Converter (MWC), a multi-channel sampling system for digitizing wideband sparse signals below the Nyquist limit without loss of information by using compressive sensing techniques. Our method jointly designs FIR filters for each sub-band to equalize the frequency response characteristics of all sub-bands of the MWC. Results obtained from extensive computer simulations of the MWC system show that the proposed method improves its dynamic range performance significantly.\n
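For a single sub-band, a least-squares FIR equalizer can be sketched as follows; the toy sub-band response and the flat target are assumptions, and the paper's method additionally iterates over, and jointly couples, all sub-bands of the MWC.

```python
import numpy as np

def ls_equalizer(Hk, w, ntaps, target=None):
    """Least-squares FIR taps g so that G(e^jw) * Hk(e^jw) ~ target on grid w."""
    target = np.ones_like(Hk) if target is None else target
    E = np.exp(-1j * np.outer(w, np.arange(ntaps)))   # DTFT matrix, rows e^{-jwn}
    A = E * Hk[:, None]                               # response of the cascade
    g, *_ = np.linalg.lstsq(np.vstack([A.real, A.imag]),
                            np.concatenate([target.real, target.imag]),
                            rcond=None)
    return g

# toy sub-band response with gain and phase ripple to be flattened
w = np.linspace(-np.pi, np.pi, 256)
Hk = (1 + 0.2 * np.cos(3 * w)) * np.exp(-1j * 0.1 * np.sin(2 * w))
g = ls_equalizer(Hk, w, ntaps=31)
Geq = np.exp(-1j * np.outer(w, np.arange(31))) @ g * Hk
print("max deviation from flat:", np.max(np.abs(Geq - 1)))
```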
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-iterative filter bank phase (re)construction.\n \n \n \n \n\n\n \n Průša, Z.; and Holighaus, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 922-926, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Non-iterativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081342,\n  author = {Z. Průša and N. Holighaus},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Non-iterative filter bank phase (re)construction},\n  year = {2017},\n  pages = {922-926},\n  abstract = {Signal reconstruction from magnitude-only measurements presents a long-standing problem in signal processing. In this contribution, we propose a phase (re)construction method for filter banks with uniform decimation and controlled frequency variation. The suggested procedure extends the recently introduced phase-gradient heap integration and relies on a phase-magnitude relationship for filter bank coefficients obtained from Gaussian filters. Admissible filter banks are modeled as the discretization of certain generalized translation-invariant systems, for which we derive the phase-magnitude relationship explicitly. The implementation for discrete signals is described and the performance of the algorithm is evaluated on a range of real and synthetic signals.},\n  keywords = {channel bank filters;signal reconstruction;signal reconstruction;signal processing;filter bank coefficients;Gaussian filters;magnitude-only measurements;phase-gradient heap integration;noniterative filter bank phasereconstruction method;Signal processing algorithms;Time-frequency analysis;Phase measurement;Europe;Image reconstruction;Filter banks},\n  doi = {10.23919/EUSIPCO.2017.8081342},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346927.pdf},\n}\n\n
\n
\n\n\n
\n Signal reconstruction from magnitude-only measurements presents a long-standing problem in signal processing. In this contribution, we propose a phase (re)construction method for filter banks with uniform decimation and controlled frequency variation. The suggested procedure extends the recently introduced phase-gradient heap integration and relies on a phase-magnitude relationship for filter bank coefficients obtained from Gaussian filters. Admissible filter banks are modeled as the discretization of certain generalized translation-invariant systems, for which we derive the phase-magnitude relationship explicitly. The implementation for discrete signals is described and the performance of the algorithm is evaluated on a range of real and synthetic signals.\n
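For contrast, the standard iterative baseline that such a non-iterative method replaces is Griffin-Lim-style alternating projection; a minimal sketch using SciPy's STFT is shown below (window, overlap and iteration count are illustrative assumptions).

```python
import numpy as np
from scipy.signal import stft, istft

def griffin_lim(mag, iters=50, nperseg=512, noverlap=384, seed=0):
    # iterative phase retrieval from STFT magnitude (the costly baseline)
    rng = np.random.default_rng(seed)
    angles = np.exp(2j * np.pi * rng.random(mag.shape))
    for _ in range(iters):
        _, x = istft(mag * angles, nperseg=nperseg, noverlap=noverlap)
        _, _, S = stft(x, nperseg=nperseg, noverlap=noverlap)
        S = S[:, :mag.shape[1]]                # guard against frame-count drift
        angles[:, :S.shape[1]] = np.exp(1j * np.angle(S))
    _, x = istft(mag * angles, nperseg=nperseg, noverlap=noverlap)
    return x

fs = 8000
n = np.arange(2 * fs)
sig = np.sin(2 * np.pi * 440 * n / fs) * np.hanning(len(n))
_, _, S = stft(sig, nperseg=512, noverlap=384)
rec = griffin_lim(np.abs(S))   # reconstruct from magnitude only
```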
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A closed-form transfer function of 2-D maximally flat half-band FIR digital filters with arbitrary filter orders.\n \n \n \n \n\n\n \n Shinohara, T.; Yoshida, T.; and Aikawa, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 927-930, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081343,\n  author = {T. Shinohara and T. Yoshida and N. Aikawa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A closed-form transfer function of 2-D maximally flat half-band FIR digital filters with arbitrary filter orders},\n  year = {2017},\n  pages = {927-930},\n  abstract = {2-D maximally flat diamond-shaped half-band linear phase FIR digital filters are used in sampling structure conversion. In some cases, this filter is expected to have different filter order for each dimension. However, the conventional methods can realize such filters only if difference between each order is 2, 4 and 6. In this paper, we proposed a closed-form transfer function of 2-D low-pass maximally flat diamond-shaped half-band FIR digital filters with arbitrary filter orders. The constraints to treat arbitrary filter orders are firstly proposed. Then, a closed-form transfer function is achieved by using Bernstein polynomial.},\n  keywords = {FIR filters;polynomials;transfer functions;Bernstein polynomial;2D low-pass maximally flat diamond;2D maximally flat half-band FIR digital filters;filter order;half-band linear phase FIR digital filters;arbitrary filter orders;closed-form transfer function;Finite impulse response filters;Transfer functions;Design methodology;Europe;Two dimensional displays;TV},\n  doi = {10.23919/EUSIPCO.2017.8081343},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347816.pdf},\n}\n\n
\n
\n\n\n
\n 2-D maximally flat diamond-shaped half-band linear-phase FIR digital filters are used in sampling structure conversion. In some cases, such a filter is expected to have a different filter order for each dimension. However, the conventional methods can realize such filters only if the difference between the orders is 2, 4 or 6. In this paper, we propose a closed-form transfer function for 2-D low-pass maximally flat diamond-shaped half-band FIR digital filters with arbitrary filter orders. The constraints required to handle arbitrary filter orders are first proposed. Then, a closed-form transfer function is obtained by using a Bernstein polynomial.\n
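In one dimension, the Bernstein-polynomial construction of a maximally flat half-band response has a well-known closed form, evaluated in the sketch below; the paper's 2-D diamond-shaped design generalizes this idea, so the code is only a 1-D analogue.

```python
import numpy as np
from scipy.special import comb

# 1-D Bernstein-form maximally flat half-band response;
# x = (1 - cos w)/2 maps w in [0, pi] onto x in [0, 1].
def maxflat_halfband(w, N):
    x = (1 - np.cos(w)) / 2
    return sum(comb(2 * N - 1, k) * x**k * (1 - x)**(2 * N - 1 - k)
               for k in range(N))

w = np.linspace(0, np.pi, 512)
H = maxflat_halfband(w, N=4)
# half-band property: H(w) + H(pi - w) = 1 for all w
assert np.allclose(H + maxflat_halfband(np.pi - w, 4), 1.0)
```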
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Co-prime arrays and difference set analysis.\n \n \n \n \n\n\n \n Dias, U. V.; and Srirangarajan, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 931-935, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Co-primePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081344,\n  author = {U. V. Dias and S. Srirangarajan},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Co-prime arrays and difference set analysis},\n  year = {2017},\n  pages = {931-935},\n  abstract = {Co-prime arrays have gained in popularity as an efficient way to estimate second order statistics at the Nyquist rate from sub-Nyquist samples without any sparsity constraint. We derive an expression for the degrees of freedom and the number of consecutive values in the difference set for the prototype co-prime array. This work shows that, under the wide sense stationarity (WSS) condition, larger consecutive difference values can be achieved by using the union of all the difference sets. We provide a closed-form expression in order to determine the number of sample pairs that are available for estimating the statistics for each value of the difference set, also known as the weight function. The estimation accuracy and latency depends on the number of sample pairs used for estimating the second order statistic. We also obtain the closed-form expression for the bias of the correlogram spectral estimate. Simulation results show that the co-prime based periodogram and biased correlogram estimate are equivalent, and the reconstruction using our proposed formulation provides lower latency.},\n  keywords = {estimation theory;higher order statistics;signal reconstruction;signal sampling;spectral analysis;Nyquist rate;sub-Nyquist samples;sparsity constraint;degrees of freedom;wide sense stationarity condition;larger consecutive difference values;closed-form expression;estimation accuracy;coprime arrays;second order statistics;correlogram spectral estimation;coprime based periodogram;biased correlogram estimatiob;Antenna arrays;Prototypes;Estimation;Correlation;Array signal processing;Spectral analysis;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081344},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570348264.pdf},\n}\n\n
\n
\n\n\n
\n Co-prime arrays have gained in popularity as an efficient way to estimate second order statistics at the Nyquist rate from sub-Nyquist samples without any sparsity constraint. We derive an expression for the degrees of freedom and the number of consecutive values in the difference set for the prototype co-prime array. This work shows that, under the wide sense stationarity (WSS) condition, larger consecutive difference values can be achieved by using the union of all the difference sets. We provide a closed-form expression to determine the number of sample pairs that are available for estimating the statistics for each value of the difference set, also known as the weight function. The estimation accuracy and latency depend on the number of sample pairs used for estimating the second order statistic. We also obtain a closed-form expression for the bias of the correlogram spectral estimate. Simulation results show that the co-prime based periodogram and the biased correlogram estimate are equivalent, and that reconstruction using our proposed formulation provides lower latency.\n
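The difference set and weight function of the prototype co-prime array are easy to enumerate directly; a small sketch, with M and N chosen only for illustration:

```python
import numpy as np

M, N = 4, 5   # co-prime pair
# prototype co-prime array: sensors at M*[0..N-1] and N*[0..M-1] (units of d)
pos = np.union1d(M * np.arange(N), N * np.arange(M))

# weight function: how many sensor pairs generate each lag of the difference set
lags, weights = np.unique(pos[:, None] - pos[None, :], return_counts=True)

# length of the consecutive-lag run starting at lag 0
nn = lags[lags >= 0]
run = 0
while run + 1 < len(nn) and nn[run + 1] == nn[run] + 1:
    run += 1
print("positions:", pos)
print("consecutive lags up to:", nn[run])     # M + N - 1 = 8 for this pair
print("weight of lag 0:", weights[lags == 0][0])
```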
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gait recognition using normalized shadows.\n \n \n \n \n\n\n \n Verlekar, T. T.; Correia, P. L.; and Soares, L. D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 936-940, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"GaitPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081345,\n  author = {T. T. Verlekar and P. L. Correia and L. D. Soares},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Gait recognition using normalized shadows},\n  year = {2017},\n  pages = {936-940},\n  abstract = {Surveillance of public spaces is often conducted with the help of cameras placed at elevated positions. Recently, drones with high resolution cameras have made it possible to perform overhead surveillance of critical spaces. However, images obtained in these conditions may not contain enough body features to allow conventional biometric recognition. This paper introduces a novel gait recognition system which uses the shadows cast by users, when available. It includes two main contributions: (i) a method for shadow segmentation, which analyzes the orientation of the silhouette contour to identify the feet position along time, in order to separate the body and shadow silhouettes connected at such positions; (ii) a method that normalizes the segmented shadow silhouettes, by applying a transformation derived from optimizing the low rank textures of a gait texture image, to compensate for changes in view and shadow orientation. The normalized shadow silhouettes can then undergo a gait recognition algorithm, which in this paper relies on the computation of a gait energy image, combined with linear discriminant analysis for user recognition. The proposed system outperforms the available state-of-the-art, being robust to changes in acquisition viewpoints.},\n  keywords = {cameras;feature extraction;gait analysis;image motion analysis;image recognition;image resolution;image segmentation;image texture;normalized shadows;high resolution cameras;overhead surveillance;body features;shadow segmentation;silhouette contour;feet position;segmented shadow silhouettes;low rank textures;gait texture image;shadow orientation;normalized shadow silhouettes;gait energy image;user recognition;biometric recognition;gait recognition system;Foot;Cameras;Gait recognition;Lighting;Robustness;Sun;Surveillance;Shadow Biometrics;Gait Recognition},\n  doi = {10.23919/EUSIPCO.2017.8081345},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346451.pdf},\n}\n\n
\n
\n\n\n
\n Surveillance of public spaces is often conducted with the help of cameras placed at elevated positions. Recently, drones with high resolution cameras have made it possible to perform overhead surveillance of critical spaces. However, images obtained in these conditions may not contain enough body features to allow conventional biometric recognition. This paper introduces a novel gait recognition system which uses the shadows cast by users, when available. It includes two main contributions: (i) a method for shadow segmentation, which analyzes the orientation of the silhouette contour to identify the feet position along time, in order to separate the body and shadow silhouettes connected at such positions; (ii) a method that normalizes the segmented shadow silhouettes, by applying a transformation derived from optimizing the low rank textures of a gait texture image, to compensate for changes in view and shadow orientation. The normalized shadow silhouettes can then undergo a gait recognition algorithm, which in this paper relies on the computation of a gait energy image, combined with linear discriminant analysis for user recognition. The proposed system outperforms the available state-of-the-art, being robust to changes in acquisition viewpoints.\n
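The recognition back end described here (gait energy image followed by linear discriminant analysis) can be sketched compactly; the silhouette data below is synthetic, and the paper's preprocessing (shadow segmentation and view normalisation via low-rank textures) is omitted.

```python
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

# Gait energy image (GEI): average of size-normalised, centred binary
# silhouettes over one gait cycle; silhouettes are a (T, H, W) stack.
def gait_energy_image(silhouettes):
    return silhouettes.mean(axis=0)

# toy data: noisy random "silhouette" stacks for 3 subjects, 4 sequences each
rng = np.random.default_rng(0)
X, y = [], []
for subject in range(3):
    proto = rng.random((32, 24)) < 0.3
    for _ in range(4):
        seq = proto[None] ^ (rng.random((20, 32, 24)) < 0.05)  # noisy cycle
        X.append(gait_energy_image(seq.astype(float)).ravel())
        y.append(subject)

clf = LinearDiscriminantAnalysis().fit(X[::2], y[::2])   # enrol half
print("identification accuracy:", clf.score(X[1::2], y[1::2]))
```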
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Biometric symmetry: Implications on template protection.\n \n \n\n\n \n Gomez-Barrero, M.; Rathgeb, C.; Raja, K. B.; Raghavendra, R.; and Busch, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 941-945, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081346,\n  author = {M. Gomez-Barrero and C. Rathgeb and K. B. Raja and R. Raghavendra and C. Busch},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Biometric symmetry: Implications on template protection},\n  year = {2017},\n  pages = {941-945},\n  abstract = {In the past, many efforts have been directed to develop biometric template protection schemes to guard biometric reference data, i.e. templates. One fundamental premise in the design of such schemes is that the average entropy of the templates should be maximised in order to improve the level of protection. In parallel, several works have addressed the difficult problem of measuring the average entropy of biometric characteristics. However, the impact of the correlation present in different regions of a single biometric characteristic (e.g., left and right part of the face) or within two instances of a single subject (e.g., left and right palmprints) on the joint entropy of a multi-biometric template has been overlooked so far. In this paper, we address this issue and propose a way to measure such correlation from an information theoretical perspective. We then apply the proposed measure to a particular case study based on periocular biometrics, using the MobBIO database. The results show that up to 70% of the information comprised in both periocular regions of a given subject is correlated. Finally, we analyse the implications of such average mutual information loss on biometric template protection schemes.},\n  keywords = {biometrics (access control);database management systems;entropy;face recognition;feature extraction;fingerprint identification;iris recognition;biometric template protection schemes;average entropy;biometric characteristics;single biometric characteristic;multibiometric template;periocular biometrics;periocular regions;average mutual information loss;biometric template protection;biometric symmetry;biometric reference data;Feature extraction;Entropy;Correlation;Face;Mutual information;Iris recognition;Privacy},\n  doi = {10.23919/EUSIPCO.2017.8081346},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In the past, many efforts have been directed to develop biometric template protection schemes to guard biometric reference data, i.e. templates. One fundamental premise in the design of such schemes is that the average entropy of the templates should be maximised in order to improve the level of protection. In parallel, several works have addressed the difficult problem of measuring the average entropy of biometric characteristics. However, the impact of the correlation present in different regions of a single biometric characteristic (e.g., left and right part of the face) or within two instances of a single subject (e.g., left and right palmprints) on the joint entropy of a multi-biometric template has been overlooked so far. In this paper, we address this issue and propose a way to measure such correlation from an information theoretical perspective. We then apply the proposed measure to a particular case study based on periocular biometrics, using the MobBIO database. The results show that up to 70% of the information comprised in both periocular regions of a given subject is correlated. Finally, we analyse the implications of such average mutual information loss on biometric template protection schemes.\n
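One simple way to quantify such correlation is a histogram-based mutual information estimate between quantized feature values from the two regions; the sketch below uses synthetic correlated features as a stand-in, since the actual periocular features are not reproduced here.

```python
import numpy as np
from sklearn.metrics import mutual_info_score

# toy stand-ins for left/right periocular feature values (strongly correlated)
rng = np.random.default_rng(1)
left = rng.normal(size=5000)
right = 0.8 * left + 0.2 * rng.normal(size=5000)

def quantize(v, bins=16):
    # equiprobable histogram cells from the empirical distribution
    edges = np.quantile(v, np.linspace(0, 1, bins + 1)[1:-1])
    return np.digitize(v, edges)

mi = mutual_info_score(quantize(left), quantize(right))      # I(L;R) in nats
h_left = mutual_info_score(quantize(left), quantize(left))   # H(L) = I(L;L)
print(f"shared fraction of entropy: {mi / h_left:.2f}")
```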
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ASePPI, an adaptive scrambling enabling privacy protection and intelligibility in H.264/AVC.\n \n \n \n \n\n\n \n Ruchaud, N.; and Dugelay, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 946-950, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ASePPI,Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081347,\n  author = {N. Ruchaud and J. Dugelay},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {ASePPI, an adaptive scrambling enabling privacy protection and intelligibility in H.264/AVC},\n  year = {2017},\n  pages = {946-950},\n  abstract = {The usage of video surveillance systems increases more and more every year and protecting people privacy becomes a serious concern. In this paper, we present ASePPI, an Adaptive Scrambling enabling Privacy Protection and Intelligibility. It operates in the DCT domain within the H.264 standard. For each residual block of the luminance channel inside the region of interest, we encrypt the coefficients. Whereas encrypted coefficients appear as noise in the protected image, the DC value is dedicated to restore some of the original information. Thus, the proposed approach automatically adapts the level of protection according to the resolution of the region of interest. Comparing to existing methods, our framework provides better privacy protection with some flexibilities on the appearance of the protected version yielding better visibility of the scene for monitoring. Moreover, the impact on the source coding stream is negligible. Indeed, the results demonstrate a slight decrease in the quality of the reconstructed images and a small percentage of bits overhead.},\n  keywords = {cryptography;data privacy;image reconstruction;video coding;video surveillance;ASePPI;adaptive scrambling;privacy protection;intelligibility;H.264/AVC;encrypted coefficients;protected image;protected version;video surveillance systems;people privacy protection;Privacy;Encryption;Image coding;Image resolution;Cameras;Signal processing algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081347},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346756.pdf},\n}\n\n
\n
\n\n\n
\n The use of video surveillance systems increases every year, and protecting people's privacy is becoming a serious concern. In this paper, we present ASePPI, an Adaptive Scrambling enabling Privacy Protection and Intelligibility. It operates in the DCT domain within the H.264 standard. For each residual block of the luminance channel inside the region of interest, we encrypt the coefficients. Whereas the encrypted coefficients appear as noise in the protected image, the DC value is used to restore some of the original information. Thus, the proposed approach automatically adapts the level of protection according to the resolution of the region of interest. Compared to existing methods, our framework provides better privacy protection with some flexibility in the appearance of the protected version, yielding better visibility of the scene for monitoring. Moreover, the impact on the source coding stream is negligible. Indeed, the results demonstrate a slight decrease in the quality of the reconstructed images and a small percentage of bit overhead.\n
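The core idea, scrambling the AC coefficients of a residual block while leaving the DC value to convey a coarse version, can be illustrated outside of an H.264 codec with a simple key-driven sign-flip on DCT coefficients (an assumption standing in for the paper's actual encryption):

```python
import numpy as np
from scipy.fft import dctn, idctn

# Scramble a luminance block in the DCT domain: pseudo-randomly flip the
# signs of the AC coefficients (invertible with the key), keep DC so a
# coarse, intelligible version of the region survives.
def scramble_block(block, key):
    rng = np.random.default_rng(key)
    C = dctn(block, norm="ortho")
    signs = rng.choice([-1.0, 1.0], size=C.shape)
    signs[0, 0] = 1.0                      # leave the DC value untouched
    return idctn(C * signs, norm="ortho")

block = np.arange(64, dtype=float).reshape(8, 8)     # toy 8x8 residual block
protected = scramble_block(block, key=42)
recovered = scramble_block(protected, key=42)        # same key inverts it
assert np.allclose(recovered, block)
```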
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Visually evoked potential for EEG biometrics using convolutional neural network.\n \n \n \n \n\n\n \n Das, R.; Maiorana, E.; and Campisi, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 951-955, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"VisuallyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081348,\n  author = {R. Das and E. Maiorana and P. Campisi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Visually evoked potential for EEG biometrics using convolutional neural network},\n  year = {2017},\n  pages = {951-955},\n  abstract = {In this paper we investigate the performance of electroencephalographic (EEG) signals, elicited by means of visual stimuli, for biometric identification. A deep learning method such as convolutional neural network (CNN), is used for automatic discriminative feature extraction and individual identification. Experiments are performed on a longitudinal database comprising of EEG data acquired from 40 subjects over two distinct sessions separated by a week time. The experimental results testify the existence of repeatable discriminative characteristics in individuals' EEG signals.},\n  keywords = {biometrics (access control);electroencephalography;feature extraction;learning (artificial intelligence);neural nets;visual evoked potentials;EEG biometrics;convolutional neural network;electroencephalographic signals;visual stimuli;biometric identification;deep learning method;automatic discriminative feature extraction;individual identification;visually evoked potential;longitudinal database;Electroencephalography;Biometrics (access control);Feature extraction;Visualization;Convolution;Protocols;Network topology;Electroencephalography;Visually evoked potential;Convolutional neural network},\n  doi = {10.23919/EUSIPCO.2017.8081348},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347024.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we investigate the performance of electroencephalographic (EEG) signals, elicited by means of visual stimuli, for biometric identification. A deep learning method, the convolutional neural network (CNN), is used for automatic discriminative feature extraction and individual identification. Experiments are performed on a longitudinal database comprising EEG data acquired from 40 subjects over two distinct sessions separated by one week. The experimental results testify to the existence of repeatable discriminative characteristics in individuals' EEG signals.\n
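A minimal PyTorch sketch of a CNN for subject identification from multi-channel EEG epochs is shown below; the channel count, layer sizes and epoch length are illustrative assumptions, not the paper's architecture.

```python
import torch
import torch.nn as nn

# Small 1-D CNN over EEG epochs shaped (batch, channels, samples).
class EEGNet(nn.Module):
    def __init__(self, n_channels=56, n_subjects=40):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv1d(n_channels, 32, kernel_size=7, padding=3), nn.ReLU(),
            nn.MaxPool1d(4),
            nn.Conv1d(32, 64, kernel_size=7, padding=3), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1))
        self.classifier = nn.Linear(64, n_subjects)

    def forward(self, x):
        return self.classifier(self.features(x).squeeze(-1))

logits = EEGNet()(torch.randn(8, 56, 512))   # 8 epochs of 512 samples each
print(logits.shape)                          # torch.Size([8, 40])
```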
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Private authentication keys based on wearable device EEG recordings.\n \n \n \n \n\n\n \n Yang, H.; Mihajlović, V.; and Ignatenko, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 956-960, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PrivatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081349,\n  author = {H. Yang and V. Mihajlović and T. Ignatenko},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Private authentication keys based on wearable device EEG recordings},\n  year = {2017},\n  pages = {956-960},\n  abstract = {In this paper, we study an Electroencephalography (EEG) based biometric authentication system with privacy protection. We use motor imagery EEG, recorded using a wearable wireless device, as our biometric modality. To obtain EEG-based authentication keys we employ the fuzzy-commitment like scheme with soft-information at the decoder, see Ignatenko and Willems [2014]. In this work we study the effect of multi-level quantization together with binary encoding of EEG biometric at the encoder on the system performance, when EEG feature vectors have limited length. We demonstrate our findings on an experimental EEG dataset of ten healthy subjects.},\n  keywords = {biometrics (access control);data privacy;data protection;electroencephalography;medical signal processing;biometric authentication system;privacy protection;motor imagery EEG;wearable wireless device;biometric modality;multilevel quantization;binary encoding;private authentication keys;wearable device EEG recordings;electroencephalography;fuzzy-commitment like scheme;EEG feature vectors;Electroencephalography;Authentication;Decoding;Quantization (signal);Biometrics (access control);Encoding;Brain modeling},\n  doi = {10.23919/EUSIPCO.2017.8081349},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347072.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we study an electroencephalography (EEG) based biometric authentication system with privacy protection. We use motor imagery EEG, recorded using a wearable wireless device, as our biometric modality. To obtain EEG-based authentication keys we employ the fuzzy-commitment-like scheme with soft information at the decoder, see Ignatenko and Willems [2014]. In this work we study the effect of multi-level quantization together with binary encoding of the EEG biometric at the encoder on the system performance, when EEG feature vectors have limited length. We demonstrate our findings on an experimental EEG dataset of ten healthy subjects.\n
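The encoder-side step studied here, multi-level quantization of real-valued features followed by binary (Gray) encoding, can be sketched as follows; the equiprobable quantizer cells and the bit depth are assumptions for illustration.

```python
import numpy as np

# Gray coding: adjacent quantizer levels differ in exactly one bit, which
# keeps small feature perturbations from flipping many key bits.
def gray_bits(level, nbits):
    g = level ^ (level >> 1)                       # binary -> Gray code
    return [(g >> i) & 1 for i in reversed(range(nbits))]

def quantize_features(feats, nbits=2):
    levels = 2 ** nbits
    # equiprobable cells from the empirical distribution of each feature
    edges = np.quantile(feats, np.linspace(0, 1, levels + 1)[1:-1], axis=0)
    bits = []
    for j, f in enumerate(feats.T):
        idx = np.digitize(f, edges[:, j])
        bits.append([gray_bits(int(i), nbits) for i in idx])
    return np.array(bits).transpose(1, 0, 2).reshape(len(feats), -1)

rng = np.random.default_rng(0)
keys = quantize_features(rng.normal(size=(100, 8)))  # 100 epochs, 8 features
print(keys.shape)                                    # (100, 16) key bits
```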
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Privacy-safe linkage analysis with homomorphic encryption.\n \n \n \n \n\n\n \n Ugwuoke, C.; Erkin, Z.; and Lagendijk, R. L.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 961-965, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Privacy-safePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081350,\n  author = {C. Ugwuoke and Z. Erkin and R. L. Lagendijk},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Privacy-safe linkage analysis with homomorphic encryption},\n  year = {2017},\n  pages = {961-965},\n  abstract = {Genetic data are important dataset utilised in genetic epidemiology to investigate biologically coded information within the human genome. Enormous research has been delved into in recent years in order to fully sequence and understand the genome. Personalised medicine, patient response to treatments and relationships between specific genes and certain characteristics such as phenotypes and diseases, are positive impacts of studying the genome, just to mention a few. The sensitivity, longevity and non-modifiable nature of genetic data make it even more interesting, consequently, the security and privacy for the storage and processing of genomic data beg for attention. A common activity carried out by geneticists is the association analysis between allele-allele, or even a genetic locus and a disease. We demonstrate the use of cryptographic techniques such as homomorphic encryption schemes and multiparty computations, how such analysis can be carried out in a privacy friendly manner. We compute a 3 × 3 contingency table, and then, genome analyses algorithms such as linkage disequilibrium (LD) measures, all on the encrypted domain. Our computation guarantees privacy of the genome data under our security settings, and provides up to 98.4% improvement, compared to an existing solution.},\n  keywords = {biology computing;cryptography;data privacy;diseases;genetics;genomics;privacy-safe linkage analysis;genetic data;genetic epidemiology;biologically coded information;human genome;personalised medicine;patient response;genomic data;association analysis;allele-allele;genetic locus;disease;homomorphic encryption schemes;genome analyses algorithms;linkage disequilibrium measures;encrypted domain;genome data;gene phenotypes;contingency table;genome data privacy;Genomics;Bioinformatics;Encryption;Data privacy;Diseases},\n  doi = {10.23919/EUSIPCO.2017.8081350},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347480.pdf},\n}\n\n
\n
\n\n\n
\n Genetic data are an important dataset utilised in genetic epidemiology to investigate biologically coded information within the human genome. Enormous research effort has been invested in recent years in order to fully sequence and understand the genome. Personalised medicine, patient response to treatments, and relationships between specific genes and certain characteristics such as phenotypes and diseases are positive impacts of studying the genome, to mention just a few. The sensitivity, longevity and non-modifiable nature of genetic data make it even more interesting; consequently, the security and privacy of the storage and processing of genomic data beg for attention. A common activity carried out by geneticists is the association analysis between alleles, or even between a genetic locus and a disease. We demonstrate, using cryptographic techniques such as homomorphic encryption schemes and multiparty computation, how such analysis can be carried out in a privacy-friendly manner. We compute a 3 × 3 contingency table, and then genome analysis algorithms such as linkage disequilibrium (LD) measures, all in the encrypted domain. Our computation guarantees privacy of the genome data under our security settings, and provides up to 98.4% improvement compared to an existing solution.\n
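The flavour of the computation can be shown with the additively homomorphic Paillier scheme via the `phe` package (an assumption; the paper's construction and its 3 × 3 allele table differ in detail). Here encrypted indicator counts build a 2 × 3 genotype-by-status table without the aggregator ever seeing individual genotypes:

```python
import numpy as np
from phe import paillier   # python-paillier: additively homomorphic

pub, priv = paillier.generate_paillier_keypair(n_length=1024)

rng = np.random.default_rng(0)
genotypes = rng.integers(0, 3, size=200)    # 0/1/2 copies of the minor allele
cases = rng.integers(0, 2, size=200)        # case/control status

# each contribution is an encrypted 0/1 indicator summed into its cell
enc_cells = [[pub.encrypt(0) for _ in range(3)] for _ in range(2)]
for g, c in zip(genotypes, cases):
    enc_cells[c][g] = enc_cells[c][g] + pub.encrypt(1)

# only the key holder can open the aggregated table
table = [[priv.decrypt(e) for e in row] for row in enc_cells]
print(np.array(table))                      # 2 x 3 contingency table
```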
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n End-to-end musical key estimation using a convolutional neural network.\n \n \n \n \n\n\n \n Korzeniowski, F.; and Widmer, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 966-970, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"End-to-endPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081351,\n  author = {F. Korzeniowski and G. Widmer},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {End-to-end musical key estimation using a convolutional neural network},\n  year = {2017},\n  pages = {966-970},\n  abstract = {We present an end-to-end system for musical key estimation, based on a convolutional neural network. The proposed system not only out-performs existing key estimation methods proposed in the academic literature; it is also capable of learning a unified model for diverse musical genres that performs comparably to existing systems specialised for specific genres. Our experiments confirm that different genres do differ in their interpretation of tonality, and thus a system tuned e.g. for pop music performs subpar on pieces of electronic music. They also reveal that such cross-genre setups evoke specific types of error (predicting the relative or parallel minor). However, using the data-driven approach proposed in this paper, we can train models that deal with multiple musical styles adequately, and without major losses in accuracy.},\n  keywords = {electronic music;feedforward neural nets;music;end-to-end system;convolutional neural network;academic literature;unified model;diverse musical genres;electronic music;cross-genre setups;multiple musical styles;pop music;end-to-end musical key estimation;data-driven approach;Music;Estimation;Neural networks;Training;Harmonic analysis;Spectrogram;Feature extraction},\n  doi = {10.23919/EUSIPCO.2017.8081351},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347645.pdf},\n}\n\n
\n
\n\n\n
\n We present an end-to-end system for musical key estimation, based on a convolutional neural network. The proposed system not only outperforms existing key estimation methods proposed in the academic literature; it is also capable of learning a unified model for diverse musical genres that performs comparably to existing systems specialised for specific genres. Our experiments confirm that different genres do differ in their interpretation of tonality, and thus a system tuned e.g. for pop music performs poorly on pieces of electronic music. They also reveal that such cross-genre setups evoke specific types of error (predicting the relative or parallel minor). However, using the data-driven approach proposed in this paper, we can train models that deal with multiple musical styles adequately, and without major losses in accuracy.\n
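For context, the classical template-matching baseline that such an end-to-end network replaces correlates a time-averaged chroma vector with the 24 Krumhansl-Kessler key profiles; a compact sketch:

```python
import numpy as np

# Krumhansl-Kessler major/minor key profiles (pitch classes C..B)
MAJOR = np.array([6.35, 2.23, 3.48, 2.33, 4.38, 4.09,
                  2.52, 5.19, 2.39, 3.66, 2.29, 2.88])
MINOR = np.array([6.33, 2.68, 3.52, 5.38, 2.60, 3.53,
                  2.54, 4.75, 3.98, 2.69, 3.34, 3.17])
NAMES = "C C# D D# E F F# G G# A A# B".split()
KEYS = [f"{n} major" for n in NAMES] + [f"{n} minor" for n in NAMES]

def estimate_key(chroma):
    profiles = np.vstack([np.roll(MAJOR, k) for k in range(12)] +
                         [np.roll(MINOR, k) for k in range(12)])
    scores = [np.corrcoef(chroma, p)[0, 1] for p in profiles]
    return KEYS[int(np.argmax(scores))]

# toy averaged chroma: a G major triad (G, B, D)
chroma = np.roll(np.array([1, 0, 0, 0, .8, 0, 0, .9, 0, 0, 0, 0], float), 7)
print(estimate_key(chroma))
```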
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A model for music complexity applied to music preprocessing for cochlear implants.\n \n \n \n \n\n\n \n Buyens, W.; Moonen, M.; Wouters, J.; and van Dijk , B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 971-975, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081352,\n  author = {W. Buyens and M. Moonen and J. Wouters and B. {van Dijk}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A model for music complexity applied to music preprocessing for cochlear implants},\n  year = {2017},\n  pages = {971-975},\n  abstract = {Music appreciation remains challenging for cochlear implant users. In previous studies a strong negative correlation was found with cochlear implant subjects between music appreciation and music complexity. In this paper, music features that contribute to music complexity are investigated and related to a music preprocessing scheme for cochlear implants, in which a complexity reduction is achieved in an attempt to increase music appreciation. First, a complexity rating experiment is performed with pop/rock music excerpts and a linear regression model is developed to describe this (subjective) music complexity based on different music features. Subsequently, this model is used to validate the complexity reduction in the music preprocessing scheme and to provide an indication for the preferred setting for the balance between vocals/bass/drums and the other instruments for cochlear implant subjects.},\n  keywords = {acoustic signal processing;cochlear implants;hearing;medical signal processing;music;physiological models;regression analysis;music preprocessing;cochlear implants;music appreciation;music features;pop-rock music excerpts;music complexity reduction;music complexity rating experiment;linear regression model;vocals-bass-drums;Complexity theory;Attenuation;Correlation;Instruments;Multiple signal classification;Cochlear implants;Feature extraction;music complexity;music appreciation;cochlear implants},\n  doi = {10.23919/EUSIPCO.2017.8081352},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346899.pdf},\n}\n\n
\n
\n\n\n
\n Music appreciation remains challenging for cochlear implant users. In previous studies, a strong negative correlation was found between music appreciation and music complexity for cochlear implant subjects. In this paper, music features that contribute to music complexity are investigated and related to a music preprocessing scheme for cochlear implants, in which a complexity reduction is achieved in an attempt to increase music appreciation. First, a complexity rating experiment is performed with pop/rock music excerpts and a linear regression model is developed to describe this (subjective) music complexity based on different music features. Subsequently, this model is used to validate the complexity reduction in the music preprocessing scheme and to provide an indication of the preferred setting for the balance between vocals/bass/drums and the other instruments for cochlear implant subjects.\n
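The modelling step, a linear regression from music features to subjective complexity ratings, is straightforward to sketch; the feature names and data below are synthetic stand-ins, not the paper's feature set.

```python
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
n = 60                                        # rated excerpts
X = np.column_stack([rng.uniform(0, 1, n),    # e.g. onset density
                     rng.uniform(0, 1, n),    # e.g. spectral flux
                     rng.uniform(0, 1, n)])   # e.g. instrument count (scaled)
ratings = 2 + 3 * X[:, 0] + 1.5 * X[:, 2] + rng.normal(0, 0.3, n)

model = LinearRegression().fit(X, ratings)
print("feature weights:", model.coef_, "R^2:", model.score(X, ratings))
```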
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Phase vocoder done right.\n \n \n \n \n\n\n \n Průša, Z.; and Holighaus, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 976-980, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PhasePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081353,\n  author = {Z. Průša and N. Holighaus},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Phase vocoder done right},\n  year = {2017},\n  pages = {976-980},\n  abstract = {The phase vocoder (PV) is a widely spread technique for processing audio signals. It employs a short-time Fourier transform (STFT) analysis-modify-synthesis loop and is typically used for time-scaling of signals by means of using different time steps for STFT analysis and synthesis. The main challenge of PV used for that purpose is the correction of the STFT phase. In this paper, we introduce a novel method for phase correction based on phase gradient estimation and its integration. The method does not require explicit peak picking and tracking nor does it require detection of transients and their separate treatment. Yet, the method does not suffer from the typical phase vocoder artifacts even for extreme time stretching factors.},\n  keywords = {audio coding;Fourier transforms;vocoders;PV;STFT phase;phase correction;phase gradient estimation;extreme time;widely spread technique;audio signals;phase vocoder;short-time Fourier transform;STFT analysis;Time-frequency analysis;Transient analysis;Signal processing algorithms;Vocoders;Frequency estimation;Coherence;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081353},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343436.pdf},\n}\n\n
\n
\n\n\n
\n The phase vocoder (PV) is a widespread technique for processing audio signals. It employs a short-time Fourier transform (STFT) analysis-modify-synthesis loop and is typically used for time-scaling of signals by using different time steps for STFT analysis and synthesis. The main challenge of the PV used for that purpose is the correction of the STFT phase. In this paper, we introduce a novel method for phase correction based on phase gradient estimation and its integration. The method does not require explicit peak picking and tracking, nor does it require detection of transients and their separate treatment. Yet, the method does not suffer from the typical phase vocoder artifacts even for extreme time stretching factors.\n
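For reference, the textbook phase vocoder that the paper improves upon propagates phase only along time, independently per frequency bin; a compact sketch with illustrative parameters:

```python
import numpy as np
from scipy.signal import stft, istft

def phase_vocoder(x, rate, nperseg=1024, hop=256):
    _, _, S = stft(x, nperseg=nperseg, noverlap=nperseg - hop)
    mag, phase = np.abs(S), np.angle(S)
    w = 2 * np.pi * np.arange(S.shape[0]) * hop / nperseg  # nominal advance
    out_phase = phase[:, 0].copy()
    frames = []
    for s in np.arange(0, S.shape[1] - 1, rate):
        i = int(s)
        dphi = phase[:, i + 1] - phase[:, i] - w              # deviation
        dphi -= 2 * np.pi * np.round(dphi / (2 * np.pi))      # wrap to [-pi, pi]
        frames.append(mag[:, i] * np.exp(1j * out_phase))
        out_phase = out_phase + w + dphi                      # horizontal only
    _, y = istft(np.stack(frames, axis=1),
                 nperseg=nperseg, noverlap=nperseg - hop)
    return y

y = phase_vocoder(np.sin(0.05 * np.arange(40000)), rate=0.5)  # 2x slower
print(len(y))
```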
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stereophonic music separation based on non-negative tensor factorization with cepstrum regularization.\n \n \n \n \n\n\n \n Seki, S.; Toda, T.; and Takeda, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 981-985, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"StereophonicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081354,\n  author = {S. Seki and T. Toda and K. Takeda},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Stereophonic music separation based on non-negative tensor factorization with cepstrum regularization},\n  year = {2017},\n  pages = {981-985},\n  abstract = {This paper presents a novel approach to stereophonic music separation based on Non-negative Tensor Factorization (NTF). Stereophonic music is roughly divided into two types; recorded music or synthesized music, which we focus on synthesized one in this paper. Synthesized music signals are often generated as linear combinations of many individual source signals with their mixing gains (i.e., time-invariant amplitude scaling) to each channel signal. Therefore, the synthesized stereophonic music separation is the underdetermined source separation problem where phase components are not helpful for the separation. NTF is one of the effective techniques to handle this problem, decomposing amplitude spectrograms of the stereo channel music signal into basis vectors and activations of individual music source signals and their corresponding mixing gains. However, it is essentially difficult to obtain sufficient separation performance in this separation problem as available acoustic cues for separation are limited. To address this issue, we propose a cepstrum regularization method for NTF-based stereo channel separation. The proposed method makes the separated music source signals follow the corresponding Gaussian mixture models of individual music source signals, which are trained in advance using their available samples. An experimental evaluation using real music signals is conducted to investigate the effectiveness of the proposed method in both supervised and unsupervised separation frameworks. The experimental results demonstrate that the proposed method yields significant improvements in separation performance in both frameworks.},\n  keywords = {acoustic signal processing;cepstral analysis;Gaussian processes;matrix decomposition;music;source separation;tensors;nonnegative tensor factorization;recorded music;synthesized music signals;time-invariant amplitude scaling;channel signal;synthesized stereophonic music separation;underdetermined source separation problem;stereo channel music signal;individual music source signals;separated music source signals;unsupervised separation frameworks;basis vectors;NTF-based stereo channel separation;cepstrum regularization;Gaussian mixture models;Multiple signal classification;Cepstrum;Music;Source separation;Spectrogram;Upper bound;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081354},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347376.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a novel approach to stereophonic music separation based on Non-negative Tensor Factorization (NTF). Stereophonic music is roughly divided into two types, recorded music and synthesized music; we focus on the latter in this paper. Synthesized music signals are often generated as linear combinations of many individual source signals, with their mixing gains (i.e., time-invariant amplitude scaling) applied to each channel signal. Therefore, synthesized stereophonic music separation is an underdetermined source separation problem where phase components are not helpful for the separation. NTF is one of the effective techniques to handle this problem, decomposing the amplitude spectrograms of the stereo channel music signal into basis vectors and activations of individual music source signals and their corresponding mixing gains. However, it is essentially difficult to obtain sufficient separation performance in this separation problem, as the available acoustic cues for separation are limited. To address this issue, we propose a cepstrum regularization method for NTF-based stereo channel separation. The proposed method makes the separated music source signals follow the corresponding Gaussian mixture models of the individual music source signals, which are trained in advance using their available samples. An experimental evaluation using real music signals is conducted to investigate the effectiveness of the proposed method in both supervised and unsupervised separation frameworks. The experimental results demonstrate that the proposed method yields significant improvements in separation performance in both frameworks.\n
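A plain NTF of a two-channel magnitude spectrogram with Euclidean multiplicative updates looks as follows; the cepstrum regularizer itself is omitted, and random data stands in for real |STFT| tensors.

```python
import numpy as np

# NTF model: V[c,f,t] ~ sum_k Q[c,k] * W[f,k] * H[k,t], where Q holds the
# per-channel mixing gains; multiplicative updates for the Euclidean cost.
def ntf(V, K=8, iters=200, eps=1e-12):
    rng = np.random.default_rng(0)
    C, F, T = V.shape
    Q, W, H = rng.random((C, K)), rng.random((F, K)), rng.random((K, T))
    for _ in range(iters):
        Vh = np.einsum('ck,fk,kt->cft', Q, W, H) + eps
        W *= (np.einsum('cft,ck,kt->fk', V, Q, H)
              / np.einsum('cft,ck,kt->fk', Vh, Q, H))
        Vh = np.einsum('ck,fk,kt->cft', Q, W, H) + eps
        H *= (np.einsum('cft,ck,fk->kt', V, Q, W)
              / np.einsum('cft,ck,fk->kt', Vh, Q, W))
        Vh = np.einsum('ck,fk,kt->cft', Q, W, H) + eps
        Q *= (np.einsum('cft,fk,kt->ck', V, W, H)
              / np.einsum('cft,fk,kt->ck', Vh, W, H))
    return Q, W, H

V = np.abs(np.random.default_rng(1).normal(size=(2, 257, 100)))  # |STFT|, L/R
Q, W, H = ntf(V)   # Q: mixing gains, W: spectral bases, H: activations
```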
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An optimized embedded target detection system using acoustic and seismic sensors.\n \n \n \n \n\n\n \n Lee, K.; Riggan, B. S.; and Bhattacharyya, S. S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 986-990, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081355,\n  author = {K. Lee and B. S. Riggan and S. S. Bhattacharyya},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An optimized embedded target detection system using acoustic and seismic sensors},\n  year = {2017},\n  pages = {986-990},\n  abstract = {Detection of targets using low power embedded devices has important applications in border security and surveillance. In this paper, we build on recent algorithmic advances in sensor fusion, and present the design and implementation of a novel, multi-mode embedded signal processing system for detection of people and vehicles using acoustic and seismic sensors. Here, by {"}multi-mode{"}, we mean that the system has available a complementary set of configurations that are optimized for different trade-offs. The multimode capability delivered by the proposed system is useful to supporting long lifetime (long term, energy-efficient {"}standby{"} operation), while also supporting optimized accuracy during critical time periods (e.g., when a potential threat is detected). In our target detection system, we apply a strategically-configured suite of single- and dual-modality signal processing techniques together with dataflow-based design optimization for energy-efficient, real-time implementation. Through experiments using a Raspberry Pi platform, we demonstrate the capability of our target detection system to provide efficient operational tradeoffs among detection accuracy, energy efficiency, and processing speed.},\n  keywords = {data flow analysis;embedded systems;object detection;sensor fusion;optimized embedded target detection system;acoustic sensors;seismic sensors;low power embedded devices;border security;sensor fusion;border surveillance;multimode embedded signal processing system;long term energy-efficient standby operation;single-modality signal processing;dual-modality signal processing;dataflow-based design optimization;Raspberry Pi platform;Sensors;Feature extraction;Support vector machines;Acoustics;Signal processing algorithms;Algorithm design and analysis;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081355},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347422.pdf},\n}\n\n
\n
\n\n\n
\n Detection of targets using low power embedded devices has important applications in border security and surveillance. In this paper, we build on recent algorithmic advances in sensor fusion, and present the design and implementation of a novel, multi-mode embedded signal processing system for detection of people and vehicles using acoustic and seismic sensors. Here, by \"multi-mode\", we mean that the system has available a complementary set of configurations that are optimized for different trade-offs. The multi-mode capability delivered by the proposed system is useful for supporting long lifetime (long-term, energy-efficient \"standby\" operation), while also supporting optimized accuracy during critical time periods (e.g., when a potential threat is detected). In our target detection system, we apply a strategically-configured suite of single- and dual-modality signal processing techniques together with dataflow-based design optimization for energy-efficient, real-time implementation. Through experiments using a Raspberry Pi platform, we demonstrate the capability of our target detection system to provide efficient operational tradeoffs among detection accuracy, energy efficiency, and processing speed.\n
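A toy illustration of the trade-off between a fused dual-modality detector and a cheaper single-modality "standby" detector, using an SVM classifier as suggested by the paper's keywords; features and data are synthetic stand-ins.

```python
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.default_rng(0)
n = 400
labels = rng.integers(0, 2, n)                    # 1 = person/vehicle present
acoustic = rng.normal(size=(n, 6)) + 0.8 * labels[:, None]
seismic = rng.normal(size=(n, 4)) + 0.6 * labels[:, None]

modes = {"fused (dual-modality)": np.hstack([acoustic, seismic]),
         "standby (acoustic only)": acoustic[:, :2]}   # fewer features = cheaper
for name, X in modes.items():
    clf = make_pipeline(StandardScaler(), SVC()).fit(X[:300], labels[:300])
    print(f"{name}: accuracy {clf.score(X[300:], labels[300:]):.2f}")
```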
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A nonuniform quantization scheme for high speed SAR ADC architecture.\n \n \n \n \n\n\n \n Kim, Y.; Guo, W.; and Tewfik, A. H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 991-995, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081356,\n  author = {Y. Kim and W. Guo and A. H. Tewfik},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A nonuniform quantization scheme for high speed SAR ADC architecture},\n  year = {2017},\n  pages = {991-995},\n  abstract = {We introduce a new signal sampling scheme which allows high quality signal conversion to overcome the constraint of effective number of bits in high speed signal acquisition. The proposed scheme is based on the popular successive approximation register (SAR) and employs compressive sensing technique to increase the resolution of a SAR analog-to-digital converter (ADC) architecture. We present signal acquisition and recovery model which provides better performance in signal acquisition. The sampled signal shows higher resolution after recovery than conventional compressive sensing based sampling schemes. Circuit level architecture is discussed to implement the proposed scheme using the SAR ADC architecture. Simulation result shows that the proposed nonuniform quantization strategy can be a way to overcome the sampling rate-resolution limitation which is a challenging problem in SAR ADC design even with the most advanced technology.},\n  keywords = {analogue-digital conversion;compressed sensing;quantisation (signal);signal sampling;circuit level architecture;nonuniform quantization strategy;sampling rate-resolution limitation;SAR ADC design;high speed SAR ADC architecture;signal sampling scheme;high quality signal conversion;high speed signal acquisition;compressive sensing technique;analog-to-digital converter architecture;recovery model;sampled signal;successive approximation register;Quantization (signal);Signal resolution;Sparse matrices;Compressed sensing;Computer architecture;Europe;Compressive sensing;SAR ADC;nonuniform quantization;signal recovery},\n  doi = {10.23919/EUSIPCO.2017.8081356},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347691.pdf},\n}\n\n
\n
\n\n\n
\n We introduce a new signal sampling scheme that allows high-quality signal conversion, overcoming the effective-number-of-bits constraint in high-speed signal acquisition. The proposed scheme is based on the popular successive approximation register (SAR) and employs a compressive sensing technique to increase the resolution of a SAR analog-to-digital converter (ADC) architecture. We present a signal acquisition and recovery model that provides better acquisition performance. The sampled signal shows higher resolution after recovery than with conventional compressive-sensing-based sampling schemes. A circuit-level architecture implementing the proposed scheme within the SAR ADC is discussed. Simulation results show that the proposed nonuniform quantization strategy can be a way to overcome the sampling-rate/resolution limitation, a challenging problem in SAR ADC design even with the most advanced technology.\n
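As background for this entry, a minimal sketch of the plain successive-approximation search that the scheme builds on. This is the textbook uniform SAR conversion, not the paper's compressive-sensing-based nonuniform extension; v_ref and n_bits are illustrative parameters.

def sar_convert(v_in, v_ref, n_bits):
    """Classic SAR binary search: one comparator decision per bit."""
    code = 0
    for bit in reversed(range(n_bits)):
        trial = code | (1 << bit)                 # tentatively set this bit
        v_dac = v_ref * trial / (1 << n_bits)     # DAC output for the trial code
        if v_in >= v_dac:                         # comparator keeps the bit if the
            code = trial                          # input is above the DAC level
    return code

# Example: an 8-bit conversion of 0.7 V against a 1 V reference.
print(sar_convert(0.7, 1.0, 8))  # -> 179 (i.e., 0.69921875 V)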
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Impact of temporal subsampling on accuracy and performance in practical video classification.\n \n \n \n \n\n\n \n Scheidegger, F.; Cavigelli, L.; Schaffner, M.; Malossi, A. C. I.; Bekas, C.; and Benini, L.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 996-1000, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ImpactPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081357,\n  author = {F. Scheidegger and L. Cavigelli and M. Schaffner and A. C. I. Malossi and C. Bekas and L. Benini},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Impact of temporal subsampling on accuracy and performance in practical video classification},\n  year = {2017},\n  pages = {996-1000},\n  abstract = {In this paper we evaluate three state-of-the-art neural-network-based approaches for large-scale video classification, where the computational efficiency of the inference step is of particular importance due to the ever increasing amount of data throughput for video streams. Our evaluation focuses on finding good efficiency vs. accuracy tradeoffs by evaluating different network configurations and parameterizations. In particular, we investigate the use of different temporal subsampling strategies, and show that they can be used to effectively trade computational workload against classification accuracy. Using a subset of the YouTube-8M dataset, we demonstrate that workload reductions in the order of 10×, 50× and 100× can be achieved with accuracy reductions of only 1.3%, 6.2% and 10.8%, respectively. Our results show that temporal subsampling is a simple and generic approach that behaves consistently over the considered classification pipelines and which does not require retraining of the underlying networks.},\n  keywords = {image classification;learning (artificial intelligence);neural nets;video signal processing;video streaming;YouTube-8M dataset;neural-network;large-scale video classification;computational efficiency;inference step;video streams;temporal subsampling strategies;classification pipelines;Training;Feature extraction;Video sequences;Artificial neural networks;Europe;Graphics processing units},\n  doi = {10.23919/EUSIPCO.2017.8081357},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342773.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we evaluate three state-of-the-art neural-network-based approaches for large-scale video classification, where the computational efficiency of the inference step is of particular importance due to the ever-increasing data throughput of video streams. Our evaluation focuses on finding good efficiency vs. accuracy trade-offs by evaluating different network configurations and parameterizations. In particular, we investigate the use of different temporal subsampling strategies, and show that they can be used to effectively trade computational workload against classification accuracy. Using a subset of the YouTube-8M dataset, we demonstrate that workload reductions on the order of 10×, 50× and 100× can be achieved with accuracy reductions of only 1.3%, 6.2% and 10.8%, respectively. Our results show that temporal subsampling is a simple and generic approach that behaves consistently over the considered classification pipelines and does not require retraining of the underlying networks.\n
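A minimal sketch of the subsampling strategy, assuming a hypothetical per_frame_scores callable that stands in for the expensive network inference; the paper's actual pipelines and pooling schemes are not reproduced.

import numpy as np

def classify_with_subsampling(frames, per_frame_scores, stride):
    """Run the (expensive) per-frame scorer only on every `stride`-th frame,
    then average the class-score vectors; the workload drops roughly by a
    factor of `stride` while the video-level decision is often preserved."""
    scores = [per_frame_scores(f) for f in frames[::stride]]
    return int(np.mean(scores, axis=0).argmax())

# Toy usage: 100 'frames', a fake 3-class scorer, stride 10 (~10x less work).
frames = [np.zeros(1) for _ in range(100)]
fake_scorer = lambda f: np.array([0.2, 0.5, 0.3])
print(classify_with_subsampling(frames, fake_scorer, stride=10))  # -> 1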
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An adaptive Clarke transform based estimator for the frequency of balanced and unbalanced three-phase power systems.\n \n \n \n \n\n\n \n Aboutanios, E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1001-1005, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081358,\n  author = {E. Aboutanios},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An adaptive Clarke transform based estimator for the frequency of balanced and unbalanced three-phase power systems},\n  year = {2017},\n  pages = {1001-1005},\n  abstract = {In this paper we examine the general problem of estimating the frequency of a balanced or unbalanced three-phase power system. The Clarke transform is commonly employed to transform the three real voltages to in-phase and quadrature components that are combined to form a complex exponential, the frequency of which can then be estimated. The imbalance between the voltages in an unbalanced system results in significant performance degradation. We address this problem by generalising the Clarke transformation to the case where the voltages are not equal. We then propose a new simple yet accurate algorithm for the estimation of the frequency. We simulate the algorithm and show that it achieves the performance that is obtained in the balanced case, practically sitting on the Cramer-Rao Bound.},\n  keywords = {inverse transforms;maximum likelihood estimation;parameter estimation;power grids;quadrature components;Clarke transformation;unbalanced three-phase power system;in-phase components;performance degradation;adaptive Clarke transform based estimator;balanced three-phase power system;Frequency estimation;Transforms;Signal processing algorithms;Signal to noise ratio;Power system stability;Europe;Three Phase Power Systems;frequency estimation;unbalanced power system;amplitude imbalance;Fast Iterative Interpolated DFT},\n  doi = {10.23919/EUSIPCO.2017.8081358},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347825.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we examine the general problem of estimating the frequency of a balanced or unbalanced three-phase power system. The Clarke transform is commonly employed to transform the three real voltages to in-phase and quadrature components that are combined to form a complex exponential, the frequency of which can then be estimated. The imbalance between the voltages in an unbalanced system results in significant performance degradation. We address this problem by generalising the Clarke transformation to the case where the voltages are not equal. We then propose a new, simple yet accurate algorithm for the estimation of the frequency. We simulate the algorithm and show that it achieves the performance obtained in the balanced case, practically sitting on the Cramér-Rao bound.\n
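A sketch of the balanced-case pipeline the abstract starts from: the standard Clarke transform followed by a simple phase-difference frequency estimate. The paper's contributions (a generalized transform for unbalanced voltages and an iterative interpolated-DFT estimator) are not reproduced; the phase-difference estimator is a stand-in.

import numpy as np

def clarke_frequency(va, vb, vc, fs):
    """Clarke transform to a complex exponential, then average the per-sample
    phase increments to estimate the frequency (balanced-case sketch)."""
    alpha = (2.0 / 3.0) * (va - 0.5 * vb - 0.5 * vc)
    beta = (1.0 / np.sqrt(3.0)) * (vb - vc)
    s = alpha + 1j * beta                      # equals exp(j*2*pi*f0*t) when balanced
    dphi = np.angle(s[1:] * np.conj(s[:-1]))   # per-sample phase increments
    return fs * np.mean(dphi) / (2.0 * np.pi)

# Example: a 50.3 Hz balanced system sampled at 5 kHz.
fs, f0 = 5000.0, 50.3
t = np.arange(2000) / fs
va = np.cos(2 * np.pi * f0 * t)
vb = np.cos(2 * np.pi * f0 * t - 2 * np.pi / 3)
vc = np.cos(2 * np.pi * f0 * t + 2 * np.pi / 3)
print(clarke_frequency(va, vb, vc, fs))  # ~50.3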
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Motor parameters estimation from industrial electrical measurements.\n \n \n \n \n\n\n \n Angelosante, D.; Fagiano, L.; Grasso, F.; and Ragaini, E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1006-1010, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MotorPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081359,\n  author = {D. Angelosante and L. Fagiano and F. Grasso and E. Ragaini},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Motor parameters estimation from industrial electrical measurements},\n  year = {2017},\n  pages = {1006-1010},\n  abstract = {Voltage and current sensors integrated in modern electrical equipment can enable extraction of advanced information on the network and the connected devices. While traditional methods for protection and network managements rely upon processing of these signals at low speed, high-frequency processing of the raw current and voltage signals can unveil information about the type of electrical load in the networks. In particular, the common case of three-phase induction machines is considered in this paper. Motor parameters are instrumental information for control, monitoring and diagnostic. A classical approach is to measure motor parameters using off-line dedicated measurements. In this paper, we propose a method for motor parameters estimation from electrical measurements during motor start-up. Given samples of current and voltage signals during motor start-up, the model parameters are identified using classical non-linear system identification tools. While the classical theory is developed using current sensors, in this paper the method is extended to a common type of industrial current sensors, i.e., Rogowski coil sensors, and signal processing methods are presented to overcome the non-ideality caused by this type of sensors. Numerical tests performed on real data show that effective motor parameters identification can be achieved from the raw current and voltage measurements.},\n  keywords = {asynchronous machines;coils;electric current measurement;electric sensing devices;machine control;parameter estimation;signal processing;voltage measurement;motor start-up;model parameters estimation;voltage measurements;voltage sensors;advanced information extraction;protection managements;network managements;low speed high-frequency processing;off-line dedicated measurements;instrumental information;three-phase induction machines;electrical load;raw current voltage signals;modern electrical equipment;industrial electrical measurements;motor parameters estimation;effective motor parameters identification;signal processing methods;Rogowski coil sensors;industrial current sensors;nonlinear system identification tools;Induction motors;DC motors;Synchronous motors;Sensors;Current measurement;Stators;Voltage measurement},\n  doi = {10.23919/EUSIPCO.2017.8081359},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342085.pdf},\n}\n\n
\n
\n\n\n
\n Voltage and current sensors integrated in modern electrical equipment can enable the extraction of advanced information about the network and the connected devices. While traditional methods for protection and network management rely upon low-speed processing of these signals, high-frequency processing of the raw current and voltage signals can unveil information about the type of electrical load in the network. In particular, the common case of three-phase induction machines is considered in this paper. Motor parameters are instrumental for control, monitoring, and diagnostics. A classical approach is to measure motor parameters using dedicated off-line measurements. In this paper, we propose a method for motor parameter estimation from electrical measurements during motor start-up. Given samples of the current and voltage signals during start-up, the model parameters are identified using classical non-linear system identification tools. While the classical theory is developed for current sensors, in this paper the method is extended to a common type of industrial current sensor, the Rogowski coil, and signal processing methods are presented to overcome the non-idealities introduced by this type of sensor. Numerical tests performed on real data show that effective motor parameter identification can be achieved from the raw current and voltage measurements.\n
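One concrete non-ideality behind this entry is that a Rogowski coil senses the derivative of the current, so the current must be recovered by integration. A minimal sketch of that pre-processing follows; the cumulative-sum integrator and linear detrend are crude stand-ins for the compensation the paper develops.

import numpy as np

def current_from_rogowski(v_coil, fs, mutual_inductance):
    """Recover i(t) from a Rogowski coil, whose output is v = M * di/dt."""
    i_raw = np.cumsum(v_coil) / (fs * mutual_inductance)   # numerical integral
    n = np.arange(len(i_raw))
    slope, intercept = np.polyfit(n, i_raw, 1)             # estimate drift line
    return i_raw - (slope * n + intercept)                 # remove integration drift

# Toy check: a 50 Hz, 100 A peak current sensed through M = 1e-6 H at 10 kHz.
fs, M = 10_000.0, 1e-6
t = np.arange(2000) / fs
i_true = 100 * np.sin(2 * np.pi * 50 * t)
v = M * np.gradient(i_true, 1.0 / fs)                      # ideal coil output
print(np.max(np.abs(current_from_rogowski(v, fs, M) - i_true)))  # small residual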
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improvement of HEVC inter-coding mode using multiple transforms.\n \n \n \n \n\n\n \n Philippe, P.; Biatek, T.; and Lorcy, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1011-1015, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovementPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081360,\n  author = {P. Philippe and T. Biatek and V. Lorcy},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Improvement of HEVC inter-coding mode using multiple transforms},\n  year = {2017},\n  pages = {1011-1015},\n  abstract = {Multiple transforms have received considerable attention recently, especially in the course of an exploration conducted by MPEG and ITU toward the standardization of the next generation video compression algorithm. This joint team has developed a software, called the Joint Exploration Model (JEM) which outperforms by over 25% the HEVC standard. The transform step in JEM consists in Adaptive Multiple Transforms (AMT) and Non-Separable Secondary Transforms (NSST) which are designed and adapted to the intra-coding modes. In inter-coding, only the AMT is allowed and it is restricted to a single set of five transforms. In this paper, adaptive transforms schemes suitable for inter-predicted residuals are designed and proposed to improve the coding efficiency. Two configurations are evaluated for the proposed designs, providing an average bitrate saving of roughly 1% over HEVC with unchanged decoding time.},\n  keywords = {data compression;video coding;AMT;intra-coding modes;adaptive transforms schemes;coding efficiency;HEVC inter-coding mode;HEVC standard;video compression algorithm;joint exploration model;JEM model;decoding time;Transforms;Encoding;Kernel;Bit rate;Video coding;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081360},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341607.pdf},\n}\n\n
\n
\n\n\n
\n Multiple transforms have received considerable attention recently, especially in the course of an exploration conducted by MPEG and ITU toward the standardization of the next-generation video compression algorithm. This joint team has developed software, called the Joint Exploration Model (JEM), which outperforms the HEVC standard by over 25%. The transform step in JEM consists of Adaptive Multiple Transforms (AMT) and Non-Separable Secondary Transforms (NSST), which are designed and adapted to the intra-coding modes. In inter-coding, only the AMT is allowed, and it is restricted to a single set of five transforms. In this paper, adaptive transform schemes suitable for inter-predicted residuals are designed and proposed to improve the coding efficiency. Two configurations are evaluated for the proposed designs, providing an average bitrate saving of roughly 1% over HEVC with unchanged decoding time.\n
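A toy sketch of the rate-distortion-driven transform choice underlying AMT-style coding. SciPy only implements DCT/DST types I-IV, so DCT-II and DST-II stand in for the DST-VII/DCT-VIII kernels of the actual AMT set, and qstep, lam, and the nonzero-count rate proxy are illustrative assumptions.

import numpy as np
from scipy.fft import dct, dst

def rd_best_transform(residual, qstep=8.0, lam=4.0):
    """Pick the candidate transform minimizing J = D + lambda * R for one block."""
    fwd = {
        "dct2": lambda b: dct(dct(b, type=2, axis=0, norm="ortho"), type=2, axis=1, norm="ortho"),
        "dst2": lambda b: dst(dst(b, type=2, axis=0, norm="ortho"), type=2, axis=1, norm="ortho"),
    }
    inv = {
        "dct2": lambda c: dct(dct(c, type=3, axis=0, norm="ortho"), type=3, axis=1, norm="ortho"),
        "dst2": lambda c: dst(dst(c, type=3, axis=0, norm="ortho"), type=3, axis=1, norm="ortho"),
    }
    best = None
    for name in fwd:
        coeffs = np.round(fwd[name](residual) / qstep)     # forward transform + quantization
        recon = inv[name](coeffs * qstep)                  # dequantize + inverse transform
        distortion = np.sum((residual - recon) ** 2)
        rate_proxy = np.count_nonzero(coeffs)              # crude bit-cost proxy
        cost = distortion + lam * rate_proxy
        if best is None or cost < best[0]:
            best = (cost, name)
    return best[1]

block = np.arange(64, dtype=float).reshape(8, 8)           # stand-in residual block
print(rd_best_transform(block))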
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Trapezoidal block split using orthogonal C2 transforms for HEVC video coding.\n \n \n \n \n\n\n \n Dvir, I.; Allouche, A.; Drezner, D.; Ecker, A.; Irony, D.; Peterfreund, N.; Yang, H.; and Jiantong, Z.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1016-1020, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"TrapezoidalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081361,\n  author = {I. Dvir and A. Allouche and D. Drezner and A. Ecker and D. Irony and N. Peterfreund and H. Yang and Z. Jiantong},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Trapezoidal block split using orthogonal C2 transforms for HEVC video coding},\n  year = {2017},\n  pages = {1016-1020},\n  abstract = {We present an extension for HEVC intra-frame coding with trapezoidal splits and orthogonal transforms. A block can be split into two 180-degrees rotationally-symmetric (C2) trapezoidal parts, each coded separately using standard DCT implementation. We also introduce part-to-part prediction from a diagonal edge. The optimal trapezoidal split of a quad tree block is selected in a rate-distortion sense. We achieved 0.8% reduction in BD-rate over HEVC in standard test conditions for intra coding.},\n  keywords = {discrete cosine transforms;optimisation;quadtrees;rate distortion theory;video coding;orthogonal C2 transforms;HEVC video coding;standard DCT implementation;optimal trapezoidal split;quad tree block;trapezoidal block split;HEVC intraframe coding;Discrete cosine transforms;Standards;Encoding;Two dimensional displays;Shape;Quantization (signal)},\n  doi = {10.23919/EUSIPCO.2017.8081361},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346681.pdf},\n}\n\n
\n
\n\n\n
\n We present an extension for HEVC intra-frame coding with trapezoidal splits and orthogonal transforms. A block can be split into two 180-degree rotationally symmetric (C2) trapezoidal parts, each coded separately using a standard DCT implementation. We also introduce part-to-part prediction from a diagonal edge. The optimal trapezoidal split of a quad-tree block is selected in a rate-distortion sense. We achieved a 0.8% reduction in BD-rate over HEVC in standard test conditions for intra coding.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Depth modeling modes complexity control system for the 3D-HEVC video encoder.\n \n \n \n \n\n\n \n Sanchez, G.; Saldanha, M.; Agostini, L.; and Marcon, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1021-1025, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DepthPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081362,\n  author = {G. Sanchez and M. Saldanha and L. Agostini and C. Marcon},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Depth modeling modes complexity control system for the 3D-HEVC video encoder},\n  year = {2017},\n  pages = {1021-1025},\n  abstract = {This paper presents a complexity control system for depth maps intra-frame prediction of the 3D-High Efficiency Video Coding (3D-HEVC) standard. The proposed system uses a Proportional-Integral-Derivative controller over the Simplified Edge Detector heuristic to skip the Depth Modeling Modes (DMMs) evaluation dynamically according to a defined target rate. When analyzing the proposed system under Common Test Conditions, the proposed controller stabilizes the system to the target rate (i.e., the percentage of DMMs evaluation) after encoding a few frames, with negligible encoding efficiency impacts. The BD-rate degradation varies from 0.50% to 0.20%, on average, when the target rates vary from 5% to 15%. These target rates imply in an aggressive reduction in the DMMs evaluations, skipping the DMMs from 85% to 95% of the cases.},\n  keywords = {edge detection;video coding;Common Test Conditions;negligible encoding efficiency impacts;BD-rate degradation;3D-HEVC video encoder;depth maps intra-frame prediction;3D-High Efficiency Video Coding;proportional-integral-derivative controller;depth modeling modes complexity control system;DMM evaluation;simplified edge detector heuristic;depth modeling modes evaluation;Encoding;Complexity theory;Control systems;Image edge detection;Signal processing algorithms;Standards;Three-dimensional displays;3D-HEVC;Intra-Frame Prediction;Depth Maps;Complexity Control;Complexity Reduction},\n  doi = {10.23919/EUSIPCO.2017.8081362},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347230.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a complexity control system for depth map intra-frame prediction in the 3D-High Efficiency Video Coding (3D-HEVC) standard. The proposed system uses a Proportional-Integral-Derivative controller over the Simplified Edge Detector heuristic to skip Depth Modeling Mode (DMM) evaluation dynamically according to a defined target rate. When analyzed under Common Test Conditions, the proposed controller stabilizes the system to the target rate (i.e., the percentage of DMM evaluations) after encoding a few frames, with negligible encoding efficiency impact. The BD-rate degradation varies from 0.50% to 0.20%, on average, as the target rates vary from 5% to 15%. These target rates imply an aggressive reduction in DMM evaluations, skipping DMMs in 85% to 95% of cases.\n
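A minimal sketch of the PID control loop described in this abstract: the controller steers the fraction of blocks that evaluate DMMs toward a target rate by adjusting a gating threshold. The gains and the threshold semantics are illustrative assumptions, not the paper's tuning.

class DMMRateController:
    """PID sketch: drive the measured DMM-evaluation rate toward a target."""

    def __init__(self, target_rate, kp=0.5, ki=0.1, kd=0.05):
        self.target, self.kp, self.ki, self.kd = target_rate, kp, ki, kd
        self.integral, self.prev_error = 0.0, 0.0
        self.threshold = 0.5   # edge-detector threshold gating DMM evaluation

    def update(self, measured_rate):
        error = self.target - measured_rate
        self.integral += error
        derivative = error - self.prev_error
        self.prev_error = error
        # A positive error (too few DMM evaluations) lowers the gating threshold,
        # letting more blocks through; a negative error raises it.
        self.threshold -= self.kp * error + self.ki * self.integral + self.kd * derivative
        self.threshold = min(max(self.threshold, 0.0), 1.0)
        return self.threshold

ctrl = DMMRateController(target_rate=0.10)
for measured in [0.30, 0.22, 0.15, 0.11]:          # per-frame measured DMM rates
    print(round(ctrl.update(measured), 3))         # threshold tightens toward target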
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reducing computational complexity in HEVC decoder for mobile energy saving.\n \n \n \n \n\n\n \n Sidaty, N.; Heulot, J.; Hamidouche, W.; Nogues, E.; Pelcat, M.; and Menard, D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1026-1030, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ReducingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081363,\n  author = {N. Sidaty and J. Heulot and W. Hamidouche and E. Nogues and M. Pelcat and D. Menard},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Reducing computational complexity in HEVC decoder for mobile energy saving},\n  year = {2017},\n  pages = {1026-1030},\n  abstract = {With the growing development of video applications and services for mobile devices, saving energy consumption when managing video is becoming a more and more important issue. The challenge is then to deliver video with high quality while reducing the energy consumption. In this paper, we investigate the relationship between subjective video quality and energy consumption in an HEVC decoder. By reducing the computational complexity of the decoder, drastic energy savings can be achieved without affecting the visual quality. In this paper, two computation methods and several filter configurations are tested. Results show that at least 10% of energy savings are obtained with the same subjective perceived quality. In addition, objective measurements have shown that only a slight quality degradation has been noticed.},\n  keywords = {computational complexity;telecommunication power management;video coding;computational complexity;HEVC decoder;mobile energy;growing development;mobile devices;energy consumption;subjective video quality;visual quality;subjective perceived quality;energy savings;Decoding;Quality assessment;Energy consumption;Video recording;Standards;Computational complexity},\n  doi = {10.23919/EUSIPCO.2017.8081363},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570344341.pdf},\n}\n\n
\n
\n\n\n
\n With the growing development of video applications and services for mobile devices, reducing energy consumption when processing video is becoming an increasingly important issue. The challenge is to deliver high-quality video while reducing the energy consumption. In this paper, we investigate the relationship between subjective video quality and energy consumption in an HEVC decoder. By reducing the computational complexity of the decoder, drastic energy savings can be achieved without affecting the visual quality. Two computation methods and several filter configurations are tested. Results show that energy savings of at least 10% are obtained with the same subjective perceived quality. In addition, objective measurements show only a slight quality degradation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Complexity reduction of 3D-HEVC based on depth analysis for background and ROI classification.\n \n \n \n \n\n\n \n Avila, G.; Conceição, R.; Bubolz, T.; Zatt, B.; Porto, M.; Agostini, L.; and Correa, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1031-1035, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ComplexityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081364,\n  author = {G. Avila and R. Conceição and T. Bubolz and B. Zatt and M. Porto and L. Agostini and G. Correa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Complexity reduction of 3D-HEVC based on depth analysis for background and ROI classification},\n  year = {2017},\n  pages = {1031-1035},\n  abstract = {The 3D extension of the High Efficiency Video Coding (HEVC) standard achieves large compression rates thanks to the addition of several tools to encode multiview and depth information on top of those available in HEVC. The use of such tools incur in a very large computational demand, which can be a serious problem in power and computationally-constrained devices and applications. However, not all information contained in an image is fundamental to the viewer, so that different levels of computational effort can be employed when encoding different image regions. The Region of Interest (ROI) concept is used in this work to classify each Coding Unit (CU) as foreground, heterogeneous background and homogeneous background. Then, a simplified encoding process is employed in those regions classified as homogeneous background, terminating earlier the partitioning process in texture CUs, while still still performing the regular decisions in areas classified as ROI. Experimental results show an average reduction of 22.6% in computational complexity for texture coding with negligible or non-perceived image quality degradation.},\n  keywords = {computational complexity;image classification;image texture;video coding;heterogeneous background;homogeneous background;computational complexity;texture coding;image quality degradation;ROI classification;high-efficiency video coding standard;CU;coding unit;3D-HEVC complexity reduction;Copper;Three-dimensional displays;Image coding;Streaming media;Complexity theory;Cameras;High efficiency video coding;Video Coding;3D Video;3D-HEVC;Complexity Reduction;Subjective Analysis;Region of Interest;Early Termination},\n  doi = {10.23919/EUSIPCO.2017.8081364},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347592.pdf},\n}\n\n
\n
\n\n\n
\n The 3D extension of the High Efficiency Video Coding (HEVC) standard achieves large compression rates thanks to the addition of several tools to encode multiview and depth information on top of those available in HEVC. The use of such tools incurs a very large computational demand, which can be a serious problem in power- and computation-constrained devices and applications. However, not all information contained in an image is fundamental to the viewer, so different levels of computational effort can be employed when encoding different image regions. The Region of Interest (ROI) concept is used in this work to classify each Coding Unit (CU) as foreground, heterogeneous background, or homogeneous background. Then, a simplified encoding process is employed in those regions classified as homogeneous background, terminating the partitioning process earlier in texture CUs, while still performing the regular decisions in areas classified as ROI. Experimental results show an average reduction of 22.6% in computational complexity for texture coding with negligible or non-perceived image quality degradation.\n
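A toy sketch of the three-way CU classification this abstract describes, using the co-located depth block. The fixed thresholds fg_depth and var_thresh are illustrative assumptions; the paper derives its ROI/background split from depth analysis rather than hard-coded constants.

import numpy as np

def classify_cu(depth_block, fg_depth=128, var_thresh=25.0):
    """Classify a CU from its co-located depth block (toy decision rule)."""
    if np.mean(depth_block) >= fg_depth:      # near objects -> region of interest
        return "foreground"
    if np.var(depth_block) > var_thresh:      # far but textured background
        return "heterogeneous_background"
    return "homogeneous_background"           # candidate for early termination

depth = np.full((16, 16), 40.0); depth[:4, :4] = 90.0   # a textured far block
print(classify_cu(depth))   # -> 'heterogeneous_background'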
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Airfare prices prediction using machine learning techniques.\n \n \n \n \n\n\n \n Tziridis, K.; Kalampokas, T.; Papakostas, G. A.; and Diamantaras, K. I.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1036-1039, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AirfarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081365,\n  author = {K. Tziridis and T. Kalampokas and G. A. Papakostas and K. I. Diamantaras},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Airfare prices prediction using machine learning techniques},\n  year = {2017},\n  pages = {1036-1039},\n  abstract = {This paper deals with the problem of airfare prices prediction. For this purpose a set of features characterizing a typical flight is decided, supposing that these features affect the price of an air ticket. The features are applied to eight state of the art machine learning (ML) models, used to predict the air tickets prices, and the performance of the models is compared to each other. Along with the prediction accuracy of each model, this paper studies the dependency of the accuracy on the feature set used to represent an airfare. For the experiments a novel dataset consisting of 1814 data flights of the Aegean Airlines for a specific international destination (from Thessaloniki to Stuttgart) is constructed and used to train each ML model. The derived experimental results reveal that the ML models are able to handle this regression problem with almost 88% accuracy, for a certain type of flight features.},\n  keywords = {learning (artificial intelligence);pricing;regression analysis;travel industry;ML model;flight features;Stuttgart;Thessaloniki;international destination;Aegean Airlines;air ticket prices;machine learning techniques;airfare price prediction;Regression tree analysis;Atmospheric modeling;Vegetation;Support vector machines;Predictive models;Bagging;Multilayer perceptrons;machine learning;prediction model;airfare price;pricing models},\n  doi = {10.23919/EUSIPCO.2017.8081365},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570348051.pdf},\n}\n\n
\n
\n\n\n
\n This paper deals with the problem of airfare price prediction. For this purpose, a set of features characterizing a typical flight is selected, under the assumption that these features affect the price of an air ticket. The features are fed to eight state-of-the-art machine learning (ML) models used to predict air ticket prices, and the performance of the models is compared. Along with the prediction accuracy of each model, this paper studies the dependency of the accuracy on the feature set used to represent an airfare. For the experiments, a novel dataset consisting of 1814 flights of Aegean Airlines on a specific international route (from Thessaloniki to Stuttgart) is constructed and used to train each ML model. The derived experimental results reveal that the ML models are able to handle this regression problem with almost 88% accuracy for a certain set of flight features.\n
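A minimal sketch of the regression setup, on synthetic data standing in for the paper's 1814-flight dataset. The features, the fake price function, and the choice of a random forest (one plausible model among the eight the paper compares) are all illustrative assumptions.

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

# Synthetic stand-in features: e.g., days-to-departure, departure hour,
# day-of-week, and a free-seats proxy, all scaled to [0, 1].
rng = np.random.default_rng(0)
X = rng.uniform(0, 1, size=(1814, 4))
y = 120 + 80 * X[:, 0] + 30 * X[:, 1] + rng.normal(0, 10, size=1814)  # fake prices

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
model = RandomForestRegressor(n_estimators=200, random_state=0).fit(X_tr, y_tr)
print("R^2 on held-out flights:", model.score(X_te, y_te))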
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hand gesture recognition using machine learning and the Myo armband.\n \n \n \n \n\n\n \n Benalcázar, M. E.; Jaramillo, A. G.; Jonathan; Zea, A.; Páez, A.; and Andaluz, V. H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1040-1044, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HandPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081366,\n  author = {M. E. Benalcázar and A. G. Jaramillo and {Jonathan} and A. Zea and A. Páez and V. H. Andaluz},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Hand gesture recognition using machine learning and the Myo armband},\n  year = {2017},\n  pages = {1040-1044},\n  abstract = {Gesture recognition has multiple applications in medical and engineering fields. The problem of hand gesture recognition consists of identifying, at any moment, a given gesture performed by the hand. In this work, we propose a new model for hand gesture recognition in real time. The input of this model is the surface electromyography measured by the commercial sensor the Myo armband placed on the forearm. The output is the label of the gesture executed by the user at any time. The proposed model is based on the Λ-nearest neighbor and dynamic time warping algorithms. This model can learn to recognize any gesture of the hand. To evaluate the performance of our model, we measured and compared its accuracy at recognizing 5 classes of gestures to the accuracy of the proprietary system of the Myo armband. As a result of this evaluation, we determined that our model performs better (86% accurate) than the Myo system (83%).},\n  keywords = {electromyography;gesture recognition;learning (artificial intelligence);hand gesture recognition;machine learning;EMG;k-nearest neighbor;dynamic time warping algorithm;electromyography;Electromyography;Muscles;Gesture recognition;Real-time systems;Feature extraction;Hidden Markov models;Heuristic algorithms;Hand gesture recogntion;EMG;machine learning;k-nearest neighbor;dynamic time warping algorithm},\n  doi = {10.23919/EUSIPCO.2017.8081366},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347665.pdf},\n}\n\n
\n
\n\n\n
\n Gesture recognition has multiple applications in medical and engineering fields. The problem of hand gesture recognition consists of identifying, at any moment, a given gesture performed by the hand. In this work, we propose a new model for hand gesture recognition in real time. The input of this model is the surface electromyography measured by a commercial sensor, the Myo armband, placed on the forearm. The output is the label of the gesture executed by the user at any time. The proposed model is based on the k-nearest neighbor and dynamic time warping algorithms. This model can learn to recognize any gesture of the hand. To evaluate the performance of our model, we measured its accuracy at recognizing 5 classes of gestures and compared it to the accuracy of the proprietary system of the Myo armband. As a result of this evaluation, we determined that our model performs better (86% accuracy) than the Myo system (83%).\n
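A minimal sketch of the DTW-plus-k-NN core named in this abstract, on single-channel 1-D sequences. The Myo armband actually provides multi-channel EMG, and the paper's feature extraction and real-time handling are omitted.

import numpy as np

def dtw_distance(a, b):
    """Dynamic time warping distance between two 1-D sequences."""
    n, m = len(a), len(b)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = abs(a[i - 1] - b[j - 1])
            D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m]

def knn_dtw_predict(query, templates, labels, k=1):
    """k-NN over DTW distances; with k=1 this is nearest-template matching."""
    dists = np.array([dtw_distance(query, t) for t in templates])
    nearest = np.argsort(dists)[:k]
    vals, counts = np.unique([labels[i] for i in nearest], return_counts=True)
    return vals[np.argmax(counts)]

labels = ["fist", "wave"]
templates = [np.array([0, 1, 2, 1, 0], float), np.array([0, 2, 0, 2, 0], float)]
query = np.array([0, 1, 1, 2, 1, 0], float)
print(knn_dtw_predict(query, templates, labels))   # -> 'fist'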
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal detection of fake social media use through a fusion of classification and pairwise ranking systems.\n \n \n \n \n\n\n \n Agrawal, T.; Gupta, R.; and Narayanan, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1045-1049, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081367,\n  author = {T. Agrawal and R. Gupta and S. Narayanan},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multimodal detection of fake social media use through a fusion of classification and pairwise ranking systems},\n  year = {2017},\n  pages = {1045-1049},\n  abstract = {The problem of detecting misinformation and fake content on social media is gaining importance with the increase in popularity of these social media platforms. Researchers have addressed this content analysis problem using machine learning tools with innovations in feature engineering as well as algorithm design. However, most of the machine learning approaches use a conventional classification setting, involving training a classifier on a set of features. In this work, we propose a fusion of a pairwise ranking approach and a classification system in detecting tweets with misinformation that include multimedia content. Pairwise ranking allows comparison between two objects and returns a preference score for the first object in the pair in comparison to the second object. We design a ranking system to determine the legitimacy score for a tweet with reference to another tweet from the same topic of discussion (as hashtagged on Twitter), thereby allowing a contextual comparison. Finally, we incorporate the ranking system outputs within the classification system. The proposed fusion obtains an Unweighted Average Recall (UAR) of 83.5% in classifying misinforming tweets against genuine tweets, a significant improvement over a classification only baseline system (UAR: 80.1%).},\n  keywords = {data analysis;learning (artificial intelligence);pattern classification;social networking (online);multimodal detection;fake social media;misinformation;content analysis problem;machine learning tools;pairwise ranking approach;classification system;multimedia content;Twitter;Feature extraction;Training;Multimedia communication;Twitter;Transform coding;Fake multimedia detection;Learning to rank},\n  doi = {10.23919/EUSIPCO.2017.8081367},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347926.pdf},\n}\n\n
\n
\n\n\n
\n The problem of detecting misinformation and fake content on social media is gaining importance with the increase in popularity of these social media platforms. Researchers have addressed this content analysis problem using machine learning tools, with innovations in feature engineering as well as algorithm design. However, most of the machine learning approaches use a conventional classification setting, involving training a classifier on a set of features. In this work, we propose a fusion of a pairwise ranking approach and a classification system for detecting tweets with misinformation that include multimedia content. Pairwise ranking allows comparison between two objects and returns a preference score for the first object in the pair relative to the second. We design a ranking system to determine the legitimacy score for a tweet with reference to another tweet from the same topic of discussion (as hashtagged on Twitter), thereby allowing a contextual comparison. Finally, we incorporate the ranking system outputs within the classification system. The proposed fusion obtains an Unweighted Average Recall (UAR) of 83.5% in classifying misinforming tweets against genuine tweets, a significant improvement over a classification-only baseline system (UAR: 80.1%).\n
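A toy sketch of one way to fuse a classifier with a pairwise ranker, on synthetic features. The difference-based pairing, logistic models, and the fixed fusion weight alpha are all illustrative assumptions, not the paper's feature set or fusion rule.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
# Synthetic stand-in features; y = 1 for genuine tweets, 0 for misinforming ones.
X = rng.normal(size=(400, 10))
y = (X[:, 0] + 0.5 * rng.normal(size=400) > 0).astype(int)

# 1) Plain classifier on single tweets.
clf = LogisticRegression().fit(X, y)

# 2) Pairwise ranker: a classifier on feature differences x_i - x_j that
#    predicts whether the first tweet of the pair is the genuine one.
pos, neg = np.where(y == 1)[0], np.where(y == 0)[0]
i, j = rng.choice(pos, 500), rng.choice(neg, 500)
X_pairs = np.vstack([X[i] - X[j], X[j] - X[i]])
y_pairs = np.concatenate([np.ones(500), np.zeros(500)])
ranker = LogisticRegression().fit(X_pairs, y_pairs)

def fused_score(x, topic_refs, alpha=0.5):
    """Blend the classifier probability with the mean pairwise preference of x
    over reference tweets from the same topic (alpha is an assumed weight)."""
    p_clf = clf.predict_proba(x[None, :])[0, 1]
    p_rank = ranker.predict_proba(x[None, :] - topic_refs)[:, 1].mean()
    return alpha * p_clf + (1 - alpha) * p_rank

print(fused_score(X[0], topic_refs=X[neg[:20]]))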
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Boosted multiple kernel learning for first-person activity recognition.\n \n \n \n \n\n\n \n Özkan, F.; Arabaci, M. A.; Surer, E.; and Temizel, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1050-1054, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"BoostedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081368,\n  author = {F. Özkan and M. A. Arabaci and E. Surer and A. Temizel},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Boosted multiple kernel learning for first-person activity recognition},\n  year = {2017},\n  pages = {1050-1054},\n  abstract = {Activity recognition from first-person (ego-centric) videos has recently gained attention due to the increasing ubiquity of the wearable cameras. There has been a surge of efforts adapting existing feature descriptors and designing new descriptors for the first-person videos. An effective activity recognition system requires selection and use of complementary features and appropriate kernels for each feature. In this study, we propose a data-driven framework for first-person activity recognition which effectively selects and combines features and their respective kernels during the training. Our experimental results show that use of Multiple Kernel Learning (MKL) and Boosted MKL in first-person activity recognition problem exhibits improved results in comparison to the state-of-the-art. In addition, these techniques enable the expansion of the framework with new features in an efficient and convenient way.},\n  keywords = {feature extraction;image recognition;learning (artificial intelligence);first-person videos;complementary features;first-person activity recognition problem;Boosted multiple kernel;feature descriptors;activity recognition system;wearable cameras;multiple kernel learning;boosted MKL;Kernel;Videos;Activity recognition;Histograms;Feature extraction;Cameras;multiple kernel learning;kernel boosting;first-person;ego-centric videos;activity recognition},\n  doi = {10.23919/EUSIPCO.2017.8081368},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343800.pdf},\n}\n\n
\n
\n\n\n
\n Activity recognition from first-person (ego-centric) videos has recently gained attention due to the increasing ubiquity of wearable cameras. There has been a surge of efforts adapting existing feature descriptors and designing new descriptors for first-person videos. An effective activity recognition system requires the selection and use of complementary features and appropriate kernels for each feature. In this study, we propose a data-driven framework for first-person activity recognition which effectively selects and combines features and their respective kernels during training. Our experimental results show that the use of Multiple Kernel Learning (MKL) and Boosted MKL in the first-person activity recognition problem yields improved results compared with the state of the art. In addition, these techniques enable the expansion of the framework with new features in an efficient and convenient way.\n
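A minimal sketch of kernel combination with an SVM on a precomputed kernel, using synthetic data. The fixed weights are an assumption: in MKL proper these weights are learned (and, in the boosted variant, updated per round), which this sketch does not do.

import numpy as np
from sklearn.svm import SVC
from sklearn.metrics.pairwise import rbf_kernel, linear_kernel

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 6))
y = (X[:, 0] * X[:, 1] > 0).astype(int)      # toy labels standing in for activities

def combined_kernel(A, B, weights=(0.6, 0.4)):
    """Fixed-weight combination of two base kernels (learned in real MKL)."""
    return weights[0] * rbf_kernel(A, B) + weights[1] * linear_kernel(A, B)

clf = SVC(kernel="precomputed").fit(combined_kernel(X, X), y)
print(clf.score(combined_kernel(X, X), y))   # training accuracy of the fused kernel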
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Precoder design in user-centric virtual cell networks.\n \n \n \n \n\n\n \n Shi, J.; Chen, M.; Zhang, W.; Yang, Z.; and Xu, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1055-1059, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PrecoderPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081369,\n  author = {J. Shi and M. Chen and W. Zhang and Z. Yang and H. Xu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Precoder design in user-centric virtual cell networks},\n  year = {2017},\n  pages = {1055-1059},\n  abstract = {We consider the coordinated transmission in the downlink of user-centric virtual cell networks where a number of Remote Radio Heads (RRHs) form a virtual cell to serve every user equipment (UE). We introduce two cell formation schemes and design the precoders in order to optimize the sum data rate with fairness among users. The original non-convex weighted sum-rate maximization problem is converted into an equivalent matrix-weighted sum-mean square error (MSE) minimization problem, which is solved by a distributed precoding algorithm. Simulation results show that the proposed weighted minimum mean square error (WMMSE) algorithm provides a substantial gain over existing algorithms in terms of sum data rates with moderate implementation cost.},\n  keywords = {cellular radio;mean square error methods;minimisation;precoding;sum data rate optimization;Remote Radio Heads;precoder design;weighted minimum mean square error algorithm;distributed precoding algorithm;square error minimization problem;equivalent matrix-weighted sum;nonconvex weighted sum-rate maximization problem;cell formation schemes;user-centric virtual cell networks;Precoding;Minimization;Downlink;Matrix converters;Signal processing algorithms;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081369},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347840.pdf},\n}\n\n
\n
\n\n\n
\n We consider the coordinated transmission in the downlink of user-centric virtual cell networks where a number of Remote Radio Heads (RRHs) form a virtual cell to serve every user equipment (UE). We introduce two cell formation schemes and design the precoders in order to optimize the sum data rate with fairness among users. The original non-convex weighted sum-rate maximization problem is converted into an equivalent matrix-weighted sum-mean square error (MSE) minimization problem, which is solved by a distributed precoding algorithm. Simulation results show that the proposed weighted minimum mean square error (WMMSE) algorithm provides a substantial gain over existing algorithms in terms of sum data rates with moderate implementation cost.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A sequential constraint relaxation algorithm for rank-one constrained problems.\n \n \n \n \n\n\n \n Cao, P.; Thompson, J.; and Poor, H. V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1060-1064, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081370,\n  author = {P. Cao and J. Thompson and H. V. Poor},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A sequential constraint relaxation algorithm for rank-one constrained problems},\n  year = {2017},\n  pages = {1060-1064},\n  abstract = {Many optimization problems in communications and signal processing can be formulated as rank-one constrained optimization problems. This has motivated the development of methods to solve such problem in specific scenarios. However, due to the non-convex nature of the rank-one constraint, limited progress has been made in solving generic rank-one constrained optimization problems. In particular, the problem of efficiently finding a locally optimal solution to a generic rank-one constrained problem remains open. This paper focuses on solving general rank-one constrained problems via relaxation techniques. However, instead of dropping the rank-one constraint completely as is done in traditional rank-one relaxation methods, a novel algorithm that gradually relaxes the rank-one constraint, termed the sequential rank-one constraint relaxation (SROCR) algorithm, is proposed. Compared with previous algorithms, the SROCR algorithm can solve general rank-one constrained problems, and can find feasible solutions with favorable complexity.},\n  keywords = {concave programming;relaxation theory;signal processing;sequential rank-one constraint relaxation algorithm;sequential constraint relaxation algorithm;generic rank-one constrained optimization problems;locally optimal solution;rank-one constrained problems;optimization problems;nonconvex nature;SROCR algorithm;Optimization;Signal processing algorithms;Signal processing;Complexity theory;Convex functions;Eigenvalues and eigenfunctions;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081370},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346857.pdf},\n}\n\n
\n
\n\n\n
\n Many optimization problems in communications and signal processing can be formulated as rank-one constrained optimization problems. This has motivated the development of methods to solve such problems in specific scenarios. However, due to the non-convex nature of the rank-one constraint, limited progress has been made in solving generic rank-one constrained optimization problems. In particular, the problem of efficiently finding a locally optimal solution to a generic rank-one constrained problem remains open. This paper focuses on solving general rank-one constrained problems via relaxation techniques. However, instead of dropping the rank-one constraint completely, as is done in traditional rank-one relaxation methods, a novel algorithm that gradually relaxes the rank-one constraint, termed the sequential rank-one constraint relaxation (SROCR) algorithm, is proposed. Compared with previous algorithms, the SROCR algorithm can solve general rank-one constrained problems and can find feasible solutions with favorable complexity.\n
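A sketch of the gradual-relaxation idea on a toy problem, using cvxpy. The lifted constraint u^T X u >= w * trace(X) is tightened by driving w from 0 to 1, with u the leading eigenvector of the previous iterate; the toy objective and the linear w-schedule are illustrative assumptions, not the paper's SROCR specification.

import numpy as np
import cvxpy as cp

def srocr_leading_direction(A, n_steps=10):
    """Gradually tightened rank-one relaxation for max x^T A x s.t. ||x|| = 1,
    lifted to an SDP over X = x x^T (toy instance of the SROCR idea)."""
    n = A.shape[0]
    u = np.ones(n) / np.sqrt(n)
    for w in np.linspace(0.0, 1.0, n_steps):
        X = cp.Variable((n, n), symmetric=True)
        constraints = [X >> 0, cp.trace(X) == 1,
                       cp.sum(cp.multiply(np.outer(u, u), X)) >= w]  # u^T X u >= w*tr(X)
        cp.Problem(cp.Maximize(cp.trace(A @ X)), constraints).solve()
        vals, vecs = np.linalg.eigh(X.value)
        u = vecs[:, -1]                    # leading eigenvector of the iterate
    return u                               # (approximately) rank-one direction

# Example: recovers the dominant eigenvector of a symmetric matrix.
rng = np.random.default_rng(0)
M = rng.normal(size=(5, 5)); A = (M + M.T) / 2
print(srocr_leading_direction(A))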
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A primal-dual line search method and applications in image processing.\n \n \n \n \n\n\n \n Sopasakis, P.; Themelis, A.; Suykens, J.; and Patrinos, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1065-1069, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081371,\n  author = {P. Sopasakis and A. Themelis and J. Suykens and P. Patrinos},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A primal-dual line search method and applications in image processing},\n  year = {2017},\n  pages = {1065-1069},\n  abstract = {Operator splitting algorithms are enjoying wide acceptance in signal processing for their ability to solve generic convex optimization problems exploiting their structure and leading to efficient implementations. These algorithms are instances of the Krasnosel'skil-Mann scheme for finding fixed points of averaged operators. Despite their popularity, however, operator splitting algorithms are sensitive to ill conditioning and often converge slowly. In this paper we propose a line search primal-dual method to accelerate and robustify the Chambolle-Pock algorithm based on SuperMann: a recent extension of the Kras-nosel'skil-Mann algorithmic scheme. We discuss the convergence properties of this new algorithm and we showcase its strengths on the problem of image denoising using the anisotropic total variation regularization.},\n  keywords = {image denoising;search problems;primal-dual line search method;image processing;operator splitting algorithms;Chambolle-Pock algorithm;SuperMann;image denoising;anisotropic total variation regularization;Signal processing algorithms;1/f noise;Convergence;Europe;Optimization;Search methods},\n  doi = {10.23919/EUSIPCO.2017.8081371},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347502.pdf},\n}\n\n
\n
\n\n\n
\n Operator splitting algorithms are enjoying wide acceptance in signal processing for their ability to solve generic convex optimization problems exploiting their structure and leading to efficient implementations. These algorithms are instances of the Krasnosel'skii-Mann scheme for finding fixed points of averaged operators. Despite their popularity, however, operator splitting algorithms are sensitive to ill conditioning and often converge slowly. In this paper we propose a line search primal-dual method to accelerate and robustify the Chambolle-Pock algorithm based on SuperMann, a recent extension of the Krasnosel'skii-Mann algorithmic scheme. We discuss the convergence properties of this new algorithm and we showcase its strengths on the problem of image denoising using anisotropic total variation regularization.\n
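A sketch of the plain Chambolle-Pock baseline on the anisotropic TV denoising problem min_x 0.5*||x - f||^2 + lam*||grad x||_1; the paper's SuperMann-based line search is not included, and the fixed steps tau = sigma = 1/sqrt(8) satisfy tau*sigma*||grad||^2 <= 1.

import numpy as np

def grad(u):
    """Forward-difference gradient with Neumann boundary conditions."""
    gx = np.zeros_like(u); gx[:-1, :] = u[1:, :] - u[:-1, :]
    gy = np.zeros_like(u); gy[:, :-1] = u[:, 1:] - u[:, :-1]
    return gx, gy

def div(px, py):
    """Discrete divergence, the negative adjoint of grad."""
    dx = np.zeros_like(px)
    dx[0, :] = px[0, :]; dx[1:-1, :] = px[1:-1, :] - px[:-2, :]; dx[-1, :] = -px[-2, :]
    dy = np.zeros_like(py)
    dy[:, 0] = py[:, 0]; dy[:, 1:-1] = py[:, 1:-1] - py[:, :-2]; dy[:, -1] = -py[:, -2]
    return dx + dy

def tv_denoise_cp(f, lam=0.1, n_iter=200):
    """Fixed-step Chambolle-Pock for anisotropic TV denoising."""
    tau = sigma = 1.0 / np.sqrt(8.0)
    x = f.copy(); x_bar = f.copy()
    px = np.zeros_like(f); py = np.zeros_like(f)
    for _ in range(n_iter):
        gx, gy = grad(x_bar)
        px = np.clip(px + sigma * gx, -lam, lam)   # prox of the dual of lam*||.||_1
        py = np.clip(py + sigma * gy, -lam, lam)
        x_old = x
        x = (x + tau * div(px, py) + tau * f) / (1.0 + tau)  # prox of 0.5||x-f||^2
        x_bar = 2 * x - x_old                                # over-relaxation, theta = 1
    return x

rng = np.random.default_rng(0)
clean = np.zeros((32, 32)); clean[8:24, 8:24] = 1.0
noisy = clean + 0.2 * rng.normal(size=clean.shape)
print(np.mean((tv_denoise_cp(noisy, lam=0.15) - clean) ** 2))  # reduced MSE vs. noisy input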
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian framework for mobility pattern discovery using mobile network events.\n \n \n \n \n\n\n \n Danafar, S.; Piorkowski, M.; and Krysczcuk, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1070-1074, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081372,\n  author = {S. Danafar and M. Piorkowski and K. Krysczcuk},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian framework for mobility pattern discovery using mobile network events},\n  year = {2017},\n  pages = {1070-1074},\n  abstract = {Understanding human mobility patterns is of great importance for planning urban and extra-urban spaces and communication infrastructures. The omnipresence of mobile telephony in today's society opens new avenues of discovering the patterns of human mobility by means of analyzing cellular network data. Of particular interest is analyzing passively collected Network Events (NEs) due to their scalability. However, mobility pattern analysis based on network events is challenging because of the coarse granularity of NEs. In this paper, we propose network event-based Bayesian approaches for mobility pattern recognition and reconstruction, mode of transport recognition and modeling the frequent trajectories.},\n  keywords = {Bayes methods;cellular radio;mobility management (mobile radio);pattern recognition;mobility pattern reconstruction;mobility pattern recognition;Bayesian approaches;mobility pattern analysis;cellular network data;mobile telephony;communication infrastructures;mobile network events;mobility pattern discovery;Bayesian framework;Trajectory;Global Positioning System;Mobile handsets;Roads;Mobile communication;Bayes methods;Pattern recognition},\n  doi = {10.23919/EUSIPCO.2017.8081372},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347626.pdf},\n}\n\n
\n
\n\n\n
\n Understanding human mobility patterns is of great importance for planning urban and extra-urban spaces and communication infrastructures. The omnipresence of mobile telephony in today's society opens new avenues for discovering patterns of human mobility by means of analyzing cellular network data. Of particular interest is analyzing passively collected Network Events (NEs) due to their scalability. However, mobility pattern analysis based on network events is challenging because of the coarse granularity of NEs. In this paper, we propose network-event-based Bayesian approaches for mobility pattern recognition and reconstruction, mode-of-transport recognition, and modeling of frequent trajectories.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Radar detection schemes for joint temporal and spatial correlated clutter using vector ARMA models.\n \n \n \n \n\n\n \n Ben Abdallah, W.; Ovarlez, J. P.; and Bondon, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1075-1079, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RadarPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081373,\n  author = {W. {Ben Abdallah} and J. P. Ovarlez and P. Bondon},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Radar detection schemes for joint temporal and spatial correlated clutter using vector ARMA models},\n  year = {2017},\n  pages = {1075-1079},\n  abstract = {Adaptive radar detection and estimation schemes are often based on the independence of the training data used for building estimators and detectors. This paper relaxes this constraint and deals with the non-trivial problem of deriving detection and estimation schemes for joint spatial and temporal correlated radar measurements. In order to estimate these two joint correlation matrices, we propose to use the Vector ARMA (VARMA) methodology. The estimation of the VARMA model parameters are performed with Maximum Likelihood Estimators in Gaussian and non-Gaussian environment. These two joint estimates of the spatial and temporal covariance matrices leads to build Adaptive Radar Detectors, like Adaptive Normalized Matched Filter (ANMF). Their corresponding performance are analyzed through simulated datasets. We show that taking into account the spatial covariance matrix may lead to significant performance improvements compared to classical procedures ignoring the spatial correlation.},\n  keywords = {adaptive filters;adaptive radar;covariance matrices;maximum likelihood estimation;radar clutter;radar detection;training data;building estimators;nontrivial problem;temporal correlated radar measurements;joint correlation matrices;Vector ARMA methodology;VARMA model parameters;Maximum Likelihood Estimators;spatial covariance matrices;temporal covariance matrices;Adaptive Radar Detectors;Adaptive Normalized Matched Filter;spatial correlation;Radar detection schemes;joint temporal clutter;spatial correlated clutter;vector ARMA models;Adaptive radar detection;spatial covariance matrix;Clutter;Covariance matrices;Radar detection;Correlation;Maximum likelihood estimation;Detectors},\n  doi = {10.23919/EUSIPCO.2017.8081373},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347210.pdf},\n}\n\n

Separation of delayed parameterized sources.
Mortada, H.; Mazet, V.; Soussen, C.; and Collet, C.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1080-1084, Aug 2017.

@InProceedings{8081374,
  author = {H. Mortada and V. Mazet and C. Soussen and C. Collet},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Separation of delayed parameterized sources},
  year = {2017},
  pages = {1080-1084},
  abstract = {This paper addresses the delayed (or anechoic) source separation problem in the case of parameterized deterministic sources. An alternating least squares scheme is proposed to estimate the source parameters, the mixing coefficients and the delays. For the challenging delay parameter we adapt a sparse approximation strategy. A first algorithm considers discrete delays; then an extension, inspired by the recent sparse deconvolution literature, allows for continuous delay estimation. Numerical simulations demonstrate the effectiveness of the proposed algorithms compared to state-of-the-art methods for highly correlated Gaussian sources.},
  keywords = {deconvolution;delay estimation;Gaussian processes;least squares approximations;source separation;delayed parameterized sources;parameterized deterministic sources;alternating least square scheme;sparse approximation strategy;discrete delays;continuous delay estimation;highly correlated Gaussian sources;anechoic source separation problem;delayed source separation problem;sparse deconvolution;Source separation;Signal processing algorithms;Delay estimation;Shape;Matching pursuit algorithms;Approximation algorithms;anechoic source separation;alternating least squares;continuous delay estimation;decomposition of spectro-scopic signals;correlated sources},
  doi = {10.23919/EUSIPCO.2017.8081374},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342181.pdf},
}
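
For the delay step, a minimal discrete-delay sketch in the spirit of the sparse approximation strategy the abstract describes: one matching-pursuit pass over a dictionary of shifted copies of a parameterized source shape. The shape s is an assumed input; the paper's continuous-delay refinement and the full alternating scheme are not reproduced.

    import numpy as np

    def estimate_delay_mp(x, s, max_delay):
        # One matching-pursuit step: correlate the signal x with every
        # integer shift of the source shape s and keep the best delay.
        # Circular shifts are used here for brevity.
        scores = np.array([np.dot(np.roll(s, d), x) for d in range(max_delay)])
        d_hat = int(np.argmax(np.abs(scores)))
        amp_hat = scores[d_hat] / np.dot(s, s)  # least-squares amplitude
        return d_hat, amp_hat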

Jeffrey's divergence between autoregressive moving-average processes.
Legrand, L.; Grivel, É.; and Giremus, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1085-1089, Aug 2017.

@InProceedings{8081375,
  author = {L. Legrand and É. Grivel and A. Giremus},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Jeffrey's divergence between autoregressive moving-average processes},
  year = {2017},
  pages = {1085-1089},
  abstract = {Various works have been carried out on Jeffrey's divergence (JD), which is the symmetric version of the Kullback-Leibler (KL) divergence. An expression of the JD for Gaussian processes can be deduced from the definition of the KL divergence and the expression of the Gaussian-multivariate distributions of k-dimensional random vectors. It depends on the k × k Toeplitz covariance matrices of the stationary processes. However, the resulting computational cost may be high, as these matrices must be inverted, and it grows as k increases. To circumvent this problem, a recursive expression can be obtained for real 1st-order autoregressive (AR) processes. When they are disturbed by additive uncorrelated white noises, we showed that when k becomes large, the derivative of the JD with respect to k tends to be constant. This constant is sufficient to compare the noisy AR processes. In this paper, we propose to extend our work to AR moving-average (MA) processes with one AR term and one MA term. Some examples illustrate the theoretical analysis.},
  keywords = {autoregressive moving average processes;autoregressive processes;covariance matrices;Gaussian distribution;Gaussian processes;matrix algebra;Toeplitz matrices;white noise;real 1st-order autoregressive process;autoregressive moving-average process;AR moving-average process;Toeplitz covariance matrices;Jeffreys divergence;additive uncorrelated white noises;resulting computational cost;k-dimensional random vectors;Gaussian-multivariate distributions;KL divergence;Gaussian processes;Kullback-Leibler divergence;JD;Noise measurement;Correlation;Covariance matrices;Estimation;Europe;White noise;Jeffrey's divergence;Kullback-Leibler divergence;autoregressive moving-average processes},
  doi = {10.23919/EUSIPCO.2017.8081375},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346131.pdf},
}
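
For zero-mean stationary Gaussian processes the JD mentioned above has a closed form in the k × k Toeplitz covariances: the log-determinant terms of the two KL divergences cancel, leaving JD = (1/2) tr(S1^-1 S0 + S0^-1 S1) - k. A sketch of the brute-force, definition-based computation for two ARMA(1,1) processes, using their textbook autocovariances; the paper's point is precisely to avoid this matrix inversion with a recursive expression:

    import numpy as np
    from scipy.linalg import toeplitz

    def arma11_acov(a, b, s2, k):
        # Autocovariance of x_t = a*x_{t-1} + e_t + b*e_{t-1}, var(e) = s2.
        r = np.empty(k)
        r[0] = s2 * (1 + 2 * a * b + b * b) / (1 - a * a)
        if k > 1:
            r[1] = s2 * (a + b) * (1 + a * b) / (1 - a * a)
        for t in range(2, k):
            r[t] = a * r[t - 1]
        return r

    def jeffreys_divergence(r0, r1):
        # JD between zero-mean Gaussian processes with Toeplitz covariances.
        S0, S1 = toeplitz(r0), toeplitz(r1)
        k = len(r0)
        return 0.5 * np.trace(np.linalg.solve(S1, S0)
                              + np.linalg.solve(S0, S1)) - k

    print(jeffreys_divergence(arma11_acov(0.5, 0.2, 1.0, 64),
                              arma11_acov(0.7, -0.1, 1.0, 64)))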

New model order selection in large dimension regime for complex elliptically symmetric noise.
Terreaux, E.; Ovarlez, J.; and Pascal, F.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1090-1094, Aug 2017.

@InProceedings{8081376,
  author = {E. Terreaux and J. Ovarlez and F. Pascal},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {New model order selection in large dimension regime for complex elliptically symmetric noise},
  year = {2017},
  pages = {1090-1094},
  abstract = {This paper presents a new model order selection technique for signal processing applications related to source localization or subspace orthogonal projection techniques in the large dimensional regime (Random Matrix Theory) when the noise environment is Complex Elliptically Symmetric (CES) distributed, with unknown scatter matrix. The proposed method first estimates the Toeplitz structure of the background covariance matrix. In a second step, after a whitening process, the eigenvalue distribution of any of Maronna's M-estimators is exploited, leading to the order selection. Simulations made on different kinds of CES noise, as well as analysis of real hyperspectral images, demonstrate the superiority of the proposed technique over the Akaike Information Criterion and the Minimum Description Length.},
  keywords = {covariance matrices;eigenvalues and eigenfunctions;maximum likelihood estimation;signal processing;statistical distributions;Toeplitz matrices;Random Matrix Theory;noise environment;unknown scatter matrix;Toeplitz structure;background covariance matrix;whitening process;eigenvalues distribution;Maronna's M-estimators;CES noise;complex elliptically symmetric noise;signal processing applications;source localization;model order selection;subspace orthogonal projection technique;Covariance matrices;Eigenvalues and eigenfunctions;Signal processing;Estimation;Symmetric matrices;Convergence;Hyperspectral imaging},
  doi = {10.23919/EUSIPCO.2017.8081376},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341225.pdf},
}
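
A heavily simplified sketch of the pipeline the abstract outlines: Toeplitzify the covariance estimate by diagonal averaging, whiten, then count eigenvalues above a large-dimension edge. The sample covariance here stands in for a Maronna M-estimator, and the Marchenko-Pastur upper edge stands in for the paper's threshold analysis; both substitutions are assumptions of this sketch.

    import numpy as np
    from scipy.linalg import toeplitz

    def toeplitz_estimate(S):
        # Toeplitzify a covariance estimate by averaging its diagonals.
        m = S.shape[0]
        c = np.array([np.diag(S, -k).mean() for k in range(m)])
        return toeplitz(c)  # scipy assumes r = conj(c): Hermitian Toeplitz

    def model_order(X):
        # X: m x n observations. Whiten with the Toeplitz background
        # estimate, then count sample eigenvalues above the
        # Marchenko-Pastur upper edge (1 + sqrt(m/n))^2.
        m, n = X.shape
        S = X @ X.conj().T / n
        w, V = np.linalg.eigh(toeplitz_estimate(S))
        T_isqrt = V @ np.diag(w ** -0.5) @ V.conj().T
        Y = T_isqrt @ X
        ev = np.linalg.eigvalsh(Y @ Y.conj().T / n)
        return int(np.sum(ev > (1 + np.sqrt(m / n)) ** 2))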

A bootstrap method for sinusoid detection in colored noise and uneven sampling. Application to exoplanet detection.
Sulis, S.; Mary, D.; and Bigot, L.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1095-1099, Aug 2017.

@InProceedings{8081377,
  author = {S. Sulis and D. Mary and L. Bigot},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A bootstrap method for sinusoid detection in colored noise and uneven sampling. Application to exoplanet detection},
  year = {2017},
  pages = {1095-1099},
  abstract = {This study is motivated by the problem of evaluating reliable false alarm (FA) rates for sinusoid detection tests applied to unevenly sampled time series involving colored noise, when a (small) training data set of this noise is available. While analytical expressions for the FA rate are out of reach in this situation, we show that it is possible to combine specific periodogram standardization and bootstrap techniques to consistently estimate the FA rate. We also show that the procedure can be improved by using generalized extreme-value distributions. The paper presents several numerical results including a case study in exoplanet detection from radial velocity data.},
  keywords = {extrasolar planets;statistical analysis;time series;bootstrap method;colored noise;exoplanet detection;sinusoid detection tests;unevenly sampled time series;FA rate;specific periodogram standardization;radial velocity data;generalized extreme-value distributions;Time series analysis;Training data;Colored noise;Estimation;Computational modeling;Data models;Extrasolar planets;Bootstrap;colored noise;detection},
  doi = {10.23919/EUSIPCO.2017.8081377},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347488.pdf},
}
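
A sketch of the calibration idea under simplifying assumptions (regular sampling, plain FFT periodograms): standardize by the averaged training periodogram, then bootstrap the maximum of the standardized periodogram to set a false-alarm threshold. The GEV refinement and the uneven-sampling machinery of the paper are not reproduced.

    import numpy as np

    def bootstrap_fa_threshold(train, alpha=0.01, n_boot=2000, seed=0):
        # train: L x N array of noise-only training time series.
        rng = np.random.default_rng(seed)
        P = np.abs(np.fft.rfft(train, axis=1)) ** 2  # training periodograms
        P_bar = P.mean(axis=0)                       # standardization curve
        maxima = np.empty(n_boot)
        for b in range(n_boot):
            # Resample a training series and record the maximum of its
            # standardized periodogram (the detection test statistic).
            i = rng.integers(0, train.shape[0])
            maxima[b] = (P[i] / P_bar).max()
        return np.quantile(maxima, 1 - alpha)        # FA-rate threshold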

A multimodal asymmetric exponential power distribution: Application to risk measurement for financial high-frequency data.
Thibault, A.; and Bondon, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1100-1104, Aug 2017.

@InProceedings{8081378,
  author = {A. Thibault and P. Bondon},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A multimodal asymmetric exponential power distribution: Application to risk measurement for financial high-frequency data},
  year = {2017},
  pages = {1100-1104},
  abstract = {Interest in risk measurement for high-frequency data has increased since the volume of high-frequency trading stepped up over the last two decades. This paper proposes a multimodal extension of the Exponential Power Distribution (EPD), called the Multimodal Asymmetric Exponential Power Distribution (MAEPD). We derive moments and we propose a convenient stochastic representation of the MAEPD. We establish consistency, asymptotic normality and efficiency of the maximum likelihood estimators (MLE). An application to risk measurement for high-frequency data is presented. An autoregressive moving average multiplicative component generalized autoregressive conditional heteroskedastic (ARMA-mcsGARCH) model is fitted to Financial Times Stock Exchange (FTSE) 100 intraday returns. Performance for Value-at-Risk (VaR) and Expected Shortfall (ES) estimation is evaluated. We show that the MAEPD outperforms commonly used distributions in risk measurement.},
  keywords = {autoregressive moving average processes;econophysics;maximum likelihood estimation;stock markets;time series;multimodal asymmetric exponential power distribution;risk measurement;high-frequency trading;MAEPD;value-at-risk;financial high-frequency data;consistency;asymptotic normality;maximum likelihood estimators;MLE;autoregressive moving average multiplicative component generalized autoregressive conditional heteroskedastic model;ARMA-mcsGARCH model;Financial Times Stock Exchange;FTSE;VaR;expected shortfall estimation;ES estimation;Reactive power;Technological innovation;Estimation;Computational modeling;Autoregressive processes;Predictive models;Hafnium;Multimodality;Asymmetric distributions;Expected shortfall;Value-at-Risk;Risk measurement},
  doi = {10.23919/EUSIPCO.2017.8081378},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347246.pdf},
}
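
The VaR/ES evaluation mentioned at the end of the abstract reduces, in its model-free form, to tail quantities of the loss distribution; a minimal empirical reference implementation (the paper computes these from the fitted ARMA-mcsGARCH model with MAEPD innovations, not empirically):

    import numpy as np

    def var_es(returns, level=0.99):
        # Empirical Value-at-Risk and Expected Shortfall at the given
        # confidence level, with losses defined as negated returns.
        losses = -np.asarray(returns)
        var = np.quantile(losses, level)
        es = losses[losses >= var].mean()
        return var, es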

A fast and accurate chirp rate estimation algorithm based on the fractional Fourier transform.
Serbes, A.; and Aldimashki, O.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1105-1109, Aug 2017.

@InProceedings{8081379,
  author = {A. Serbes and O. Aldimashki},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A fast and accurate chirp rate estimation algorithm based on the fractional Fourier transform},
  year = {2017},
  pages = {1105-1109},
  abstract = {In this work, a fast and accurate chirp-rate estimation algorithm is presented. The algorithm is based on the fractional Fourier transform. It is shown that utilization of the golden section search algorithm to find the maximum magnitude over the fractional Fourier transform domains not only accelerates the process, but also increases the accuracy in a noisy environment. Simulation results validate the proposed algorithm and show that the accuracy of parameter estimation nearly achieves the Cramer-Rao lower bound for SNR values as low as -7 dB.},
  keywords = {chirp modulation;Fourier transforms;parameter estimation;search problems;fractional Fourier transform;golden section search algorithm;parameter estimation;chirp rate estimation algorithm;noise figure 7.0 dB;Chirp;Signal processing algorithms;Time-frequency analysis;Fourier transforms;Noise measurement;Signal to noise ratio;Chirp signals;fractional Fourier transform;chirp rate;golden section search},
  doi = {10.23919/EUSIPCO.2017.8081379},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342282.pdf},
}
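
A sketch of the search strategy: golden-section search over a unimodal objective. Instead of a full discrete fractional Fourier transform, the objective below maximizes the peak of the dechirped spectrum over candidate chirp rates, which plays the same role as maximizing |FrFT| over the transform order; that substitution, and the rate bracket, are assumptions of this sketch.

    import numpy as np

    def peak_after_dechirp(x, fs, c):
        # Peak spectral magnitude after removing candidate chirp rate c
        # (Hz/s) from x; maximized when c matches the true chirp rate.
        t = np.arange(len(x)) / fs
        return np.abs(np.fft.fft(x * np.exp(-1j * np.pi * c * t ** 2))).max()

    def golden_search(f, lo, hi, tol=1e-3):
        # Golden-section search for the maximum of a unimodal function f.
        g = (np.sqrt(5) - 1) / 2
        a, b = lo + (1 - g) * (hi - lo), lo + g * (hi - lo)
        fa, fb = f(a), f(b)
        while hi - lo > tol:
            if fa < fb:
                lo, a, fa = a, b, fb
                b = lo + g * (hi - lo); fb = f(b)
            else:
                hi, b, fb = b, a, fa
                a = lo + (1 - g) * (hi - lo); fa = f(a)
        return 0.5 * (lo + hi)

    # usage: c_hat = golden_search(lambda c: peak_after_dechirp(x, fs, c),
    #                              c_min, c_max)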

Linear complex iterative frequency estimation of sparse and non-sparse pulse and point processes.
Bernhard, H.; and Springer, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1110-1114, Aug 2017.

@InProceedings{8081380,
  author = {H. Bernhard and A. Springer},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Linear complex iterative frequency estimation of sparse and non-sparse pulse and point processes},
  year = {2017},
  pages = {1110-1114},
  abstract = {Clock frequency estimation is a key issue in many signal processing applications, e.g. network clock estimation in wireless sensor networks. In wireless systems or harsh environments, it is likely that clock events can be missed and, therefore, the observed process has to be treated as a sparse periodic process. To parameterize the clock, current research is applying periodogram estimators at a complexity of at least O(N log N). We introduce a highly accurate iterative frequency estimator for pulse signals with low computational complexity. An unbiased frequency estimator is presented with a complexity of O(N). Furthermore, the mean square error (MSE), which is proportional to O(N^-3), is derived, and it is shown by theory and simulation that this estimator performs as well as periodogram-based methods. The work concludes with simulations on sparse and non-sparse processes, including a discussion of the application of the method.},
  keywords = {computational complexity;frequency estimation;iterative methods;mean square error methods;signal processing;wireless systems;harsh environments;clock events;sparse periodic process;periodogram estimators;highly accurate iterative frequency estimator;pulse signals;low computational complexity;unbiased frequency estimator;nonsparse processes;linear complex iterative frequency estimation;clock frequency estimation;signal processing applications;network clock estimation;wireless sensor networks;mean square error;Frequency estimation;Estimation;Clocks;Complexity theory;Random variables;Signal processing algorithms},
  doi = {10.23919/EUSIPCO.2017.8081380},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342970.pdf},
}
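
A tiny O(N)-per-pass estimator in the same spirit as the abstract's iterative scheme, tolerating missed events by re-assigning integer cycle indices between least-squares refits; the paper's actual estimator and its O(N^-3) MSE analysis differ in detail, so treat this purely as an illustration of the idea.

    import numpy as np

    def refine_period(t, T0, n_iter=5):
        # t: event timestamps of a sparse periodic point process;
        # T0: coarse initial period. Each pass is O(N): assign integer
        # cycle indices by rounding, then least-squares refit the period.
        T = T0
        for _ in range(n_iter):
            k = np.rint((t - t[0]) / T)
            kc, tc = k - k.mean(), t - t.mean()
            T = np.dot(kc, tc) / np.dot(kc, kc)
        return T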

Convergence acceleration of alternating least squares with a matrix polynomial predictive model for PARAFAC decomposition of a tensor.
Shi, M.; Zhang, J.; Hu, B.; Wang, B.; and Lu, Q.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1115-1119, Aug 2017.

@InProceedings{8081381,
  author = {M. Shi and J. Zhang and B. Hu and B. Wang and Q. Lu},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Convergence acceleration of alternating least squares with a matrix polynomial predictive model for PARAFAC decomposition of a tensor},
  year = {2017},
  pages = {1115-1119},
  abstract = {In this paper, a matrix polynomial whose coefficients are matrices is first defined. Its predictive model, called the Matrix Polynomial Predictive Model (MPPM), is then derived. When the loading matrices of a decomposed tensor in the Alternating Least Squares (ALS) are replaced by the predicted ones of the MPPM, a new ALS algorithm with the MPPM (ALS-MPPM) is proposed. Analyses show that the convergence rate of the proposed ALS-MPPM is closely related to the degree of the matrix polynomial: when faster convergence is desired, a polynomial of higher degree is preferred. Although a high degree means a high possibility of prediction failure, a simple solution can be used to handle such failure. Moreover, the relationship between our ALS-MPPM and the existing ALS-based algorithms is also analyzed. The results of numerical simulations show that the proposed ALS-MPPM outperforms the reported ALS-based algorithms in the literature, and the analytical results are verified.},
  keywords = {convergence of numerical methods;least squares approximations;matrix algebra;polynomials;tensors;convergence acceleration;alternating least squares;matrix polynomial predictive model;ALS-MPPM;accelerative convergence rate;prediction failure;PARAFAC tensor decomposition;numerical simulations;Convergence;Matrix decomposition;Tensile stress;Predictive models;Signal processing algorithms;Prediction algorithms;Acceleration},
  doi = {10.23919/EUSIPCO.2017.8081381},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342730.pdf},
}
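
A sketch of the acceleration pattern on a 3-way tensor: plain CP-ALS plus a degree-1 (linear) prediction of the loading matrices after every sweep, accepted only when it lowers the fit error. This is a crude stand-in for the paper's higher-degree MPPM and its failure-handling rule; the step size and acceptance test are assumptions of this sketch.

    import numpy as np

    def unfold(T, mode):
        # Mode-m matricization, remaining axes kept in original order.
        return np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)

    def khatri_rao(A, B):
        # Column-wise Kronecker product.
        return np.einsum('ir,jr->ijr', A, B).reshape(-1, A.shape[1])

    def cp_als_predictive(T, R, n_iter=30, step=1.0, seed=0):
        rng = np.random.default_rng(seed)
        F = [rng.standard_normal((d, R)) for d in T.shape]
        err = lambda F: np.linalg.norm(T - np.einsum('ir,jr,kr->ijk', *F))
        for _ in range(n_iter):
            F_prev = [A.copy() for A in F]
            for m in range(3):                       # one ALS sweep
                others = [F[i] for i in range(3) if i != m]
                KR = khatri_rao(others[0], others[1])
                F[m] = np.linalg.lstsq(KR, unfold(T, m).T, rcond=None)[0].T
            # Degree-1 matrix-polynomial prediction, with rollback.
            F_pred = [A + step * (A - Ap) for A, Ap in zip(F, F_prev)]
            if err(F_pred) < err(F):
                F = F_pred
        return F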

Comparing a complex-valued sinusoidal process with an autoregressive process using Jeffrey's divergence.
Grivel, E.; Saleh, M.; and Omar, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1120-1124, Aug 2017.

@InProceedings{8081382,
  author = {E. Grivel and M. Saleh and S. Omar},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Comparing a complex-valued sinusoidal process with an autoregressive process using Jeffrey's divergence},
  year = {2017},
  pages = {1120-1124},
  abstract = {This paper deals with the analysis of the Jeffrey's divergence (JD) between an autoregressive process (AR) and a sum of complex exponentials (SCE) whose magnitudes are Gaussian random values, which is then disturbed by additive white noise. As interpreting the value of the JD is not necessarily an easy task, we propose to give an expression of the JD and to analyze the influence of each process parameter on it. More particularly, we show that the ratio between the variance of the additive white noise and the variance of the AR-process driving process on the one hand, and the sum of the ratios between the SCE process power and the AR-process PSD at the normalized angular frequencies on the other hand, have a strong impact on the JD. The 2-norm of the AR parameters also has an influence. Illustrations confirm the theoretical part.},
  keywords = {autoregressive processes;Gaussian distribution;Gaussian processes;random processes;white noise;process parameter;additive white noise;AR-process driving process;SCE process power;AR-process PSD;JD;complex-valued sinusoidal process;autoregressive process;Gaussian random values;sum-of-complex exponentials;Jeffreys divergence;AR-parameter;Correlation;Covariance matrices;Resonant frequency;Europe;Autoregressive processes;Additive white noise;Jeffrey's divergence;Kullback-Leibler divergence;AR process;Sum of complex exponentials;model comparison},
  doi = {10.23919/EUSIPCO.2017.8081382},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347102.pdf},
}

Canonical piecewise-linear representation of curves in the wave digital domain.
Bernardini, A.; and Sarti, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1125-1129, Aug 2017.

@InProceedings{8081383,
  author = {A. Bernardini and A. Sarti},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Canonical piecewise-linear representation of curves in the wave digital domain},
  year = {2017},
  pages = {1125-1129},
  abstract = {Global, explicit representations of nonlinearities are desirable when implementing nonlinear Wave Digital (WD) structures, as they free us from the burden of managing look-up tables, performing data interpolation and/or using iterative solvers. In this paper we present a method that, starting from certain parameterized PieceWise-Linear (PWL) curves in the Kirchhoff domain, allows us to express them in the WD domain using a global and explicit representation. We will show how some curves (multi-valued functions in the Kirchhoff domain) can be represented as functions in canonical PWL form in the WD domain. In particular, we will present a procedure, which, in the most general case, also returns the conditions on the reference port resistance under which it is possible to find explicit mappings in the WD domain.},
  keywords = {interpolation;iterative methods;piecewise linear techniques;table lookup;wave digital domain;data interpolation;iterative solvers;Kirchhoff domain;explicit representation;canonical PWL form;look-up tables;curves canonical piecewise-linear representation;nonlinear wave digital structures;parameterized piecewise-linear curves;WD domain explicit mappings;Indexes;Ports (Computers);Resistance;Europe;Signal processing;Immune system;Table lookup},
  doi = {10.23919/EUSIPCO.2017.8081383},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346821.pdf},
}
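
The underlying change of variables is easy to state: with waves a = v + Rp*i and b = v - Rp*i, a Kirchhoff segment v = m*i + q maps to the wave-domain segment b = rho*a + q*(1 - rho) with rho = (m - Rp)/(m + Rp), so a PWL curve stays PWL and the mapping is explicit whenever m != -Rp. A sketch of that segment-wise mapping; the paper's general procedure, including the conditions it derives on the port resistance, is not reproduced.

    import numpy as np

    def pwl_kirchhoff_to_wave(ms, qs, i_breaks, Rp):
        # ms, qs: slope/intercept of the n segments v = m*i + q;
        # i_breaks: the n-1 breakpoints in current; Rp: reference port
        # resistance. Returns b(a) as slopes/intercepts/a-breakpoints.
        ms, qs = np.asarray(ms, float), np.asarray(qs, float)
        rho = (ms - Rp) / (ms + Rp)          # per-segment wave slope
        b_slopes = rho
        b_icepts = qs * (1.0 - rho)
        # a = (m + Rp)*i + q at each breakpoint; continuity of v makes
        # the choice of adjacent segment immaterial.
        a_breaks = (ms[:-1] + Rp) * np.asarray(i_breaks, float) + qs[:-1]
        return b_slopes, b_icepts, a_breaks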

The ASTRES toolbox for mode extraction of non-stationary multicomponent signals.
Fourer, D.; Harmouche, J.; Schmitt, J.; Oberlin, T.; Meignen, S.; Auger, F.; and Flandrin, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1130-1134, Aug 2017.

@InProceedings{8081384,
  author = {D. Fourer and J. Harmouche and J. Schmitt and T. Oberlin and S. Meignen and F. Auger and P. Flandrin},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {The ASTRES toolbox for mode extraction of non-stationary multicomponent signals},
  year = {2017},
  pages = {1130-1134},
  abstract = {In this paper, we introduce the ASTRES toolbox, which offers a set of Matlab functions for non-stationary multi-component signal processing. The main purpose of this proposal is to offer efficient tools for the analysis, synthesis and transformation of any signal made of physically meaningful components (e.g. sinusoid, trend or noise). The proposed techniques contain some recent and new contributions, which are now unified and theoretically strengthened. They can provide efficient time-frequency or time-scale representations and they allow the extraction of elementary components. Usage and description of each method are then detailed and numerically illustrated.},
  keywords = {signal representation;signal synthesis;time-frequency analysis;time-scale representations;time-frequency representations;nonstationary multicomponent signal processing;Matlab functions;ASTRES toolbox;Continuous wavelet transforms;Time-frequency analysis;Signal processing;Europe},
  doi = {10.23919/EUSIPCO.2017.8081384},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346557.pdf},
}

Sparse frequency extrapolation of spectrograms.
Akhtar, J.; and Olsen, K. E.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1135-1139, Aug 2017.

@InProceedings{8081385,
  author = {J. Akhtar and K. E. Olsen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Sparse frequency extrapolation of spectrograms},
  year = {2017},
  pages = {1135-1139},
  abstract = {The short-time Fourier transform is a prevalent method used to analyze the frequency composition of a signal as a function of time. In order to achieve high resolution in frequency a large sliding window needs to be applied, which degrades the time resolution. This paper proposes the adoption of sparse reconstruction as a means to extrapolate supplementary values in the time domain for each segment. Over short durations a signal's frequency content is likely to contain a limited number of effective frequencies, and a sparse regeneration approach can be advantageous as an extrapolating mechanism. An enlarged number of samples can thus yield spectrograms with high frequency resolution. The capabilities of the proposed techniques are demonstrated on several synthetic and real data signals.},
  keywords = {extrapolation;Fourier transforms;signal reconstruction;spectral analysis;time-domain analysis;sparse frequency extrapolation;short-time Fourier transform;prevalent method;frequency composition;sparse reconstruction;time domain;sparse regeneration approach;high frequency resolution;synthetic data signals;real data signals;spectrograms frequency extrapolation;Spectrogram;Extrapolation;Time-frequency analysis;Signal resolution;Standards;Sparse matrices;Time-domain analysis;spectrogram;short-time Fourier transform;time-frequency analysis;super-resolution;signal extrapolation},
  doi = {10.23919/EUSIPCO.2017.8081385},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340432.pdf},
}
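
A per-segment sketch of the extrapolation idea: fit a sparse sum of complex exponentials to the short segment (here with orthogonal matching pursuit on an oversampled DFT grid, an assumed choice of sparse solver and dictionary), then evaluate the fitted model over a longer support so the subsequent FFT sees an enlarged window.

    import numpy as np

    def omp(D, y, k):
        # Orthogonal matching pursuit: greedily select k dictionary atoms.
        y = y.astype(complex)
        resid, idx = y.copy(), []
        for _ in range(k):
            idx.append(int(np.argmax(np.abs(D.conj().T @ resid))))
            coef, *_ = np.linalg.lstsq(D[:, idx], y, rcond=None)
            resid = y - D[:, idx] @ coef
        return idx, coef

    def extrapolate_segment(y, n_ext, k):
        # Fit a k-sparse exponential model to segment y (length n), then
        # evaluate it over n_ext > n samples: a longer effective window,
        # hence finer frequency bins in the spectrogram column.
        n = len(y)
        grid = np.fft.fftfreq(4 * n)             # assumed 4x-oversampled grid
        t = np.arange(n)[:, None]
        D = np.exp(2j * np.pi * t * grid[None, :])
        idx, coef = omp(D, y, k)
        t_ext = np.arange(n_ext)[:, None]
        return np.exp(2j * np.pi * t_ext * grid[None, idx]) @ coef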

Co-prime sampling jitter analysis.
Dias, U. V.; and Srirangarajan, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1140-1144, Aug 2017.

@InProceedings{8081386,
  author = {U. V. Dias and S. Srirangarajan},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Co-prime sampling jitter analysis},
  year = {2017},
  pages = {1140-1144},
  abstract = {Co-prime arrays and samplers are popular sub-Nyquist schemes for estimating second order statistics at the Nyquist rate. This paper focuses on the perturbations in the array locations or sampling times, and analyzes their effect on the difference set. Based on this analysis we propose a method to estimate the autocorrelation which makes best use of the sampled data in order to improve the estimation accuracy of the autocorrelation and hence the spectral estimate. Our analysis indicates that such an advantage is limited only to samplers, and does not carry over to the antenna arrays. In addition, we obtain expressions for the computational complexity of the autocorrelation estimation and provide an upper bound on the number of multiplications and additions required for its hardware implementation.},
  keywords = {array signal processing;computational complexity;jitter;signal sampling;statistical analysis;Nyquist rate;array locations;sampling times;spectral estimate;antenna arrays;autocorrelation estimation;second order statistics;co-prime sampling jitter analysis;sub-Nyquist schemes;co-prime arrays;computational complexity;Jitter;Estimation;Correlation;Europe;Arrays;Prototypes},
  doi = {10.23919/EUSIPCO.2017.8081386},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347764.pdf},
}
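
A sketch of the unperturbed difference-set estimator that the paper's jitter analysis starts from: keep only samples at multiples of the co-prime factors M and N of the Nyquist grid, then average products over all pairs whose time difference equals each lag. The jitter model and complexity bounds of the paper are not reproduced.

    import numpy as np

    def coprime_autocorrelation(x, M, N, max_lag):
        # x: a Nyquist-grid realization, of which only the samples at
        # multiples of M and of N are assumed observed. Cross-differences
        # of the two samplers supply many more lags than either alone.
        tM = np.arange(0, len(x), M)
        tN = np.arange(0, len(x), N)
        r = np.zeros(max_lag, complex)
        cnt = np.zeros(max_lag, int)
        for a in tM:
            for b in tN:
                lag = abs(int(a) - int(b))
                if lag < max_lag:
                    hi, lo = max(a, b), min(a, b)
                    r[lag] += x[hi] * np.conj(x[lo])
                    cnt[lag] += 1
        return r / np.maximum(cnt, 1)    # lag-wise averaged estimate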

Full proportionate functional link adaptive filters for nonlinear acoustic echo cancellation.
Comminiello, D.; Scarpiniti, M.; Azpicueta-Ruiz, L. A.; Arenas-García, J.; and Uncini, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1145-1149, Aug 2017.

@InProceedings{8081387,
  author = {D. Comminiello and M. Scarpiniti and L. A. Azpicueta-Ruiz and J. Arenas-García and A. Uncini},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Full proportionate functional link adaptive filters for nonlinear acoustic echo cancellation},
  year = {2017},
  pages = {1145-1149},
  abstract = {Nonlinear acoustic echo cancellation (NAEC) can be mainly addressed by solving two different sub-problems: the estimation of the acoustic impulse response and the modeling of the nonlinearities arising in it, mostly caused by the electroacoustic chain. Both modeling processes share an important characteristic: the majority of the parameters to be estimated are very close to zero, with only a small fraction of them having non-negligible magnitude. In this paper, a novel NAEC model is proposed that takes both of the above sub-problems into account within a joint optimization problem. In particular, the proposed model involves two separate and parallel filters, one mainly focusing on the estimation of the acoustic impulse response (AIR) and the other one aiming at the nonlinear modeling. In order to optimize the modeling processes, both filters are adapted by using a joint proportionate algorithm. Experimental results prove the effectiveness of the proposed model in NAEC problems.},
  keywords = {acoustic signal processing;adaptive filters;echo suppression;nonlinear acoustics;nonlinear acoustic echo cancellation;acoustic impulse response;NAEC model;link adaptive filters;electroacoustic chain;Adaptation models;Atmospheric modeling;Acoustics;Estimation;Signal processing algorithms;Adaptive filters;Optimization},
  doi = {10.23919/EUSIPCO.2017.8081387},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346064.pdf},
}
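
Two ingredients named in the abstract are standard and easy to sketch: a trigonometric functional-link expansion (modeling a memoryless loudspeaker nonlinearity) and a proportionate coefficient update, shown here in IPNLMS form. How the paper couples the two parallel filters into its "full proportionate" joint scheme is not reproduced; the step sizes and expansion order are assumptions of this sketch.

    import numpy as np

    def functional_link(x_buf, P=2):
        # Trigonometric functional-link expansion of an input buffer:
        # [x, sin(pi*p*x), cos(pi*p*x)] for p = 1..P.
        feats = [x_buf]
        for p in range(1, P + 1):
            feats += [np.sin(np.pi * p * x_buf), np.cos(np.pi * p * x_buf)]
        return np.concatenate(feats)

    def ipnlms_step(w, u, e, mu=0.5, alpha=0.0, eps=1e-6):
        # IPNLMS-style proportionate update: per-tap gains grow with tap
        # magnitude, matching the near-sparse coefficient vectors the
        # abstract describes. u is the (possibly expanded) input buffer,
        # of the same length as w; e is the current error sample.
        L = len(w)
        g = ((1 - alpha) / (2 * L)
             + (1 + alpha) * np.abs(w) / (2 * np.sum(np.abs(w)) + eps))
        return w + mu * e * g * u / (u @ (g * u) + eps)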

Audio/video supervised independent vector analysis through multimodal pilot dependent components.
Nesta, F.; Mosayyebpour, S.; Koldovský, Z.; and Paleček, K.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1150-1154, Aug 2017.

@InProceedings{8081388,
  author = {F. Nesta and S. Mosayyebpour and Z. Koldovský and K. Paleček},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Audio/video supervised independent vector analysis through multimodal pilot dependent components},
  year = {2017},
  pages = {1150-1154},
  abstract = {Independent Vector Analysis is a powerful tool for estimating the broadband acoustic transfer function between multiple sources and the microphones in the frequency domain. In this work, we consider an extended IVA model which adopts the concept of pilot dependent signals. Without imposing any constraint on the de-mixing system, pilot signals depending on the target source are injected into the model, enforcing the permutation of outputs to be consistent over time. A neural network trained on acoustic data and a lip motion detection are jointly used to produce a multimodal pilot signal dependent on the target source. It is shown through experimental results that this structure allows the enhancement of a predefined target source in very difficult and ambiguous scenarios.},
  keywords = {audio signal processing;frequency-domain analysis;image motion analysis;independent component analysis;learning (artificial intelligence);neural nets;object detection;transfer functions;vectors;video signal processing;audio-video supervised independent vector analysis;multimodal pilot signal dependent component;acoustic transfer function;neural network;lip motion detection;pilot dependent signals;extended IVA model;frequency domain;multiple sources;broadband acoustic transfer function;multimodal pilot dependent components;independent vector analysis;acoustic data;Speech;Acoustics;Artificial neural networks;Time-frequency analysis;Microphones;Lips;Training;independent vector analysis;source separation;independent component analysis;speech enhancement;multimodal processing},
  doi = {10.23919/EUSIPCO.2017.8081388},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341532.pdf},
}

Orthogonally constrained independent component extraction: Blind MPDR beamforming.
Koldovský, Z.; Tichavský, P.; and Kautský, V.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1155-1159, Aug 2017.

@InProceedings{8081389,
  author = {Z. Koldovský and P. Tichavský and V. Kautský},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Orthogonally constrained independent component extraction: Blind MPDR beamforming},
  year = {2017},
  pages = {1155-1159},
  abstract = {We propose a novel technique for the extraction of one independent component from an instantaneous linear complex-valued mixture of signals. The mixing model is optimized in terms of the number of parameters that are necessary to simultaneously estimate one column of the mixing matrix and one row of the de-mixing matrix, which both correspond to the desired source. The desired source is assumed to have a non-Gaussian distribution, while the other sources are modeled, for simplicity, as Gaussian-distributed, although in applications the other sources can be arbitrary. We propose an algorithm that can be interpreted as a blind self-steering Minimum-Power Distortionless Response (MPDR) beamformer. The method is compared with the popular Natural Gradient algorithm for general Independent Component Analysis. Their performances are comparable but the proposed method has a lower computational complexity; in examples, it is about four times faster.},
  keywords = {array signal processing;blind source separation;computational complexity;Gaussian distribution;gradient methods;independent component analysis;Blind MPDR beamforming;instantaneous linear complex;mixing model;column;row;de-mixing matrix;nonGaussian distribution;blind self-steering Minimum-Power Distortionless Response beamformer;general Independent Component Analysis;natural gradient algorithm;computational complexity;orthogonally constrained independent component extraction;Ice;Signal processing algorithms;Europe;Signal processing;Electronic mail;Sensors;Concrete},
  doi = {10.23919/EUSIPCO.2017.8081389},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342825.pdf},
}
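
The MPDR beamformer the abstract refers to has the classical closed form w = R^-1 h / (h^H R^-1 h). A minimal sketch, assuming the desired source's mixing vector h is given; the paper's contribution is estimating h blindly (self-steering), jointly with the extraction filter, which this sketch does not attempt.

    import numpy as np

    def mpdr_extract(X, h):
        # X: channels x frames mixture; h: mixing (steering) vector of the
        # desired source. Returns the beamformer output w^H X.
        R = X @ X.conj().T / X.shape[1]     # sample covariance
        Rih = np.linalg.solve(R, h)
        w = Rih / np.vdot(h, Rih)           # distortionless toward h
        return w.conj() @ X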

EEG-based attention-driven speech enhancement for noisy speech mixtures using N-fold multi-channel Wiener filters.
Das, N.; Van Eyndhoven, S.; Francart, T.; and Bertrand, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1160-1164, Aug 2017.

@InProceedings{8081390,
  author = {N. Das and S. {Van Eyndhoven} and T. Francart and A. Bertrand},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {EEG-based attention-driven speech enhancement for noisy speech mixtures using N-fold multi-channel Wiener filters},
  year = {2017},
  pages = {1160-1164},
  abstract = {Hearing prostheses have built-in algorithms to perform acoustic noise reduction and improve speech intelligibility. However, in a multi-speaker scenario the noise reduction algorithm has to determine which speaker the listener is focusing on, in order to enhance it while suppressing the other interfering sources. Recently, it has been demonstrated that it is possible to detect auditory attention using electroencephalography (EEG). In this paper, we use multi-channel Wiener filters (MWFs) to filter out each speech stream from the speech mixtures in the microphones of a binaural hearing aid, while also reducing background noise. From the demixed and denoised speech streams, we extract envelopes for an EEG-based auditory attention detection (AAD) algorithm. The AAD module can then select the output of the MWF corresponding to the attended speaker. We evaluate our algorithm in a two-speaker scenario in the presence of babble noise and compare it to a previously proposed algorithm. Our algorithm is observed to provide speech envelopes that yield better AAD accuracies, and is more robust to variations in speaker positions and diffuse background noise.},
  keywords = {acoustic noise;electroencephalography;hearing;hearing aids;medical signal processing;noise abatement;signal denoising;speech enhancement;speech intelligibility;Wiener filters;speaker positions;diffuse background noise;noisy speech mixtures;hearing prostheses;acoustic noise reduction;speech intelligibility;speech stream;binaural hearing aid;demixed denoised speech streams;auditory attention detection algorithm;attended speaker;two-speaker scenario;babble noise;speech envelopes;noise reduction algorithm;multispeaker scenario;EEG-based attention-driven speech enhancement;N-fold multichannel Wiener filters;Speech;Signal processing algorithms;Microphones;Electroencephalography;Speech enhancement;Switches;Auditory system},
  doi = {10.23919/EUSIPCO.2017.8081390},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343404.pdf},
}
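
The final selection step described in the abstract is a simple correlation decision; a minimal sketch, assuming the EEG-derived envelope comes from a pre-trained backward (stimulus-reconstruction) decoder and the candidate envelopes come from the per-speaker MWF outputs, neither of which is implemented here.

    import numpy as np

    def select_attended_speaker(eeg_env, speaker_envs):
        # Correlate the envelope reconstructed from EEG with the envelope
        # of each demixed/denoised speaker stream; the most correlated
        # speaker is declared attended and its MWF output is selected.
        corrs = [np.corrcoef(eeg_env, env)[0, 1] for env in speaker_envs]
        return int(np.argmax(corrs)), corrs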

Data-driven and physical model-based designs of probabilistic spatial dictionary for online meeting diarization and adaptive beamforming.
Ito, N.; Araki, S.; and Nakatani, T.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1165-1169, Aug 2017.

@InProceedings{8081391,
  author = {N. Ito and S. Araki and T. Nakatani},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Data-driven and physical model-based designs of probabilistic spatial dictionary for online meeting diarization and adaptive beamforming},
  year = {2017},
  pages = {1165-1169},
  abstract = {In this paper, we comparatively study alternative dictionary designs for recently proposed meeting diarization and adaptive beamforming based on a probabilistic spatial dictionary. This dictionary models the feature distribution for each possible direction of arrival (DOA) of speech signals and the feature distribution for background noise. The dictionary enables online DOA detection, which in turn enables online diarization. Here we describe data-driven and physical model-based designs of the dictionary. Experiments on a meeting dataset showed that a physical model-based dictionary gave a word error rate (WER) of 24.9%, which is close to that for the best-performing data-driven dictionary (24.1%). Therefore, the former has a significant advantage over the latter in that it allows us to bypass the cumbersome measurement of training data without much degradation in the performance of automatic speech recognition (ASR).},
  keywords = {array signal processing;dictionaries;direction-of-arrival estimation;feature extraction;learning (artificial intelligence);probability;speaker recognition;speech processing;adaptive beamforming;probabilistic spatial dictionary;dictionary models;feature distribution;online DOA detection;online diarization;physical model-based dictionary;data-driven dictionary;training data;online meeting diarization;automatic speech recognition;Dictionaries;Speech;Array signal processing;Probabilistic logic;Noise measurement;Microphones;Estimation},
  doi = {10.23919/EUSIPCO.2017.8081391},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346869.pdf},
}
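One way to picture a probabilistic spatial dictionary: one feature model per candidate DOA plus one for noise, with detection as a maximum-likelihood lookup per observation. The sketch below is a strongly simplified, assumption-level stand-in (two microphones, a von-Mises-style phase likelihood with a fixed concentration), not the authors' model.

import numpy as np

def build_dictionary(doas_deg, freq, spacing=0.05, c=343.0):
    # physical-model entries: expected inter-mic phase difference per DOA
    doas = np.deg2rad(np.asarray(doas_deg, dtype=float))
    return 2 * np.pi * freq * spacing * np.cos(doas) / c

def detect(phase_obs, dictionary, kappa=5.0):
    # von-Mises-style log-likelihood of the observed phase per entry
    return int(np.argmax(kappa * np.cos(phase_obs - dictionary)))

doas = np.arange(0, 181, 5)
dic = build_dictionary(doas, freq=1000.0)
obs = build_dictionary([60], freq=1000.0)[0] + 0.02   # noisy source at 60 deg
print(doas[detect(obs, dic)])                         # -> 60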
@InProceedings{8081392,
  author = {D. Kitamura and N. Ono and H. Saruwatari},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Experimental analysis of optimal window length for independent low-rank matrix analysis},
  year = {2017},
  pages = {1170-1174},
  abstract = {In this paper, we address the blind source separation (BSS) problem and analyze the optimal window length in the short-time Fourier transform (STFT) for independent low-rank matrix analysis (ILRMA). ILRMA is a state-of-the-art BSS technique that utilizes the statistical independence between low-rank matrix spectrogram models, which are estimated by nonnegative matrix factorization. In conventional frequency-domain BSS, the modeling error of a mixing system increases when the window length is too short, and the accuracy of statistical estimation decreases when the window length is too long. Therefore, the optimal window length is determined by both the reverberation time and the number of time frames. However, unlike classical BSS methods such as ICA and IVA, ILRMA enables the full modeling of spectrograms, which may improve the robustness to a decrease in the number of frames in a longer-window case. To confirm this hypothesis, the optimal window length for ILRMA is experimentally investigated, and the difference between the performances of ILRMA and conventional BSS is discussed.},
  keywords = {blind source separation;Fourier transforms;frequency-domain analysis;matrix decomposition;reverberation;statistical analysis;ILRMA;low-rank matrix spectrogram models;nonnegative matrix factorization;conventional frequency-domain BSS;blind source separation problem;BSS technique;optimal window length experimental analysis;independent low-rank matrix analysis;short-time Fourier transform;statistical independence;mixing system;error modelling;statistical estimation;reverberation time;time frames;Spectrogram;Multiple signal classification;Speech;Frequency-domain analysis;Estimation;Matrix decomposition;Frequency estimation},
  doi = {10.23919/EUSIPCO.2017.8081392},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347089.pdf},
}
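The window-length trade-off in the abstract is easy to make concrete: longer STFT windows cover more of the room impulse response but leave fewer frames for estimating the source models. A quick scipy illustration (sampling rate and durations are arbitrary choices):

import numpy as np
from scipy.signal import stft

fs = 16000
x = np.random.default_rng(0).standard_normal(10 * fs)   # 10 s toy signal

for win_ms in (64, 128, 256, 512, 1024):
    nperseg = fs * win_ms // 1000
    f, t, X = stft(x, fs=fs, nperseg=nperseg, noverlap=nperseg // 2)
    print(f"{win_ms:5d} ms -> {len(f):5d} bins x {len(t):4d} frames")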
@InProceedings{8081393,
  author = {S. Moschoglou and M. Nicolaou and Y. Panagakis and S. Zafeiriou},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Initializing probabilistic linear discriminant analysis},
  year = {2017},
  pages = {1175-1179},
  abstract = {Component Analysis (CA) consists of a set of statistical techniques that decompose data into appropriate latent components that are relevant to the task at hand (e.g., clustering, segmentation, classification, alignment). During the past few years, an explosion of research in probabilistic CA has been witnessed, with the introduction of several novel methods (e.g., Probabilistic Principal Component Analysis, Probabilistic Linear Discriminant Analysis (PLDA), Probabilistic Canonical Correlation Analysis). PLDA constitutes one of the most widely used supervised CA techniques, utilized to extract suitable, distinct subspaces by exploiting the knowledge of data annotated in terms of different labels. Nevertheless, an inherent difficulty in PLDA variants is the proper initialization of the parameters in order to avoid ending up in poor local maxima. In this light, we propose a novel method to initialize the parameters in PLDA in a consistent and robust way. The performance of the algorithm is demonstrated via a set of experiments on the modified XM2VTS database, which is provided by the authors of the original PLDA model.},
  keywords = {principal component analysis;probability;probabilistic linear discriminant analysis;PLDA;statistical techniques;probabilistic principal component analysis;probabilistic canonical correlation analysis;supervised CA techniques;modified XM2VTS database;Probabilistic logic;Signal processing algorithms;Training;Principal component analysis;Signal processing;Lighting;Europe},
  doi = {10.23919/EUSIPCO.2017.8081393},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347452.pdf},
}
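A common family of PLDA initializations builds the loadings from second-order statistics of the labelled data. The sketch below (PCA of class means for the between-class subspace, PCA of within-class residuals for the within-class subspace) is one such heuristic under stated assumptions, not necessarily the initialization proposed in the paper.

import numpy as np

def init_plda(X, y, q_between, q_within):
    # X: (n, d) features, y: (n,) integer class labels
    classes = np.unique(y)
    mu = X.mean(axis=0)
    means = np.stack([X[y == c].mean(axis=0) for c in classes])
    resid = X - means[np.searchsorted(classes, y)]
    _, _, Vb = np.linalg.svd(means - mu, full_matrices=False)
    _, _, Vw = np.linalg.svd(resid, full_matrices=False)
    F = Vb[:q_between].T          # between-class loading matrix
    G = Vw[:q_within].T           # within-class loading matrix
    return mu, F, G, resid.var()  # isotropic noise variance estimate

X = np.random.default_rng(0).standard_normal((60, 10))
y = np.repeat(np.arange(6), 10)
mu, F, G, s2 = init_plda(X, y, q_between=3, q_within=5)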
@InProceedings{8081394,
  author = {A. Papaioannou and E. Antonakos and S. Zafeiriou},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Complex representations for learning statistical shape priors},
  year = {2017},
  pages = {1180-1184},
  abstract = {Parametrisation of the shape of deformable objects is of paramount importance in many computer vision applications. Many state-of-the-art statistical deformable models perform landmark localisation via optimising an objective function over a certain parametrisation of the object's shape. Arguably, the most popular way is by employing statistical techniques. The points of shape samples of an object lie on a 2D lattice and are normally represented by concatenating the 2D coordinates into a vector. As 2D coordinates can be naturally represented as complex numbers, in this paper we study statistical complex number representations of an object's shape. In particular, we show that the real representation provides a statistical prior similar to that of the widely linear complex model, while the circular complex representation results in a much more condensed encoding.},
  keywords = {computer vision;image representation;learning (artificial intelligence);statistical analysis;complex representations;statistical shape priors;parametrisation;deformable objects;computer vision applications;statistical deformable models;landmark localisation;objective function;statistical techniques;shape samples;statistical complex number representations;widely linear complex model;circular complex representation results;Shape;Principal component analysis;Covariance matrices;Two dimensional displays;Training;Signal processing;Computer vision},
  doi = {10.23919/EUSIPCO.2017.8081394},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347043.pdf},
}
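The representational point is easy to reproduce: stack each landmark's (x, y) into a single complex number and compare the covariance E[zz^H] with the pseudo-covariance E[zz^T]; a circular model assumes the latter vanishes, while the widely linear model keeps both. A minimal numpy check (random shapes stand in for real annotated data):

import numpy as np

rng = np.random.default_rng(0)
shapes = rng.standard_normal((100, 68, 2))      # (samples, landmarks, xy)
z = shapes[..., 0] + 1j * shapes[..., 1]        # complex shape vectors
zc = z - z.mean(axis=0)

C = zc.conj().T @ zc / len(z)   # covariance        E[z z^H]
P = zc.T @ zc / len(z)          # pseudo-covariance E[z z^T]
print(np.linalg.norm(P) / np.linalg.norm(C))    # ~0 under circularity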
@InProceedings{8081395,
  author = {N. Xue and G. Papamakarios and M. Bahri and Y. Panagakis and S. Zafeiriou},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Robust low-rank tensor modelling using Tucker and CP decomposition},
  year = {2017},
  pages = {1185-1189},
  abstract = {A framework for reliable separation of a low-rank subspace from grossly corrupted multi-dimensional signals is pivotal in modern signal processing applications. Current methods fall short of this separation either due to radical simplification or drastic transformation of the data. This has motivated us to propose two new robust low-rank tensor models: Tensor Orthonormal Robust PCA (TORCPA) and Tensor Robust CP Decomposition (TRCPD). They seek Tucker and CP decompositions of a tensor, respectively, with ℓp-norm regularisation. We compare our methods with state-of-the-art low-rank models on both synthetic and real-world data. Experimental results indicate that the proposed methods are faster and more accurate than the methods they are compared to.},
  keywords = {principal component analysis;signal processing;tensors;low-rank tensor modelling;Tucker;Tensor Robust CP Decomposition;Tensor Orthonormal Robust PCA;modern signal processing applications;grossly corrupted multidimensional signals;low-rank subspace;Tensile stress;Robustness;Signal processing algorithms;Signal processing;Brain modeling;Principal component analysis;Europe;Tensor Decomposition;Robust Principal Component Analysis;Tucker;CANDECOMP/PARAFAC},
  doi = {10.23919/EUSIPCO.2017.8081395},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347625.pdf},
}
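For orientation, a plain (non-robust) CP decomposition by alternating least squares fits the factor matrices that TRCPD regularises; the paper's contribution is the ℓp-norm outlier term on top of this. A compact numpy baseline for three-way tensors:

import numpy as np

def unfold(T, mode):
    return np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)

def khatri_rao(A, B):
    return np.einsum('ir,jr->ijr', A, B).reshape(-1, A.shape[1])

def cp_als(T, rank, n_iter=50, seed=0):
    rng = np.random.default_rng(seed)
    A = [rng.standard_normal((s, rank)) for s in T.shape]
    for _ in range(n_iter):
        for m in range(3):
            i, j = [k for k in range(3) if k != m]
            KR = khatri_rao(A[i], A[j])          # matches C-order unfolding
            A[m] = np.linalg.lstsq(KR, unfold(T, m).T, rcond=None)[0].T
    return A

rng = np.random.default_rng(1)
F = [rng.standard_normal((s, 3)) for s in (6, 5, 4)]
T = np.einsum('ir,jr,kr->ijk', *F)               # exact rank-3 toy tensor
A = cp_als(T, rank=3)
T_hat = np.einsum('ir,jr,kr->ijk', *A)
print(np.linalg.norm(T - T_hat) / np.linalg.norm(T))   # ~0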
@InProceedings{8081396,
  author = {D. Triantafyllidou and P. Nousi and A. Tefas},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Lightweight two-stream convolutional face detection},
  year = {2017},
  pages = {1190-1194},
  abstract = {Video capturing using Unmanned Aerial Vehicles provides cinematographers with impressive shots but requires very adept handling of both the drone and the camera. Deep Learning techniques can be utilized in this process to facilitate the video shooting process by allowing the drone to analyze its input and make intelligent decisions regarding its flight path. Fast and accurate on-board face detection, for example, can lead the drone towards capturing opportunistic shots, e.g., close-ups of persons of importance. However, the constraints imposed by the drones' on-board processing power and memory prohibit the utilization of computationally expensive models. In this paper, we propose a lightweight two-stream fully Convolutional Neural Network for face detection, capable of detecting faces in various settings in real-time using the limited processing power Unmanned Aerial Vehicles possess.},
  keywords = {autonomous aerial vehicles;convolution;face recognition;learning (artificial intelligence);mobile robots;neural nets;robot vision;video signal processing;Unmanned Aerial Vehicles;on-board face detection;Convolutional Neural Network;video shooting process;Deep Learning techniques;drone;video capturing;lightweight two-stream convolutional face detection;Face;Face detection;Convolution;Drones;Training;Computational modeling},
  doi = {10.23919/EUSIPCO.2017.8081396},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347567.pdf},
}
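The "two-stream fully convolutional" idea can be sketched as two parallel convolutional branches whose feature maps are concatenated and reduced by a 1x1 convolution into a dense face-probability map. The PyTorch toy below is an illustration with made-up layer sizes, not the paper's architecture.

import torch
import torch.nn as nn

class TwoStreamFCN(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
                               nn.MaxPool2d(2),
                               nn.Conv2d(16, 32, 3, padding=1), nn.ReLU())
        self.b = nn.Sequential(nn.Conv2d(3, 16, 5, padding=2), nn.ReLU(),
                               nn.MaxPool2d(2),
                               nn.Conv2d(16, 32, 5, padding=2), nn.ReLU())
        self.head = nn.Conv2d(64, 1, 1)      # 1x1 conv -> heat map

    def forward(self, x):
        f = torch.cat([self.a(x), self.b(x)], dim=1)
        return torch.sigmoid(self.head(f))   # per-location face probability

print(TwoStreamFCN()(torch.randn(1, 3, 128, 128)).shape)  # (1, 1, 64, 64)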
@InProceedings{8081397,
  author = {E. Sobhani and M. Sadeghi and M. Babaie-Zadeh and C. Jutten},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A robust ellipse fitting algorithm based on sparsity of outliers},
  year = {2017},
  pages = {1195-1199},
  abstract = {Ellipse fitting is widely used in computer vision and pattern recognition algorithms such as object segmentation and pupil/eye tracking. Generally, ellipse fitting amounts to finding the best ellipse parameters that fit a set of data points, which are usually noisy and contain outliers. Algorithms for fitting the best ellipse should be both suitable for real-time applications and robust against noise and outliers. In this paper, we introduce a new method of ellipse fitting which is based on sparsity of outliers and the robust Huber data-fitting measure. We show, firstly, that this approach is theoretically better justified than a state-of-the-art ellipse fitting algorithm based on sparse representation. Secondly, simulation results show that it provides better robustness against outliers than some previous ellipse fitting approaches, while being even faster.},
  keywords = {computer vision;curve fitting;image representation;image segmentation;pattern recognition;sparse representation;robust Huber data fitting measure;real-time applications;pupil-eye tracking;outliers sparsity;previous ellipse fitting approaches;state-of-the-art ellipse;ellipse parameters;object segmentation;pattern recognition algorithms;computer vision;robust ellipse fitting algorithm;Robustness;Minimization;Gaussian noise;Signal processing algorithms;Mathematical model;Dictionaries;Europe},
  doi = {10.23919/EUSIPCO.2017.8081397},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346840.pdf},
}
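As a non-robust reference point, the classical algebraic least-squares fit finds the conic coefficients as the smallest right singular vector of the design matrix; the paper's method replaces this plain residual with a Huber measure plus a sparse outlier term. A minimal numpy version of the baseline:

import numpy as np

def fit_conic(x, y):
    # a x^2 + b xy + c y^2 + d x + e y + f = 0, with ||theta|| = 1
    D = np.column_stack([x*x, x*y, y*y, x, y, np.ones_like(x)])
    return np.linalg.svd(D, full_matrices=False)[2][-1]

rng = np.random.default_rng(0)
t = rng.uniform(0, 2*np.pi, 200)
x = 3*np.cos(t) + 0.01*rng.standard_normal(200)   # ellipse x^2/9 + y^2/4 = 1
y = 2*np.sin(t) + 0.01*rng.standard_normal(200)
theta = fit_conic(x, y)
print(theta / theta[0])        # ~ [1, 0, 2.25, 0, 0, -9]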
@InProceedings{8081398,
  author = {I. Santamaria and J. Via and M. Kirby and T. Marrinan and C. Peterson and L. Scharf},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Constrained subspace estimation via convex optimization},
  year = {2017},
  pages = {1200-1204},
  abstract = {Given a collection of M experimentally measured subspaces, and a model-based subspace, this paper addresses the problem of finding a subspace that approximates the collection, under the constraint that it intersects the model-based subspace in a predetermined number of dimensions. This constrained subspace estimation (CSE) problem arises in applications such as beamforming, where the model-based subspace encodes prior information about the direction-of-arrival of some sources impinging on the array. In this paper, we formulate the CSE problem and present an approximation based on a semidefinite relaxation (SDR) of this non-convex problem. The performance of the proposed CSE algorithm is demonstrated via numerical simulation, and its application to beamforming is also discussed.},
  keywords = {array signal processing;convex programming;optimisation;constrained subspace estimation problem;nonconvex problem;convex optimization;model based subspace;beamforming application;direction-of-arrival estimation;approximation based on a semidefinite relaxation;Estimation;Array signal processing;Signal to noise ratio;Manifolds;Europe;Signal processing algorithms;Subspace averaging;Grassmann manifold;convex optimization;semidefinite relaxation},
  doi = {10.23919/EUSIPCO.2017.8081398},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346779.pdf},
}
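Without the intersection constraint, the natural estimate is the subspace average: the dominant eigenvectors of the mean projection matrix. The sketch below implements only that baseline; the paper's semidefinite relaxation handles the constrained case.

import numpy as np

def average_subspace(bases, dim):
    # bases: list of (n, k) orthonormal basis matrices
    P = sum(U @ U.conj().T for U in bases) / len(bases)
    w, V = np.linalg.eigh(P)
    return V[:, np.argsort(w)[-dim:]]    # top-dim eigenvectors

rng = np.random.default_rng(0)
U0 = np.linalg.qr(rng.standard_normal((8, 2)))[0]
bases = [np.linalg.qr(U0 + 0.1*rng.standard_normal((8, 2)))[0]
         for _ in range(10)]             # M noisy copies of a 2-D subspace
U_avg = average_subspace(bases, 2)       # close to span(U0)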
@InProceedings{8081399,
  author = {A. Lenz and M. S. Stein and A. L. Swindlehurst},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Analog transmit signal optimization for undersampled delay-Doppler estimation},
  year = {2017},
  pages = {1205-1209},
  abstract = {In this work, the optimization of the analog transmit waveform for joint delay-Doppler estimation under sub-Nyquist conditions is considered. Based on the Bayesian Cramer-Rao lower bound (BCRLB), we derive an estimation theoretic design rule for the Fourier coefficients of the analog transmit signal when violating the sampling theorem at the receiver through a wide analog pre-filtering bandwidth. For a wireless delay-Doppler channel, we obtain a system optimization problem which can be solved in compact form by using an eigenvalue decomposition. The presented approach enables one to explore the Pareto region spanned by the optimized analog waveforms. Furthermore, we demonstrate how the framework can be used to reduce the sampling rate at the receiver while maintaining high estimation accuracy. Finally, we verify the practical impact by Monte-Carlo simulations of a channel estimation algorithm.},
  keywords = {Bayes methods;channel estimation;Doppler radar;eigenvalues and eigenfunctions;filtering theory;Fourier transforms;Monte Carlo methods;Pareto optimisation;radar receivers;radar signal processing;signal sampling;synchronisation;analog transmit signal optimization;undersampled delay-Doppler estimation;analog transmit waveform;Bayesian Cramer-Rao lower bound;estimation theoretic design rule;Fourier coefficients;wireless delay-Doppler channel;system optimization problem;channel estimation algorithm;subNyquist conditions;analog prefiltering bandwidth;eigenvalue decomposition;Pareto region;Estimation;Optimization;Bandwidth;Receivers;Signal processing;Europe;Channel estimation;Bayesian Cramer-Rao lower bound;compressive sensing;delay-Doppler estimation;signal optimization;sub-Nyquist sampling;waveform design},
  doi = {10.23919/EUSIPCO.2017.8081399},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347482.pdf},
}
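The closing step the abstract mentions, solving the design problem "in compact form by using an eigenvalue decomposition", follows the usual Rayleigh-quotient pattern: maximising a quadratic performance surrogate w^T A w under unit transmit energy is solved by the principal eigenvector. The matrix below is a random stand-in; the actual BCRLB-derived weighting is in the paper.

import numpy as np

rng = np.random.default_rng(0)
B = rng.standard_normal((16, 16))
A = B @ B.T                   # stand-in PSD weighting over Fourier coefficients
vals, V = np.linalg.eigh(A)
w_opt = V[:, -1]              # unit-energy profile maximising w^T A w
print(w_opt @ A @ w_opt, vals.max())   # equal: the largest eigenvalue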
@InProceedings{8081400,
  author = {T. Richmond and N. Lokare and E. Lobaton},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Robust trajectory-based density estimation for geometric structure recovery},
  year = {2017},
  pages = {1210-1214},
  abstract = {We propose a method to both quickly and robustly extract geometric information from trajectory data. While point density may be of interest in some applications, trajectories provide different guarantees about our data, such as path densities as opposed to the location densities provided by points. We aim to utilize the concise nature of quadtrees in two dimensions to reduce the run-time complexity of counting trajectories in a neighborhood. We compare the accuracy of our methodology to a common current practice for subsampling a structure. Our results show that the proposed method is able to capture the geometric structure. We find an improvement in performance over the current practice in that our method is able to extract only the salient data and ignore trajectory outliers.},
  keywords = {computational complexity;computational geometry;data mining;feature extraction;quadtrees;geometric structure recovery;trajectory data;point density;trajectory-based density estimation;geometric information extraction;quadtrees;run time complexity reduction;salient data extraction;Trajectory;Activity recognition;Three-dimensional displays;Europe;Signal processing;Estimation;Tools;Trajectory counting;density estimation;landmark selection;quadtree},
  doi = {10.23919/EUSIPCO.2017.8081400},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347674.pdf},
}
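The counting primitive is "one vote per trajectory per cell" rather than per point. A flat stand-in for a single quadtree level (the paper's tree refines cells adaptively) makes the distinction concrete:

import numpy as np

def trajectory_counts(trajs, depth=4):
    # trajs: list of (n_i, 2) point arrays in [0, 1)^2
    n = 1 << depth                        # 2^depth cells per axis
    counts = np.zeros((n, n), dtype=int)
    for traj in trajs:
        cells = np.clip((traj * n).astype(int), 0, n - 1)
        for cell in {(int(i), int(j)) for i, j in cells}:
            counts[cell] += 1             # distinct trajectories, not points
    return counts

rng = np.random.default_rng(0)
trajs = [np.cumsum(0.02 * rng.standard_normal((50, 2)), axis=0) % 1.0
         for _ in range(30)]              # toy random-walk trajectories
print(trajectory_counts(trajs).max())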
@InProceedings{8081401,
  author = {I. Santos and P. M. Djurić},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Crowdsource-based signal strength field estimation by Gaussian processes},
  year = {2017},
  pages = {1215-1219},
  abstract = {We address the problem of estimating a spatial field of signal strength from measurements of low accuracy. The measurements are obtained by users whose locations are inaccurately estimated. The spatial field is defined on a grid of nodes with known locations. The users report their locations and received signal strength to a central unit where all the measurements are processed. After the processing of the measurements, the estimated spatial field of signal strength is updated. We use a propagation model of the signal that includes an unknown path loss exponent. Furthermore, our model takes into account the inaccurate locations of the reporting users. In this paper, we employ a Bayesian approach for crowdsourcing that is based on Gaussian Processes. Unlike methods that provide only point estimates, with this approach we get the complete joint distribution of the spatial field. We demonstrate the performance of our method and compare it with the performance of some other methods by computer simulations. The results show that our approach outperforms the other approaches.},
  keywords = {Bayes methods;crowdsourcing;Gaussian processes;RSSI;Gaussian processes;crowdsourcing;crowdsource-based signal strength field estimation;received signal strength;Bayesian approach;Loss measurement;Estimation;Kernel;Europe;Gaussian processes;Monitoring;Transmitters;Sensor networks;Bayesian estimation;regression;spectrum sensing;Gaussian processes},
  doi = {10.23919/EUSIPCO.2017.8081401},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347490.pdf},
}
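A scaled-down version of the estimation task, plain GP regression on reported positions and received signal strength, runs in a few lines of scikit-learn. This omits the paper's Bayesian treatment of the unknown path-loss exponent and of the location errors; all numbers below are synthetic.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

rng = np.random.default_rng(0)
tx = np.array([50.0, 50.0])                          # hidden transmitter
pos = rng.uniform(0, 100, size=(200, 2))
d = np.maximum(np.linalg.norm(pos - tx, axis=1), 1.0)
rss = -30 - 10 * 3.0 * np.log10(d) + rng.normal(0, 2, 200)  # path-loss exp. 3
reported = pos + rng.normal(0, 1.0, pos.shape)       # inaccurate user locations

gp = GaussianProcessRegressor(RBF(20.0) + WhiteKernel(4.0), normalize_y=True)
gp.fit(reported, rss)

g = np.linspace(0, 100, 25)                          # grid of field nodes
grid = np.stack(np.meshgrid(g, g), axis=-1).reshape(-1, 2)
mean, std = gp.predict(grid, return_std=True)        # posterior, not just points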
@InProceedings{8081402,
  author = {L. Chaari and J. Tourneret and H. Batatia},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A general non-smooth Hamiltonian Monte Carlo scheme using Bayesian proximity operator calculation},
  year = {2017},
  pages = {1220-1224},
  abstract = {Sampling from multi-dimensional and complex distributions is still a challenging issue for the signal processing community. In this research area, Hamiltonian Monte Carlo (HMC) schemes were proposed several years ago, using the target distribution geometry to perform efficient sampling. More recently, a non-smooth HMC (ns-HMC) scheme has been proposed to generalize HMC to distributions having non-smooth energy functions. This new scheme relies on the use of a proximity operator, which cannot be explicitly calculated for a large class of energy functions. We propose in this paper a fast and more general ns-HMC scheme that can be applied to any energy function by using a Bayesian calculation of the proximity operator. Moreover, the proposed scheme relies on an interesting property of the proximity operator that avoids heavy calculations at each sampling step. The proposed scheme is tested on different sampling examples involving ℓp and total variation energy functions.},
  keywords = {Bayes methods;mathematical operators;Monte Carlo methods;sampling methods;target distribution geometry;nonsmooth HMC scheme;nonsmooth energy functions;Bayesian calculation;total variation energy functions;nonsmooth Hamiltonian Monte Carlo scheme;Bayesian proximity operator calculation;Hamiltonian Monte Carlo schemes;Bayes methods;Monte Carlo methods;Heuristic algorithms;Signal processing algorithms;Signal processing;Europe;Standards;MCMC;HMC;ns-HMC;proximity operator},
  doi = {10.23919/EUSIPCO.2017.8081402},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347444.pdf},
}
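For readers unfamiliar with proximity operators: for simple penalties they have closed forms, which is what ns-HMC exploits when available; the paper's contribution is a Bayesian way to evaluate them when no such form exists. Two standard closed-form cases as a reference:

import numpy as np

def prox_l1(x, lam):
    # prox of lam*||.||_1: elementwise soft thresholding
    return np.sign(x) * np.maximum(np.abs(x) - lam, 0.0)

def prox_quadratic(x, A, b, lam=1.0):
    # prox of lam*(0.5 u^T A u - b^T u): solve (I + lam A) u = x + lam b
    return np.linalg.solve(np.eye(len(x)) + lam * A, x + lam * b)

print(prox_l1(np.array([-2.0, -0.3, 0.1, 1.5]), 0.5))   # [-1.5  0.  0.  1.]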
@InProceedings{8081403,
  author = {L. Birnie and P. N. Samarasinghe and T. D. Abhayapala},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {3D exterior soundfield capture using pressure and gradient microphone array on 2D plane},
  year = {2017},
  pages = {1225-1229},
  abstract = {Two-dimensional (2D) planar arrays of first-order microphones have been utilised in spherical harmonic decomposition based interior soundfield analysis. This paper proposes an efficient method for designing two-dimensional planar arrays of first-order microphones that are capable of completely capturing three-dimensional (3D) spatial exterior soundfields. First-order microphones are utilised within the array for measurements of pressure gradients, allowing the microphone array to capture soundfield components that conventional planar omni-directional microphone arrays are unable to detect. While spherical microphone arrays are capable of detecting all soundfield components, they have drawbacks in feasibility due to their 3D geometric configuration. The proposed planar array of first-order microphones provides the same functionality as a large spherical array, which needs to encompass all sound sources, while having a scalable geometry that lies purely in a two-dimensional horizontal plane. Simulations show the accuracy and feasibility of the proposed microphone array design in capturing a fully developed exterior soundfield.},
  keywords = {decomposition;microphone arrays;pressure measurement;interior soundfield analysis;two-dimensional planar arrays;first order microphones;three-dimensional spatial exterior soundfields;spherical microphone arrays;first-order microphone;spherical array;two-dimensional horizontal plane;gradient microphone array;spherical harmonic decomposition;2D planar array;pressure microphone array;planar omnidirectional microphone arrays;3D spatial exterior soundfield;pressure gradient measurement;soundfield component detection;3D geometric configuration;sound sources;Harmonic analysis;Microphone arrays;Three-dimensional displays;Pressure measurement;Frequency measurement;Planar arrays},
  doi = {10.23919/EUSIPCO.2017.8081403},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347524.pdf},
}
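Back-of-the-envelope sizing helps explain why the extra gradient channels matter: representing a field up to spherical-harmonic order N takes (N+1)^2 coefficients, and each pressure-plus-gradient node contributes four signals instead of one. The truncation rule of thumb N ≈ ⌈kR⌉ below is a common heuristic in the soundfield literature, used here as an assumption rather than taken from the paper.

import numpy as np

c, R = 343.0, 0.5                     # speed of sound, source-region radius (m)
for f in (500, 1000, 2000, 4000):
    N = int(np.ceil(2 * np.pi * f / c * R))   # N ~ ceil(kR)
    coeffs = (N + 1) ** 2
    print(f"{f:5d} Hz: order {N:2d}, {coeffs:4d} coefficients, "
          f">= {-(-coeffs // 4)} first-order nodes")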
@InProceedings{8081404,
  author = {M. Kato and Y. Senda and R. Kondo},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {TDOA estimation based on phase-voting cross correlation and circular standard deviation},
  year = {2017},
  pages = {1230-1234},
  abstract = {This paper proposes a new TDOA estimation method based on phase-voting cross correlation and circular standard deviation. Based on phase delay and a kernel function, the proposed method generates a probability density function (PDF) of TDOA for each frequency bin. The TDOA estimate is determined by voting the PDFs generated for all frequency bins. Peak positions of the bin-wise PDFs for the target signal are concentrated only at the target time difference, because peak positions for the noise totally differ among bins and the periodicity of peaks depends on frequency. Therefore, by voting the PDFs for all frequency bins, the peak position for the target can be easily identified. The kernel width of the PDF is determined by the circular standard deviation of the cross-spectral phase for each frequency bin. This width control enhances peaks of PDFs for high-SNR frequency bins, since phases for high-SNR bins are more stable than those for low ones. Evaluation with ship and drone sounds shows that the RMSE of TDOA estimation by the proposed method reaches 0.37 times that by GCC-PHAT.},
  keywords = {correlation methods;delay estimation;direction-of-arrival estimation;probability;statistical analysis;time-of-arrival estimation;phase delay;kernel function;frequency bin;bin-wise PDFs;target time difference;circular standard deviation;cross spectral phase;high SNR frequency bins;TDOA estimation;phase-voting cross correlation;probability density function;kernel width;GCC-PHAT;Estimation;Kernel;Probability density function;Standards;Signal to noise ratio;Discrete Fourier transforms;Marine vehicles},
  doi = {10.23919/EUSIPCO.2017.8081404},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345579.pdf},
}
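The GCC-PHAT baseline the evaluation refers to, plus the circular standard deviation used to set kernel widths, both fit in a short numpy/scipy sketch; frame sizes and the sign convention are choices of this illustration.

import numpy as np
from scipy.stats import circstd

def gcc_phat(x1, x2, fs):
    n = len(x1) + len(x2)
    S = np.fft.rfft(x1, n) * np.conj(np.fft.rfft(x2, n))
    cc = np.fft.irfft(S / (np.abs(S) + 1e-12), n)      # phase transform
    cc = np.concatenate([cc[-n//2:], cc[:n//2]])       # centre zero lag
    return (np.argmax(np.abs(cc)) - n // 2) / fs

fs, d = 16000, 12
s = np.random.default_rng(0).standard_normal(fs)
x1, x2 = s, np.concatenate([np.zeros(d), s[:-d]])      # x2 lags by 12 samples
print(gcc_phat(x1, x2, fs) * fs)                       # ~ -12 (x1 leads)

# per-bin circular spread of the cross-spectral phase across frames:
P1, P2 = np.fft.rfft(x1.reshape(50, -1)), np.fft.rfft(x2.reshape(50, -1))
spread = circstd(np.angle(P1 * np.conj(P2)), high=np.pi, low=-np.pi, axis=0)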
@InProceedings{8081405,
  author = {D. Cherkassky and S. E. Chazan and J. Goldberger and S. Gannot},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Successive relative transfer function identification using single microphone speech enhancement},
  year = {2017},
  pages = {1235-1239},
  abstract = {Distortionless speech extraction in a reverberant environment can be achieved by applying a beamforming algorithm, provided that the relative transfer functions (RTFs) of the sources and the covariance matrix of the noise are known. In this contribution, we consider the RTF identification challenge in a multi-source scenario. We propose a successive RTF identification (SRI) method, based on the sole assumption that sources become successively active. The proposed algorithm identifies the RTF of the ith speech source assuming that the RTFs of all other sources in the environment and the power spectral density (PSD) matrix of the noise were previously estimated. The proposed RTF identification algorithm is based on the neural network Mix-Max (NN-MM) single microphone speech enhancement algorithm, followed by a least-squares (LS) system identification method. The proposed RTF estimation algorithm is validated by simulation.},
  keywords = {array signal processing;microphones;neural nets;reverberation;speech enhancement;transfer functions;successive relative transfer function identification;distortionless speech extraction;reverberant environment;beamforming algorithm;relative transfer functions;covariance matrix;RTF identification challenge;multisource scenario;power spectral density matrix;RTF identification algorithm;neural network Mix-Max single microphone speech enhancement algorithm;least-squares system identification method;RTF estimation algorithm;speech source;Speech;Signal processing algorithms;Speech enhancement;Microphones;Array signal processing;Estimation;Artificial neural networks},
  doi = {10.23919/EUSIPCO.2017.8081405},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346562.pdf},
}
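Once a single source has been isolated (by the NN-MM enhancer in the paper), the RTF itself can be estimated with the standard cross-PSD over auto-PSD ratio. A minimal scipy sketch of that last step, on a synthetic one-path channel:

import numpy as np
from scipy.signal import csd, welch

def estimate_rtf(x_ref, x_i, fs, nperseg=512):
    f, Sxy = csd(x_i, x_ref, fs=fs, nperseg=nperseg)   # cross-PSD
    _, Sxx = welch(x_ref, fs=fs, nperseg=nperseg)      # reference auto-PSD
    return f, Sxy / Sxx

rng = np.random.default_rng(0)
x1 = rng.standard_normal(4 * 16000)
x2 = 0.8 * np.concatenate([np.zeros(8), x1[:-8]])      # attenuated, delayed path
f, H = estimate_rtf(x1, x2, fs=16000)
print(np.round(np.abs(H[1:6]), 2))                     # ~0.8 across frequency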
@InProceedings{8081406,
  author = {S. Hafezi and A. H. Moore and P. A. Naylor},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Multiple DOA estimation based on estimation consistency and spherical harmonic multiple signal classification},
  year = {2017},
  pages = {1240-1244},
  abstract = {A common approach to multiple Direction-of-Arrival (DOA) estimation of speech sources is to identify Time-Frequency (TF) bins with dominant Single Source (SS) and apply DOA estimation such as Multiple Signal Classification (MUSIC) only on those TF bins. In the state-of-the-art Direct Path Dominance (DPD)-MUSIC, the covariance matrix, used as the input to MUSIC, is calculated using only the TF bins over a local TF region where only a SS is dominant. In this work, we propose an alternative approach to MUSIC in which all the SS-dominant TF bins for each speaker across the TF domain are globally used to improve the quality of the covariance matrix for MUSIC. Our recently proposed Multi-Source Estimation Consistency (MSEC) technique, which exploits the consistency of initial DOA estimates within a time frame based on adaptive clustering, is used to estimate the SS-dominant TF bins for each speaker. The simulation using a spherical microphone array shows that our proposed MSEC-MUSIC significantly outperforms the state-of-the-art DPD-MUSIC with less than 6.5° mean estimation error and strong robustness to widely varying source separation for up to 5 sources in the presence of realistic reverberation and sensor noise.},
  keywords = {covariance matrices;direction-of-arrival estimation;signal classification;speech processing;time-frequency analysis;Multiple DOA estimation;spherical harmonic multiple signal classification;Direction-of-Arrival estimation;speech sources;Time-Frequency bins;dominant Single Source;covariance matrix;SS-dominant TF bins;TF domain;MSEC-MUSIC;estimation error;DPD-MUSIC;MultiSource Estimation Consistency technique;Direct Path Dominance;Direction-of-arrival estimation;Multiple signal classification;Covariance matrices;Estimation;Speech;Europe},
  doi = {10.23919/EUSIPCO.2017.8081406},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347437.pdf},
}
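The MUSIC stage itself is standard once the covariance matrix has been assembled from the selected TF bins, which is where the MSEC contribution lies. For reference, narrowband MUSIC for a uniform linear array (the paper works in the spherical-harmonic domain; this planar version only shows the subspace mechanics):

import numpy as np
from scipy.signal import find_peaks

def music(R, n_src, grid_deg, d=0.5):      # d: spacing in wavelengths
    M = R.shape[0]
    En = np.linalg.eigh(R)[1][:, :M - n_src]          # noise subspace
    A = np.exp(2j * np.pi * d * np.arange(M)[:, None]
               * np.sin(np.deg2rad(grid_deg)))        # steering matrix
    return 1.0 / np.sum(np.abs(En.conj().T @ A) ** 2, axis=0)

rng = np.random.default_rng(0)
M, N = 8, 500                                         # mics, snapshots
A = np.exp(2j*np.pi*0.5*np.arange(M)[:, None]*np.sin(np.deg2rad([-20, 35])))
X = A @ (rng.standard_normal((2, N)) + 1j*rng.standard_normal((2, N)))
X += 0.1 * (rng.standard_normal((M, N)) + 1j*rng.standard_normal((M, N)))
grid = np.arange(-90, 90.5, 0.5)
p = music(X @ X.conj().T / N, 2, grid)
pk, _ = find_peaks(p)
print(np.sort(grid[pk[np.argsort(p[pk])[-2:]]]))      # ~ [-20, 35]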
@InProceedings{8081407,
  author = {X. Wu and H. Chen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Design and analysis of second-order steerable differential microphone arrays},
  year = {2017},
  pages = {1245-1249},
  abstract = {Second-order differential microphone arrays (DMAs) are among the most commonly used DMAs in practice due to the sensitivity of higher-order DMAs to microphone mismatches and self-noise. However, conventional second-order DMAs are non-steerable, with their mainlobe orientation fixed along the array endfire direction, which makes them inapplicable to the case where sound sources may move over a large angular range. In this paper, we propose a design of second-order steerable DMAs (SOSDAs) using seven microphones. The design procedure is discussed, followed by theoretical analysis of the directivity factor and white noise gain of the proposed SOSDAs. Numerical examples are shown to demonstrate the effectiveness of the proposed design and its theoretical analysis.},
  keywords = {array signal processing;microphone arrays;white noise;second-order steerable differential microphone arrays;higher-order DMA;microphone mismatches;self-noise;second-order steerable DMA;SOSDA;directivity factor;white noise gain;superdirective beamforming;Microphone arrays;Two dimensional displays;Array signal processing;Europe;White noise;Differential microphone array;steerable beamforming;superdirective beamforming},
  doi = {10.23919/EUSIPCO.2017.8081407},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342356.pdf},
}
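A first-order differential pattern, the ingredient second-order designs are built from, is just an omni pair with a small internal delay; the fractional delay below is applied in the frequency domain. This shows only the first-order building block, not the seven-microphone second-order design of the paper.

import numpy as np

def first_order_diff(x_front, x_back, fs, spacing=0.02, c=343.0):
    # cardioid-style pattern: subtract the rear mic delayed by spacing/c
    n = len(x_front)
    delay = spacing / c * fs                  # fractional delay in samples
    w = np.exp(-2j * np.pi * np.fft.rfftfreq(n) * delay)
    return x_front - np.fft.irfft(np.fft.rfft(x_back) * w, n)

fs = 16000
s = np.random.default_rng(0).standard_normal(fs)
tau = 0.02 / 343.0 * fs
w = np.exp(-2j * np.pi * np.fft.rfftfreq(fs) * tau)
x_front = np.fft.irfft(np.fft.rfft(s) * w, fs)     # plane wave from the rear
print(np.std(first_order_diff(x_front, s, fs)))    # ~0: null towards the rear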
@InProceedings{8081408,
  author = {M. H. AlSharif and M. Saad and M. Siala and T. Ballal and H. Boujemaa and T. Y. Al-Naffouri},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Zadoff-Chu coded ultrasonic signal for accurate range estimation},
  year = {2017},
  pages = {1250-1254},
  abstract = {This paper presents a new adaptation of Zadoff-Chu sequences for range estimation and movement tracking. The proposed method transmits Zadoff-Chu sequences over a wideband ultrasonic signal to estimate the range between two devices with very high accuracy and a high update rate. The range estimation is based on time-of-flight (TOF) estimation using cyclic cross-correlation. The system was experimentally evaluated under different noise levels and multi-user interference scenarios. For a single user, the results show less than 7 mm error for 90% of range estimates in a typical indoor environment. Under interference from three other users, the 90% error was less than 25 mm. The system provides a high estimation update rate, allowing accurate tracking of objects moving at high speed.},
  keywords = {correlation methods;radiofrequency interference;accurate range estimation;Zadoff-Chu sequences;movement tracking;wideband ultrasonic signal;range estimation method;time-of-flight estimation;cyclic cross correlation;multiuser interference scenarios;high estimation update rate;Estimation;Correlation;Acoustics;Distance measurement;Signal processing;Wideband;Interference},
  doi = {10.23919/EUSIPCO.2017.8081408},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347827.pdf},
}
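
The two ingredients of the method are standard enough to sketch: a Zadoff-Chu sequence (constant amplitude, ideal periodic autocorrelation) and FFT-based cyclic cross-correlation for time of flight. Root u = 25 and length N = 839 are illustrative values, and the integer-sample recovery below omits the sub-sample interpolation a real ranging system would add.

import numpy as np

def zadoff_chu(u, N):
    """Root-u Zadoff-Chu sequence of odd length N (u coprime with N)."""
    n = np.arange(N)
    return np.exp(-1j * np.pi * u * n * (n + 1) / N)

def tof_samples(rx, ref):
    """Delay in samples at the peak of the cyclic cross-correlation."""
    corr = np.fft.ifft(np.fft.fft(rx) * np.conj(np.fft.fft(ref)))
    return int(np.argmax(np.abs(corr)))

zc = zadoff_chu(25, 839)
rx = np.roll(zc, 37) + 0.05 * (np.random.randn(839) + 1j * np.random.randn(839))
assert tof_samples(rx, zc) == 37          # range = delay / fs * speed_of_sound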
@InProceedings{8081409,
  author = {J. Nikunen and T. Virtanen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Time-difference of arrival model for spherical microphone arrays and application to direction of arrival estimation},
  year = {2017},
  pages = {1255-1259},
  abstract = {This paper investigates different steering techniques for spherical microphone arrays and proposes a time-difference of arrival (TDOA) model for microphones on the surface of a rigid sphere. The model is based on a geometric interpretation of the wavefront incident angle and the extra distance the wavefront needs to travel to reach microphones on the opposite side of the sphere. We evaluate the proposed model by comparing analytic TDOAs to measured TDOAs extracted from impulse responses (IR) of a rigid sphere (r = 7.5 cm). The proposed method achieves over 40% relative improvement in TDOA accuracy in comparison to free-field propagation, and TDOAs extracted from analytic IRs of a spherical microphone array provide an additional 10% improvement. We test the proposed model for the application of source direction of arrival (DOA) estimation using steered response power (SRP) with real reverberant recordings of moving speech sources. All tested methods perform equally well in the noise-free scenario, while the proposed model and simulated IRs improve over the free-field assumption in low SNR conditions. The proposed model has the benefit of using only a single delay for steering the array.},
  keywords = {direction-of-arrival estimation;microphone arrays;reverberation;time-of-arrival estimation;transient response;spherical microphone array;rigid sphere;wavefront incident angle;TDOA accuracy;direction of arrival estimation;time-difference of arrival model;steering techniques;impulse responses;steered response power;reverberant recordings;moving speech sources;size 7.5 cm;Microphone arrays;Direction-of-arrival estimation;Surface waves;Estimation;Surface treatment},
  doi = {10.23919/EUSIPCO.2017.8081409},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346912.pdf},
}
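
The geometric idea can be sketched as a piecewise delay model: on the illuminated side of the sphere the delay is the usual free-field projection, and in the shadow zone the wave additionally travels along the great-circle arc past the tangent point. This is our reading of the model described in the abstract, with illustrative radius and speed of sound; consult the paper for the exact formulation.

import numpy as np

def sphere_delay(mic_dir, doa_dir, r=0.075, c=343.0):
    """Plane-wave delay (s) at a microphone on a rigid sphere, relative to the
    sphere centre. mic_dir: unit vector to the microphone position;
    doa_dir: unit vector toward the source."""
    theta = np.arccos(np.clip(np.dot(mic_dir, doa_dir), -1.0, 1.0))
    if theta <= np.pi / 2:                # line of sight: projection only
        return -r * np.cos(theta) / c
    return r * (theta - np.pi / 2) / c    # arc travel beyond the tangent point

mic = np.array([1.0, 0.0, 0.0])
doa = np.array([-1.0, 0.0, 0.0])          # source behind the sphere
tdoa = sphere_delay(mic, doa) - sphere_delay(-mic, doa)

The model is continuous at theta = pi/2, which is what makes it usable in a steered-response-power scan with a single delay per microphone.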
@InProceedings{8081410,
  author = {S. Ramaswami and Y. Kawaguchi and R. Takashima and T. Endo and M. Togami},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {ADMM-based audio reconstruction for low-cost-sound-monitoring},
  year = {2017},
  pages = {1260-1264},
  abstract = {For low-cost sound monitoring of machinery, we propose a novel audio reconstruction method that is superior in terms of accuracy and processing time. A conventional method based on Orthogonal Matching Pursuit (OMP) has been proposed for audio recovery. However, it performs poorly on machinery sounds because they tend not to be highly sparse, and the reconstruction performance of OMP degrades severely when the signal is not sufficiently sparse. To solve this problem, the proposed method is based on the Alternating Direction Method of Multipliers (ADMM) for Group Lasso combined with a Gabor dictionary. While OMP's performance decreases with the number of nonzero elements, the proposed method is more robust to variations in sparsity and outputs a reasonable result within a few tens of iterations. These features, among others, make the algorithm a reliable solution that offers a better trade-off between accuracy and processing time than the conventional method.},
  keywords = {audio coding;audio signal processing;iterative methods;signal reconstruction;ADMM;low-cost-sound-monitoring;audio recovery;orthogonal matching pursuit;alternating direction method-of-multipliers;audio reconstruction method;OMP performance;Group Lasso;Gabor dictionary;machinery sound;Convex functions;Matching pursuit algorithms;Dictionaries;Image reconstruction;Compressed sensing;Signal processing algorithms;Monitoring;compressive sensing;alternating direction method of multipliers;sub-Nyquist sampling;coprime sampling;orthogonal matching pursuit},
  doi = {10.23919/EUSIPCO.2017.8081410},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346998.pdf},
}
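
ADMM for Group Lasso reduces to three repeating steps: a ridge-like least-squares update, block soft-thresholding of each coefficient group, and a dual update. The sketch below is the generic solver; in the paper's setting A would be a Gabor dictionary with time-frequency groups, whereas the small random system here is purely illustrative.

import numpy as np

def block_shrink(v, kappa):
    """Shrink the l2 norm of a block by kappa (group soft-threshold)."""
    nrm = np.linalg.norm(v)
    return np.zeros_like(v) if nrm <= kappa else (1.0 - kappa / nrm) * v

def admm_group_lasso(A, b, groups, lam, rho=1.0, n_iter=100):
    """Minimize 0.5*||Ax - b||^2 + lam * sum_g ||x_g||_2."""
    n = A.shape[1]
    z, u = np.zeros(n), np.zeros(n)
    P = np.linalg.inv(A.T @ A + rho * np.eye(n))   # cached x-update factor
    Atb = A.T @ b
    for _ in range(n_iter):
        x = P @ (Atb + rho * (z - u))              # ridge-like LS step
        z = np.empty_like(x)
        for g in groups:                           # block soft-thresholding
            z[g] = block_shrink(x[g] + u[g], lam / rho)
        u = u + x - z                              # dual update
    return z

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 12))
x_true = np.zeros(12); x_true[4:8] = rng.standard_normal(4)   # one active group
b = A @ x_true + 0.01 * rng.standard_normal(30)
x_hat = admm_group_lasso(A, b, [slice(0, 4), slice(4, 8), slice(8, 12)], lam=0.5)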
@InProceedings{8081411,
  author = {C. Gaultier and S. Kitić and N. Bertin and R. Gribonval},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {AUDASCITY: AUdio denoising by adaptive social CosparsITY},
  year = {2017},
  pages = {1265-1269},
  abstract = {This work introduces a new algorithm, AUDASCITY, and compares its performance to the time-frequency block thresholding algorithm for the ill-posed problem of audio denoising. We propose a heuristic which combines time-frequency structure, cosparsity, and an adaptive scheme to denoise audio signals corrupted with white noise. We report that AUDASCITY outperforms the state of the art in each numerical comparison. While there is still room for some perceptual improvements, AUDASCITY's usefulness is shown when used as a front-end for a classification task.},
  keywords = {audio signal processing;compressed sensing;signal denoising;time-frequency analysis;white noise;time-frequency structure;adaptive scheme;audio signals;audio denoising;AUDASCITY;time-frequency block thresholding algorithm;white noise;adaptive social cosparsity;Noise reduction;Time-frequency analysis;Signal to noise ratio;Signal processing algorithms;Computational modeling},
  doi = {10.23919/EUSIPCO.2017.8081411},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347077.pdf},
}
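
For orientation, here is the baseline AUDASCITY is compared against: time-frequency block thresholding, which attenuates whole TF blocks by an energy-based gain instead of thresholding bins one by one. This is a simplified sketch; sigma is assumed to be the noise level in the STFT domain, and the block size and threshold lam are illustrative rather than the tuned values from the literature.

import numpy as np
from scipy.signal import stft, istft

def block_threshold_denoise(x, fs, sigma, block=(8, 4), lam=2.0):
    """Attenuate each TF block by max(1 - lam*sigma^2 / mean|Z|^2, 0)."""
    _, _, Z = stft(x, fs, nperseg=512)
    G = np.zeros(Z.shape)
    for i in range(0, Z.shape[0], block[0]):
        for j in range(0, Z.shape[1], block[1]):
            sl = (slice(i, i + block[0]), slice(j, j + block[1]))
            energy = np.mean(np.abs(Z[sl]) ** 2) + 1e-12
            G[sl] = max(1.0 - lam * sigma ** 2 / energy, 0.0)
    _, y = istft(G * Z, fs, nperseg=512)
    return y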
@InProceedings{8081412,
  author = {Q. Liu and W. Wang and P. J. B. Jackson and Y. Tang},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A perceptually-weighted deep neural network for monaural speech enhancement in various background noise conditions},
  year = {2017},
  pages = {1270-1274},
  abstract = {Deep neural networks (DNN) have recently been shown to give state-of-the-art performance in monaural speech enhancement. However, in the DNN training process the perceptual difference between different components of the DNN output is not fully exploited; equal importance is often assumed. To address this limitation, we propose a new perceptually-weighted objective function within a feedforward DNN framework, aiming to minimize the perceptual difference between the enhanced speech and the target speech. A perceptual weight is integrated into the proposed objective function and has been tested on two types of output features: spectra and ideal ratio masks. Objective evaluations for both speech quality and speech intelligibility have been performed. Integration of our perceptual weight shows consistent improvement on several noise levels and a variety of different noise types.},
  keywords = {feedforward neural nets;speech enhancement;speech intelligibility;speech recognition;perceptually-weighted deep neural network;monaural speech enhancement;background noise conditions;DNN training process;perceptually-weighted objective function;feedforward DNN framework;speech quality;speech intelligibility;Speech;Training;Speech enhancement;Distortion;Linear programming;Psychoacoustic models},
  doi = {10.23919/EUSIPCO.2017.8081412},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346979.pdf},
}
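
The core change is in the objective, not the network: the squared error of each output bin is scaled by a perceptual weight before averaging. A minimal stand-in is below, with an invented low-frequency-emphasis weight; the paper derives its actual weights from a psychoacoustic model.

import numpy as np

def perceptually_weighted_mse(y_pred, y_target, w):
    """Weighted spectral MSE; w > 1 marks perceptually important bins."""
    return np.mean(w * (y_pred - y_target) ** 2)

n_bins = 257
w = np.linspace(2.0, 0.5, n_bins)     # assumed weighting, for illustration only
loss = perceptually_weighted_mse(np.zeros(n_bins), np.ones(n_bins), w)

Because w is a constant per-bin factor, the gradient of this loss is just the usual MSE gradient scaled bin-wise, so it drops into any feedforward DNN training loop unchanged.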
@InProceedings{8081413,
  author = {F. Albu and Y. Li and Y. Wang},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Low-complexity non-uniform penalized affine projection algorithms for active noise control},
  year = {2017},
  pages = {1275-1279},
  abstract = {This paper describes new algorithms that incorporate the non-uniform norm constraint into the zero-attracting and reweighted modified filtered-x affine projection (or pseudo affine projection) algorithms for active noise control. The simulations indicate that the proposed algorithms obtain better performance for primary and secondary paths with various sparseness levels at an insignificant increase in numerical complexity. It is also shown that the version using a linear function instead of the reweighted term leads to the best results, particularly for combinations of sparse or semi-sparse primary and secondary paths.},
  keywords = {active filters;active noise control;compressed sensing;signal denoising;active noise control;pseudo affine projection algorithms;reweighted modified filtered-x affine projection;zero-attracting affine projection;nonuniform penalized affine projection algorithms;Signal processing algorithms;Approximation algorithms;Convergence;Filtering algorithms;Complexity theory;Europe;Signal processing;active noise control;affine projection;zero-attracting algorithms;norm penalty},
  doi = {10.23919/EUSIPCO.2017.8081413},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343953.pdf},
}
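
A sparsity-penalized affine projection update differs from the plain one only by an attractor term pulling coefficients toward zero. The sketch shows the generic zero-attracting form; the paper's contribution is a non-uniform penalty (and the filtered-x structure needed for ANC), which this minimal version omits.

import numpy as np

def za_apa_update(w, X, d, mu=0.5, delta=1e-4, rho=1e-5):
    """One zero-attracting affine projection step.
    X: (L, P) matrix whose columns are the last P input vectors;
    d: (P,) desired samples; delta: regularization; rho: attractor strength."""
    e = d - X.T @ w                                        # a-priori errors
    w = w + mu * X @ np.linalg.solve(X.T @ X + delta * np.eye(X.shape[1]), e)
    return w - rho * np.sign(w)                            # zero attractor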
@InProceedings{8081414,
  author = {A. Belhomme and R. Badeau and Y. Grenier and E. Humbert},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Amplitude and phase dereverberation of monocomponent signals},
  year = {2017},
  pages = {1280-1284},
  abstract = {While most dereverberation methods focus on how to estimate the amplitude of an anechoic signal, we propose a method which also takes the phase into account. By applying a sinusoidal model to the anechoic signal, we derive a formulation to compute the amplitude and phase of each sinusoid. These parameters are then estimated by our method in the reverberant case. As we jointly estimate the amplitude and phase of the clean signal, we achieve a very strong dereverberation, resulting in a significant improvement of objective dereverberation measures over the state-of-the-art.},
  keywords = {acoustic signal processing;reverberation;anechoic signal;phase dereverberation;monocomponent signals;sinusoidal model;Time-frequency analysis;Reverberation;Spectrogram;Speech;Frequency estimation;Europe},
  doi = {10.23919/EUSIPCO.2017.8081414},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340502.pdf},
}
@InProceedings{8081415,
  author = {K. Naskovska and A. A. Korobkov and M. Haardt and J. Haueisen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Analysis of the photic driving effect via joint EEG and MEG data processing based on the coupled CP decomposition},
  year = {2017},
  pages = {1285-1289},
  abstract = {There are many combined signal processing applications, such as the joint processing of EEG (Electroencephalogram) and MEG (Magnetoencephalogram) data, that can benefit from coupled CP (Canonical Polyadic) tensor decompositions. The coupled CP decomposition jointly decomposes tensors that have at least one factor matrix in common. The C-SECSI (Coupled - Semi-Algebraic framework for approximate CP decomposition via Simultaneous matrix diagonalization) framework provides a semi-algebraic solution for the coupled CP decomposition of noise-corrupted low-rank tensors. The C-SECSI framework efficiently computes the factor matrices even in ill-posed scenarios with an adjustable complexity-accuracy trade-off. In this paper, we present a reliability test for the C-SECSI framework that can improve the model order estimation. Moreover, we analyse the photic driving effect from simultaneously recorded EEG and MEG data using the C-SECSI framework. The EEG and MEG data used in the analysis are obtained by stimulating volunteers with flickering light at different frequencies that are multiples of the individual alpha frequency of each volunteer.},
  keywords = {electroencephalography;magnetoencephalography;medical signal processing;neurophysiology;tensors;joint processing;coupled CP tensor decompositions;C-SECSI framework;photic driving effect;combined signal processing applications;MEG data;EEG data;electroencephalogram;magnetoencephalogram;canonical polyadic;coupled-semialgebraic framework;noise corrupted low-rank tensors;ill-posed scenarios;reliability test;complexity-accuracy trade-off;flickering light;alpha frequency;Tensile stress;Electroencephalography;Matrix decomposition;Reliability;Resonant frequency;Time-frequency analysis;Signal processing},
  doi = {10.23919/EUSIPCO.2017.8081415},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570344349.pdf},
}
@InProceedings{8081416,
  author = {D. Axman and J. S. Paiva and F. {de La Torre} and J. P. S. Cunha},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Beat-to-beat ECG features for time resolution improvements in stress detection},
  year = {2017},
  pages = {1290-1294},
  abstract = {In stress sensing, Window-derived Heart Rate Variability (W-HRV) methods are by far the most heavily used feature extraction methods. However, these W-HRV methods come with a variety of tradeoffs that motivate the development of alternative methods in stress sensing. We compare our method of using HeartBeat Morphology (HBM) features for stress sensing to the traditional W-HRV method for feature extraction. In order to adequately evaluate these methods, we conducted a Trier Social Stress Test (TSST) to elicit stress in a group of 13 firefighters while recording their ECG, actigraphy, and psychological self-assessment measures. We utilize the data from this experiment to analyze both feature extraction methods in terms of computational complexity, detection resolution performance, and event localization performance. Our results show that each method has an ideal niche for its use in stress sensing. HBM features tend to be more effective in an online stress detection context, while W-HRV is more suitable for offline post-processing to determine the exact localization of the stress event.},
  keywords = {electrocardiography;feature extraction;medical signal detection;medical signal processing;time resolution improvements;stress sensing;feature extraction;W-HRV methods;detection resolution performance;HBM features;stress detection context;Trier social stress test;beat-to-beat ECG features;window-derived heart rate variability;heartbeat morphology features;actigraphy;psychological self-assessment measures;event localization performance;offline post processing;stress detection;Feature extraction;Stress;Electrocardiography;Heart beat;Protocols},
  doi = {10.23919/EUSIPCO.2017.8081416},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342777.pdf},
}
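
The W-HRV side of the comparison is easy to make concrete: slide a window over the R-peak train and compute variability statistics of the RR intervals in each window. SDNN and RMSSD below are standard choices; the window and hop lengths are illustrative, and the per-beat morphology features the paper favours for online detection are not shown.

import numpy as np

def windowed_hrv(t_s, rr_s, win=60.0, hop=5.0):
    """Rows of (window start, SDNN, RMSSD) from R-peak times t_s (s) and the
    RR intervals rr_s (s) ending at those peaks."""
    feats, t0 = [], t_s[0]
    while t0 + win <= t_s[-1]:
        rr = rr_s[(t_s >= t0) & (t_s < t0 + win)]
        if rr.size > 2:
            feats.append((t0, rr.std(), np.sqrt(np.mean(np.diff(rr) ** 2))))
        t0 += hop
    return np.array(feats)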
@InProceedings{8081417,
  author = {N. Jrad and A. Kachenoura and A. Nica and I. Merlet and F. Wendling},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A Page-Hinkley based method for HFOs detection in epileptic depth-EEG},
  year = {2017},
  pages = {1295-1299},
  abstract = {Interictal High Frequency Oscillations (HFOs, [30-600 Hz]), recorded by intracerebral electroencephalography (iEEG) in the epileptic brain, have been shown to be potential biomarkers of epilepsy. Hence, their automatic detection has become a subject of high interest. So far, detection algorithms have consisted of comparing HFO energy, computed in bands of interest, to a threshold. In this paper, a sequential technique is investigated. Detection is based on a variant of the Cumulative Sum (CUSUM) test, the so-called Page-Hinkley algorithm, which gives optimal results for detecting abrupt changes in the mean of a normal random signal. Experiments on simulated and real datasets showed the good performance of the method in terms of sensitivity and false detection rate. Compared to classical thresholding, Page-Hinkley showed better performance.},
  keywords = {bioelectric potentials;biomedical electrodes;electroencephalography;medical disorders;medical signal detection;medical signal processing;neurophysiology;signal reconstruction;spatiotemporal phenomena;Page-Hinkley based method;epileptic depth-EEG;epileptic brain;sequential technique;false detection rate;interictal high frequency oscillation detection;cumulative sum test;intracerebral electroencephalography;frequency 30.0 Hz to 600.0 Hz;Signal processing algorithms;Europe;Signal processing;Sensitivity;Transient analysis;Oscillators;Abrupt change;Page-Hinkley algorithm;Cumulative Sum test;Gabor Transform;intracerebral electroencephalography;epilepsy;interictal High Frequency Oscillations},
  doi = {10.23919/EUSIPCO.2017.8081417},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347540.pdf},
}
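
The Page-Hinkley test itself is a few lines: track the cumulative deviation of the samples from their running mean (minus a drift tolerance delta) and raise an alarm when the deviation rises lam above its running minimum. In this setting it would run on an HFO-band energy sequence; delta and lam below are placeholders to be tuned.

def page_hinkley(x, delta=0.01, lam=1.0):
    """Return the index of the first detected upward mean change, else -1."""
    mean, cum, cum_min = 0.0, 0.0, 0.0
    for t, xt in enumerate(x, start=1):
        mean += (xt - mean) / t          # running mean
        cum += xt - mean - delta         # cumulative deviation
        cum_min = min(cum_min, cum)
        if cum - cum_min > lam:          # PH statistic exceeds threshold
            return t - 1
    return -1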
@InProceedings{8081418,
  author = {M. Haritopoulos and J. Krug and A. Illanes and M. Friebe and A. K. Nandi},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Cyclostationary analysis of ECG signals acquired inside an ultra-high field MRI scanner},
  year = {2017},
  pages = {1300-1304},
  abstract = {In this paper, a strategy is proposed to estimate the R-peaks in ECG signals recorded inside a 7 T magnetic resonance imaging (MRI) scanner, in order to reduce the disturbances due to the magnetohydrodynamic (MHD) effect and finally obtain high-quality cardiovascular magnetic resonance (CMR) images. We first show that the cyclostationarity property of the ECG signal disturbed by the MHD effect can be quantified by means of cyclic spectral analysis. Then, this information is forwarded as input to a cyclostationary source extraction algorithm applied to a set of ECG recordings acquired inside the MRI scanner in Feet-first (Ff) and Head-first (Hf) positions. Finally, detection of the R-peaks in the estimated cyclostationary signal completes the proposed procedure. Validation of the method is performed by comparing the estimated R-peaks with the clinical annotations provided with the real-world dataset. The obtained results are promising, and future research directions are discussed.},
  keywords = {bioelectric potentials;biomedical MRI;cardiovascular system;electrocardiography;magnetohydrodynamics;medical image processing;spectral analysis;magnetohydrodynamic effect;high quality cardiovascular magnetic resonance images;cyclic spectral analysis;cyclostationary source extraction algorithm;magnetic resonance imaging scanner;ECG signal recordings;cyclostationary signal estimation;ultrahigh field MRI scanner;R-peak detection;Electrocardiography;Magnetohydrodynamics;Signal processing algorithms;Correlation;Lead;Coherence},
  doi = {10.23919/EUSIPCO.2017.8081418},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347125.pdf},
}
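
Cyclostationarity at the cardiac rhythm can be quantified with the cyclic autocorrelation, which is compact enough to sketch: correlate the signal with a lagged copy of itself modulated at the candidate cycle frequency. The circular-shift edge handling and the parameterization below are simplifications of a full cyclic spectral analysis.

import numpy as np

def cyclic_autocorr(x, alpha, fs, max_lag):
    """R_x^alpha(tau) = <x(t+tau) * conj(x(t)) * exp(-j*2*pi*alpha*t)>.
    A peak over alpha near the heart rate flags cardiac cyclostationarity."""
    n = np.arange(x.size)
    w = np.conj(x) * np.exp(-2j * np.pi * alpha * n / fs)
    return np.array([np.mean(np.roll(x, -tau) * w) for tau in range(max_lag)])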
@InProceedings{8081419,
  author = {G. Garcia-Molina and K. Baehr and B. Steele and T. Tsoneva and S. Pfundtner and B. Riedner and D. P. White and G. Tononi},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Automatic characterization of sleep need dissipation using a single hidden layer neural network},
  year = {2017},
  pages = {1305-1308},
  abstract = {In the two-process sleep model, the rate of sleep need dissipation is proportional to slow wave activity (SWA; EEG power in the 0.5 to 4 Hz band). The dynamics of sleep need dissipation are characterized by two parameters (the initial sleep need S0 and the decay rate γ) that can be calculated from SWA values in NREM sleep. The goal in this paper is to use a neural network classifier to automatically detect NREM sleep and estimate Ŝ0 and γ̂ using a single EEG signal captured during sleep at home. Data from twenty subjects (4 sleep nights per subject) were used in this research. The neural network architecture was optimized using the EEG sleep data from a previous study as training and validation sets. Given the nature of the model, only three stages were considered (NREM, REM, and WAKE). The classification accuracy characterized by the Kappa value achieved on this study dataset was 0.63 (substantial agreement with manual staging), and the specificity/sensitivity for NREM detection were 0.87 and 0.8 respectively. The higher specificity in NREM detection led to systematic S0 underestimation (i.e., S0 > Ŝ0) and γ overestimation (i.e., γ < γ̂). However, the variability of Ŝ0 and γ̂ across nights of the same subject is lower than that of S0 and γ. This shows that using automatic staging to characterize sleep need dissipation captures the most specific and least variable EEG segments that contribute to SWA. This makes the approach suitable for characterizing sleep need outside sleep-lab settings (e.g., at home), which cannot be controlled to the same extent as sleep-lab studies.},
  keywords = {electroencephalography;medical disorders;medical signal processing;neural nets;neurophysiology;signal classification;sleep;sleep lab settings;automatic characterization;single hidden layer neural network;neural network classifier;sleep need dissipation;sleep model;slow wave activity;EEG power;initial sleep need;decay rate;NREM sleep detection;classification accuracy;Kappa value;automatic staging;single EEG signal;EEG sleep data;Sleep;Electroencephalography;Neurons;Biological neural networks;Manuals;Europe},
  doi = {10.23919/EUSIPCO.2017.8081419},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340870.pdf},
}
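
The two-process-model fit at the heart of this pipeline is a plain exponential decay, SWA(t) ≈ S0·exp(-γt), estimated over NREM epochs only. Below is a log-linear least-squares sketch on synthetic SWA values; in the paper the NREM mask comes from the neural network stager, and the S0, γ values here are arbitrary.

import numpy as np

def fit_sleep_need(t_hours, swa, nrem_mask):
    """Return (S0, gamma) from SWA(t) ~ S0*exp(-gamma*t) on NREM epochs."""
    t = np.asarray(t_hours)[nrem_mask]
    y = np.log(np.asarray(swa)[nrem_mask])
    slope, intercept = np.polyfit(t, y, 1)   # straight line in the log domain
    return np.exp(intercept), -slope

t = np.linspace(0.0, 8.0, 960)               # 30-s epochs over 8 h
swa = 400.0 * np.exp(-0.4 * t) * np.exp(0.05 * np.random.randn(t.size))
S0_hat, gamma_hat = fit_sleep_need(t, swa, np.ones(t.size, dtype=bool))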
@InProceedings{8081420,
  author = {M. N. Tabassum and E. Ollila},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Pathwise least angle regression and a significance test for the elastic net},
  year = {2017},
  pages = {1309-1313},
  abstract = {Least angle regression (LARS) by Efron et al. (2004) is a method for constructing the piecewise linear path of Lasso solutions. For several years, it remained the de facto method for computing the Lasso solution before more sophisticated optimization algorithms superseded it. The LARS method has recently regained popularity due to its ability to find the values of the penalty parameter, called knots, at which a new parameter enters the active set of non-zero coefficients. The significance test for the Lasso by Lockhart et al. (2014), for example, requires solving the knots via the LARS algorithm. Elastic net (EN), on the other hand, is a highly popular extension of Lasso that uses a linear combination of Lasso and ridge regression penalties. In this paper, we propose a novel algorithm, called pathwise (PW-)LARS-EN, that is able to compute the EN knots over a grid of EN tuning parameter α values. The developed PW-LARS-EN algorithm decreases the EN tuning parameter and exploits the previously found knot values and the original LARS algorithm. A covariance test statistic for the Lasso is then generalized to the EN for testing the significance of the predictors. Our simulation studies validate that the test statistic asymptotically follows an Exp(1) distribution.},
  keywords = {elasticity;exponential distribution;optimisation;regression analysis;testing;least angle regression;pathwise LARS-EN;PW-LARS-EN algorithm;de facto method;asymptotic Exp(1) distribution;nonzero coefficients;penalty parameters;LARS method;Lasso solution;piece-wise linear path;elastic net;pathwise;covariance test statistic;knot values;parameter α values;EN knots;ridge regression penalties;linear combination;Signal processing algorithms;Prediction algorithms;Tuning;Signal processing;Europe;Testing;Predictive models},
  doi = {10.23919/EUSIPCO.2017.8081420},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347287.pdf},
}
@InProceedings{8081421,
  author = {S. Scott and J. Wawrzynek},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Compressive sensing and sparse antenna arrays for indoor 3-D microwave imaging},
  year = {2017},
  pages = {1314-1318},
  abstract = {A new 3-D microwave imaging technique, based on compressive sensing, is proposed for use with sparse antenna arrays. It was designed to enable cost-effective 3-D imaging and tracking of people in an indoor environment. This algorithm is able to image both sparse and cluttered environments through the use of wavelet transforms and compressive sensing techniques. The main advantage of the proposed technique is that it enables the use of much sparser antenna arrays than is possible with the traditional range-migration algorithm, reducing the cost of microwave imaging systems. Experiments show that the compressive sensing algorithm produced high quality 3-D images using antenna arrays that are 90 to 96% sparse. This reduces the cost of the antenna array by a factor of 10 to 25, when compared to traditional dense arrays, without a loss in image resolution.},
  keywords = {antenna arrays;array signal processing;compressed sensing;image resolution;indoor radio;microwave imaging;stereo image processing;sparse antenna arrays;sparse environments;cluttered environments;compressive sensing techniques;sparser antenna arrays;microwave imaging systems;compressive sensing algorithm;image resolution;high quality 3D images;indoor 3D microwave imaging;Microwave antenna arrays;Transmitting antennas;Receiving antennas;Antenna measurements;Microwave imaging;Microwave imaging;compressed sensing},
  doi = {10.23919/EUSIPCO.2017.8081421},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342751.pdf},
}
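
As a pointer to what compressive-sensing reconstruction means operationally here, a generic iterative soft-thresholding (ISTA) solver for the l1-regularized least-squares problem is sketched below. The paper's reconstruction operates on 3-D voxels in a wavelet basis; this version assumes an identity sparsifying transform and arbitrary dimensions.

import numpy as np

def ista(A, y, lam, n_iter=200):
    """Minimize 0.5*||Ax - y||^2 + lam*||x||_1 by proximal gradient steps."""
    L = np.linalg.norm(A, 2) ** 2            # Lipschitz constant of the gradient
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        g = x - (A.T @ (A @ x - y)) / L      # gradient step
        x = np.sign(g) * np.maximum(np.abs(g) - lam / L, 0.0)   # soft-threshold
    return x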
@InProceedings{8081422,
  author = {Y. Lu and W. Dai and Y. C. Eldar},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Optimal number of measurements for compressed sensing with quadratically decreasing SNR},
  year = {2017},
  pages = {1319-1323},
  abstract = {In this paper, we consider a practical signal transmission application with a fixed power budget, such as radar/sonar. The system is modeled by a linear equation with the assumption that the signal energy per measurement decreases linearly and the noise energy per measurement increases approximately linearly as the number of measurements grows. Thus the SNR decreases quadratically with the number of measurements. This model suggests an optimal operation point different from the common wisdom where more measurements always mean better performance. Our analysis shows that there is an optimal number of measurements, neither too few nor too many, that minimizes the mean-squared error of the estimate. The analysis is based on a state evolution technique which is proposed for the approximate message passing algorithm. We consider the Gaussian, Bernoulli-Gaussian and least-favorable distributions in both real and complex domains. Numerical results justify the correctness of our analysis.},
  keywords = {compressed sensing;Gaussian processes;mean square error methods;message passing;compressed sensing;SNR;practical signal transmission application;fixed power budget;linear equation;signal energy;noise energy;mean-squared error;approximate message passing algorithm;optimal operation point;state evolution technique;Bernoulli-Gaussian distributions;least-favorable distributions;Energy measurement;Noise measurement;Approximation algorithms;Algorithm design and analysis;Mathematical model;Message passing;Signal processing algorithms;Approximate message passing;compressed sensing;state evolution;signal recovery},
  doi = {10.23919/EUSIPCO.2017.8081422},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347050.pdf},
}
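
The power model is easy to reproduce numerically: with m measurements, scale the sensing rows by sqrt(P/m) and the noise by sqrt(sigma2*m), so the per-measurement SNR falls as 1/m². The toy sweep below uses plain OMP rather than the paper's AMP state-evolution machinery, and all dimensions and power levels are arbitrary; with such settings the empirical MSE is typically smallest at an intermediate m, neither the fewest nor the most measurements.

import numpy as np

def omp(A, y, k):
    """Orthogonal matching pursuit with known sparsity k."""
    r, idx = y.copy(), []
    for _ in range(k):
        idx.append(int(np.argmax(np.abs(A.T @ r))))
        xs, *_ = np.linalg.lstsq(A[:, idx], y, rcond=None)
        r = y - A[:, idx] @ xs
    x = np.zeros(A.shape[1]); x[idx] = xs
    return x

rng = np.random.default_rng(0)
n, k, P, sigma2 = 200, 10, 1.0, 1e-4

def empirical_mse(m, trials=50):
    err = 0.0
    for _ in range(trials):
        x = np.zeros(n); x[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
        B = np.sqrt(P / m) * rng.standard_normal((m, n))   # row energy ~ 1/m
        y = B @ x + np.sqrt(sigma2 * m) * rng.standard_normal(m)
        err += np.mean((omp(B, y, k) - x) ** 2)
    return err / trials

curve = {m: empirical_mse(m) for m in (40, 80, 160, 320, 640)}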
@InProceedings{8081423,
  author = {S. Hsieh and W. Liang and C. Lu and S. Pei},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Distributed compressive sensing: Performance analysis with diverse signal ensembles},
  year = {2017},
  pages = {1324-1328},
  abstract = {Distributed compressive sensing is a framework considering joint sparsity within signal ensembles along with multiple measurement vectors (MMVs). The current theoretical bound of performance for MMVs, however, is derived to be the same as that for the single MV (SMV) case, because the characteristics of signal ensembles are ignored. In this work, we introduce a new factor called "Euclidean distances between signals" for the performance analysis of a deterministic signal model under the MMV framework. We show that, by taking the size of signal ensembles into consideration, MMVs indeed exhibit better performance than SMV. Although our concept can be broadly applied to CS algorithms with MMVs, the case study conducted on a well-known greedy solver, called simultaneous orthogonal matching pursuit (SOMP), is explored in this paper. We show that the performance of SOMP, when incorporated with our concept by modifying the support detection and signal estimation steps, improves remarkably, especially when the Euclidean distances between signals are short. The performance of the modified SOMP is verified to meet our theoretical prediction.},
  keywords = {compressed sensing;estimation theory;greedy algorithms;iterative methods;signal detection;distributed compressive sensing;performance analysis;multiple measurement vectors;SMV;deterministic signal model;MMVs framework;Euclidean distances between signals;single MV;CS algorithms;simultaneous orthogonal matching pursuit;SOMP;greedy solver;signal estimations;signal detection;diverse signal ensemble characteristics;Performance analysis;Estimation;Sensors;Compressed sensing;Electronic mail;Analytical models;Europe},
  doi = {10.23919/EUSIPCO.2017.8081423},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341747.pdf},
}
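
Plain SOMP is short enough to sketch, and it makes the MMV idea explicit: one shared support is grown greedily, ranking atoms by the joint (l2 across channels) correlation with all residuals at once. The modification proposed in the paper, exploiting the Euclidean distances between the ensemble's signals, is not included here; dimensions are illustrative.

import numpy as np

def somp(A, Y, k):
    """Simultaneous OMP: one shared k-atom support for all columns of Y."""
    R, idx = Y.copy(), []
    for _ in range(k):
        idx.append(int(np.argmax(np.linalg.norm(A.T @ R, axis=1))))
        Xs, *_ = np.linalg.lstsq(A[:, idx], Y, rcond=None)
        R = Y - A[:, idx] @ Xs
    X = np.zeros((A.shape[1], Y.shape[1])); X[idx] = Xs
    return X

rng = np.random.default_rng(2)
A = rng.standard_normal((40, 100)); A /= np.linalg.norm(A, axis=0)
sup = rng.choice(100, 5, replace=False)
X_true = np.zeros((100, 3)); X_true[sup] = rng.standard_normal((5, 3))
X_hat = somp(A, A @ X_true + 0.01 * rng.standard_normal((40, 3)), 5)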
@InProceedings{8081424,\n  author = {G. Parthasarathy and G. Abhilash},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Transform learning algorithm based on the probability of representation of signals},\n  year = {2017},\n  pages = {1329-1333},\n  abstract = {Compressed sensing is a signal acquisition scheme that measures signals at sub-Nyquist rate amenable to sparse recovery, with high probability, from a reduced set of measurements. One of the main requirements of compressive sensing is the sparsity of the class of signals of interest in some basis. A method to construct a sparsifying basis for a class of signals using information theoretic measures is proposed in this paper. The algorithm constructs the sparsifying basis from a known non-sparsifying basis by concentrating the probability distribution of the basis in the representation of a class of signals. Simulation studies using speech and image signals confirm that the basis constructed using the proposed method results in an improved sparsity of the signals with thresholded coefficients but without degrading the signal quality.},\n  keywords = {compressed sensing;information theory;learning (artificial intelligence);probability;signal classification;signal detection;signal reconstruction;signal representation;compressed sensing;signal acquisition scheme;sub-Nyquist rate;compressive sensing;information theoretic measures;image signals;signal quality;transform learning algorithm;signal representation probability;sparse recovery;Transforms;Entropy;Signal processing algorithms;Training;Optimization;Dictionaries;Algorithm design and analysis;Transform learning;Sparse representation;Compressed sensing;Representation entropy;Sparse modeling},\n  doi = {10.23919/EUSIPCO.2017.8081424},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342755.pdf},\n}\n\n
\n
\n\n\n
\n Compressed sensing is a signal acquisition scheme that measures signals at a sub-Nyquist rate, amenable to sparse recovery, with high probability, from a reduced set of measurements. One of the main requirements of compressive sensing is the sparsity of the class of signals of interest in some basis. A method to construct a sparsifying basis for a class of signals using information-theoretic measures is proposed in this paper. The algorithm constructs the sparsifying basis from a known non-sparsifying basis by concentrating the probability distribution of the basis in the representation of a class of signals. Simulation studies using speech and image signals confirm that the basis constructed using the proposed method results in improved sparsity of the signals with thresholded coefficients, without degrading the signal quality.\n
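To make the "concentrating the probability distribution" idea concrete, here is a hedged sketch of one plausible information-theoretic measure, the representation entropy named in the paper's keywords: the Shannon entropy of the normalized coefficient energies in a given basis (lower entropy means a more concentrated, i.e. sparser, representation). The exact measure and construction used by the authors may differ.

```python
import numpy as np

def representation_entropy(signal, basis):
    """basis: (n, n) orthonormal matrix whose columns are the basis vectors."""
    coeffs = basis.T @ signal
    p = coeffs**2 / np.sum(coeffs**2)     # probability-like energy distribution
    p = p[p > 0]                          # 0 * log 0 = 0 by convention
    return -np.sum(p * np.log2(p))
```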
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multiplierless unified architecture for mixed radix-2/3/4 FFTs.\n \n \n \n \n\n\n \n Qureshi, F.; Takala, J.; Volkova, A.; and Hilaire, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1334-1338, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MultiplierlessPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081425,\n  author = {F. Qureshi and J. Takala and A. Volkova and T. Hilaire},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multiplierless unified architecture for mixed radix-2/3/4 FFTs},\n  year = {2017},\n  pages = {1334-1338},\n  abstract = {This paper presents a novel runtime-reconfigurable, mixed radix core for computation 2-, 3-, 4- point fast Fourier transforms (FFT). The proposed architecture is based on radix-3 Wingorad Fourier transform, however multiplication is performed by constant multiplication instead of general multiplier. The complexity is equal to multiplierless 3-point FFT in terms of adders/subtractors with the exception of a few additional multiplexers. The proposed architecture supports all the FFT sizes which can be factorized into 2, 3, 4 point systems. We also show that the proposed architecture has the same bound on the accuracy as the classical one.},\n  keywords = {adders;digital arithmetic;fast Fourier transforms;constant multiplication;radix-3 Wingorad Fourier transform;mixed radix core;mixed radix-2;multiplierless unified architecture;FFT sizes;3-point FFT;Computer architecture;Adders;Multiplexing;Hardware;Quantization (signal);Signal processing algorithms;Fast Fourier transform (FFT);Memory-based FFT;Mixed radix},\n  doi = {10.23919/EUSIPCO.2017.8081425},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346780.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a novel runtime-reconfigurable, mixed-radix core for the computation of 2-, 3-, and 4-point fast Fourier transforms (FFTs). The proposed architecture is based on the radix-3 Winograd Fourier transform; however, multiplication is performed with constant multipliers instead of a general multiplier. The complexity is equal to that of a multiplierless 3-point FFT in terms of adders/subtractors, with the exception of a few additional multiplexers. The proposed architecture supports all FFT sizes that can be factorized into 2-, 3-, and 4-point systems. We also show that the proposed architecture has the same bound on accuracy as the classical one.\n
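The butterfly underlying such an architecture can be stated compactly. The sketch below is the textbook 3-point Winograd DFT, in which only the two real constants 1/2 and sqrt(3)/2 multiply data; this is what admits a shift-and-add (multiplierless) hardware realization. It illustrates the principle, not the authors' RTL.

```python
import numpy as np

C1, C2 = 0.5, np.sqrt(3) / 2.0       # the only multiplicative constants

def dft3_winograd(x0, x1, x2):
    u, v = x1 + x2, x1 - x2
    s = x0 - C1 * u                  # real part shared by X1 and X2
    X0 = x0 + u
    X1 = s - 1j * C2 * v
    X2 = s + 1j * C2 * v
    return X0, X1, X2

# Sanity check against numpy's FFT:
x = np.random.randn(3) + 1j * np.random.randn(3)
assert np.allclose(dft3_winograd(*x), np.fft.fft(x))
```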
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Buffer dimensioning for throughput improvement of dynamic dataflow signal processing applications on multi-core platforms.\n \n \n \n \n\n\n \n Michalska, M.; Bezati, E.; Casale-Brunet, S.; and Mattavelli, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1339-1343, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"BufferPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081426,\n  author = {M. Michalska and E. Bezati and S. Casale-Brunet and M. Mattavelli},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Buffer dimensioning for throughput improvement of dynamic dataflow signal processing applications on multi-core platforms},\n  year = {2017},\n  pages = {1339-1343},\n  abstract = {Executing a dataflow program on a parallel platform requires assigning to each buffer a given size so that correct program executions take place without introducing any deadlock. Furthermore, in the case of dynamic dataflow programs, specific buffer size assignments lead to significant differences in the throughput, hence a more appropriate optimization problem is to specify the buffer sizes so that the throughput is maximized and the used resources are minimized. This paper introduces a new heuristic methodology for the buffer dimensioning of dynamic dataflow programs, which is considered as a stage of a more general design space exploration process.},\n  keywords = {data flow computing;multiprocessing systems;optimisation;dynamic dataflow programs;buffer dimensioning;throughput improvement;dynamic dataflow signal processing applications;multicore platforms;parallel platform;specific buffer size assignments;Throughput;Optimization;Space exploration;Signal processing;System recovery;Parallel processing;Minimization},\n  doi = {10.23919/EUSIPCO.2017.8081426},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347135.pdf},\n}\n\n
\n
\n\n\n
\n Executing a dataflow program on a parallel platform requires assigning to each buffer a given size so that correct program executions take place without introducing any deadlock. Furthermore, in the case of dynamic dataflow programs, specific buffer size assignments lead to significant differences in the throughput, hence a more appropriate optimization problem is to specify the buffer sizes so that the throughput is maximized and the used resources are minimized. This paper introduces a new heuristic methodology for the buffer dimensioning of dynamic dataflow programs, which is considered as a stage of a more general design space exploration process.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An embedded solution for multispectral palmprint recognition.\n \n \n \n \n\n\n \n Li, C.; Benezeth, Y.; Nakamura, K.; Gomez, R.; and Yang, F.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1344-1348, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081427,\n  author = {C. Li and Y. Benezeth and K. Nakamura and R. Gomez and F. Yang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An embedded solution for multispectral palmprint recognition},\n  year = {2017},\n  pages = {1344-1348},\n  abstract = {Palmprint based identification has attracted much attention in the past decades. In some real-life applications, portable personal authentication systems with high accuracy and speed efficiency are required. This paper presents an embedded palmprint recognition solution based on the multispectral image modality. We first develop an effective recognition algorithm by using partial least squares regression, then a FPGA prototype is implemented and optimized through high-level synthesis technique. The evaluation experiments demonstrate that the proposed system can achieve a higher recognition rate at a lower running cost comparing to the reference implementations.},\n  keywords = {embedded systems;field programmable gate arrays;high level synthesis;least squares approximations;palmprint recognition;regression analysis;multispectral palmprint recognition;palmprint based identification;FPGA prototype;high-level synthesis technique;partial least squares regression;multispectral image modality;embedded palmprint recognition solution;speed efficiency;portable personal authentication systems;Signal processing algorithms;Optimization;Algorithm design and analysis;Feature extraction;Databases;Pipelines},\n  doi = {10.23919/EUSIPCO.2017.8081427},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347242.pdf},\n}\n\n
\n
\n\n\n
\n Palmprint-based identification has attracted much attention in the past decades. In some real-life applications, portable personal authentication systems with high accuracy and speed efficiency are required. This paper presents an embedded palmprint recognition solution based on the multispectral image modality. We first develop an effective recognition algorithm using partial least squares regression; then an FPGA prototype is implemented and optimized through high-level synthesis techniques. The evaluation experiments demonstrate that the proposed system can achieve a higher recognition rate at a lower running cost compared to the reference implementations.\n
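A hedged software sketch of the recognition step: partial least squares regression used as a one-hot classifier over palmprint feature vectors, here via scikit-learn. Feature extraction, the database, and the FPGA/HLS mapping are outside the scope of the sketch, and the function names are ours.

```python
import numpy as np
from sklearn.cross_decomposition import PLSRegression

def train_pls_classifier(X, y, n_components=10):
    """X: (n_samples, n_features) palmprint features; y: integer identity labels."""
    classes = np.unique(y)
    Y = np.eye(len(classes))[np.searchsorted(classes, y)]  # one-hot regression targets
    pls = PLSRegression(n_components=n_components).fit(X, Y)
    return pls, classes

def predict_identity(pls, classes, X):
    scores = pls.predict(X)        # regressed class scores, one column per identity
    return classes[np.argmax(scores, axis=1)]
```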
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Parametrisable digital design of a sphere decoder with high-level synthesis.\n \n \n \n \n\n\n \n Knoop, B.; Schwez, L.; Peters-Drolshagen, D.; and Paul, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1349-1353, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ParametrisablePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081428,\n  author = {B. Knoop and L. Schwez and D. Peters-Drolshagen and S. Paul},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Parametrisable digital design of a sphere decoder with high-level synthesis},\n  year = {2017},\n  pages = {1349-1353},\n  abstract = {A Sphere Decoder is a popular tree search algorithm for the solution of integer least squares minimisation problems. It has gained considerable attention for its application to maximum likelihood detection of digitally modulated signals in MIMO communication systems and can almost universally be applied to a plethora of problems with some modifications to the sphere constraint. This creates the need for a baseline digital hardware design of a configurable Sphere Decoder, which can be adjusted for various applications. This paper presents the implementation of a baseline Sphere Decoder with high-level synthesis (HLS) in connection with a data type-agnostic programming methodology, which makes it even more flexible.},\n  keywords = {decoding;high level synthesis;maximum likelihood detection;MIMO communication;tree searching;parametrisable digital design;high-level synthesis;maximum likelihood detection;digitally modulated signals;MIMO communication systems;baseline digital hardware design;baseline Sphere Decoder;tree search algorithm;integer least squares minimisation;Hardware;Signal processing algorithms;Maximum likelihood decoding;Search problems;Algorithm design and analysis;Complexity theory},\n  doi = {10.23919/EUSIPCO.2017.8081428},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347553.pdf},\n}\n\n
\n
\n\n\n
\n A Sphere Decoder is a popular tree search algorithm for the solution of integer least squares minimisation problems. It has gained considerable attention for its application to maximum likelihood detection of digitally modulated signals in MIMO communication systems and can almost universally be applied to a plethora of problems with some modifications to the sphere constraint. This creates the need for a baseline digital hardware design of a configurable Sphere Decoder, which can be adjusted for various applications. This paper presents the implementation of a baseline Sphere Decoder with high-level synthesis (HLS) in connection with a data type-agnostic programming methodology, which makes it even more flexible.\n
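For concreteness, here is a minimal depth-first sphere decoder for the real-valued integer least-squares problem min_s ||y - Hs||^2 over a finite alphabet, the algorithm family the paper maps to hardware. It uses a QR decomposition and prunes branches that leave the current sphere; it is a plain software sketch (assuming H has at least as many rows as columns), not the HLS design.

```python
import numpy as np

def sphere_decode(H, y, alphabet):
    """Solve min_s ||y - H s||^2 with entries of s drawn from `alphabet`."""
    Q, R = np.linalg.qr(H)                      # H = Q R, R upper triangular
    z = Q.T @ y
    n = H.shape[1]
    best = {"s": None, "r2": np.inf}

    def search(level, s, dist2):
        if dist2 >= best["r2"]:                 # sphere constraint: prune the branch
            return
        if level < 0:                           # full candidate inside the sphere
            best["s"], best["r2"] = s.copy(), dist2
            return
        # Interference from the symbols already fixed at deeper levels.
        acc = z[level] - R[level, level + 1:] @ s[level + 1:]
        for sym in alphabet:
            s[level] = sym
            search(level - 1, s, dist2 + (acc - R[level, level] * sym) ** 2)

    search(n - 1, np.zeros(n), 0.0)
    return best["s"]

# Example: 4x4 real MIMO detection over a BPSK-like alphabet {-1, +1}.
H = np.random.randn(4, 4)
s_true = np.random.choice([-1.0, 1.0], size=4)
print(sphere_decode(H, H @ s_true + 0.05 * np.random.randn(4), [-1.0, 1.0]))
```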
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An approximate hardware check node for λ-min-based LDPC decoders.\n \n \n \n\n\n \n Perris-Samios, G.; and Paliouras, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1354-1357, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081429,\n  author = {G. Perris-Samios and V. Paliouras},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An approximate hardware check node for λ-min-based LDPC decoders},\n  year = {2017},\n  pages = {1354-1357},\n  abstract = {In this paper, iterative decoding using Belief Propagation λ-min decoding algorithm is considered. In this algorithm check nodes use only the λ lowest-magnitude messages thus simplifying the hardware complexity and reducing memory usage. A parallel-input architecture is proposed for the check node. We focus on the determination of the sought minima in a parallel fashion. Novel simplified circuits for the derivation of the λ minimum values are introduced here. The main novelty that leads to substantial hardware simplification is the approximate derivation of the λ values. Specifically, We here show that using the introduced approximate computation substantial hardware savings are obtained with no significant degradation in decoding performance. For cases of practical interest the proposed solution is shown to reduce the number of comparisons per check node from 14 down to 7; i.e., 2 times.},\n  keywords = {iterative decoding;parity check codes;approximate hardware check node;algorithm check nodes;lowest-magnitude messages;hardware complexity;parallel-input architecture;substantial hardware simplification;decoding performance;sought minima determination;iterative decoding;approximate computation substantial hardware savings;belief propagation λ-min decoding algorithm;λ-min-based LDPC decoders;memory usage reduction;Iterative decoding;Approximation algorithms;Complexity theory;Hardware;Decoding;Manganese;Low density parity-check codes (LDPCc);Belief propagation decoding algorithm (BP);Belief propagation λ-min;λ-min approximation},\n  doi = {10.23919/EUSIPCO.2017.8081429},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper, iterative decoding using the Belief Propagation λ-min decoding algorithm is considered. In this algorithm, check nodes use only the λ lowest-magnitude messages, thus simplifying the hardware and reducing memory usage. A parallel-input architecture is proposed for the check node. We focus on determining the sought minima in a parallel fashion. Novel simplified circuits for the derivation of the λ minimum values are introduced here. The main novelty that leads to substantial hardware simplification is the approximate derivation of the λ values. Specifically, we show that using the introduced approximate computation, substantial hardware savings are obtained with no significant degradation in decoding performance. For cases of practical interest, the proposed solution is shown to reduce the number of comparisons per check node from 14 down to 7, i.e., by a factor of 2.\n
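The simplification can be modelled in a few lines: only the λ smallest input magnitudes enter the expensive box-plus computation at the check node. The sketch below is a generic software model of a λ-min check node (assuming nonzero LLRs), not the approximate parallel circuit proposed in the paper.

```python
import numpy as np

def phi(x):
    """Self-inverse Gallager function used in exact check-node processing."""
    x = np.maximum(x, 1e-12)                  # avoid log(0)
    return -np.log(np.tanh(x / 2.0))

def lambda_min_check_node(llrs, lam=3):
    """llrs: incoming variable-to-check LLRs; returns check-to-variable LLRs."""
    mags, signs = np.abs(llrs), np.sign(llrs)
    sign_prod = np.prod(signs)
    S = np.argsort(mags)[:lam]                # indices of the λ smallest magnitudes
    phi_S = phi(mags[S])
    out = np.empty_like(llrs)
    for j in range(len(llrs)):
        # Exclude edge j from the selected set if it was chosen.
        idx = [k for k, i in enumerate(S) if i != j]
        out[j] = sign_prod * signs[j] * phi(np.sum(phi_S[idx]))
    return out
```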
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Smart signal interconnection by the use of a photosensitive polymer.\n \n \n \n \n\n\n \n Saito, M.; Hamazaki, T.; and Sakiyama, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1358-1361, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SmartPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081430,\n  author = {M. Saito and T. Hamazaki and K. Sakiyama},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Smart signal interconnection by the use of a photosensitive polymer},\n  year = {2017},\n  pages = {1358-1361},\n  abstract = {Photosensitivity of a dye-dispersed polymer was utilized for creating a self-controlled photonic interconnection. Polydimethylsiloxane that contained photochromic diarylethene changed its color depending on wavelengths of irradiated laser beams. Transmission characteristics of this polymer were examined by using laser pulses of 405 (violet), 450 (blue), or 532 nm (green) wavelength as photonic signals. When violet or green signal pulses (1 kHz or 1 kbps) were launched into this polymer, an optical path was formed in self-organized manner, and consequently, the output signal intensity increased as time passed. By contrast, the intensity of blue pulses decreased gradually, since they erased their optical path by themselves.},\n  keywords = {dyes;laser beams;light transmission;optical interconnections;optical polymers;photochromism;visible spectra;smart signal interconnection;photosensitive polymer;photonic interconnection;polydimethylsiloxane;irradiated laser beams;green signal pulses;photochromic diarylethene;light transmission characteristics;violet signal pulses;blue signal pulses;frequency 1.0 kHz;wavelength 532.0 nm;Optical pulses;Polymers;Optical fibers;Optical signal processing;Laser beams;Measurement by laser beam;optical interconnection;optical signal processing;self-control;photochromism;polymer},\n  doi = {10.23919/EUSIPCO.2017.8081430},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340042.pdf},\n}\n\n
\n
\n\n\n
\n Photosensitivity of a dye-dispersed polymer was utilized to create a self-controlled photonic interconnection. Polydimethylsiloxane that contained photochromic diarylethene changed its color depending on the wavelengths of irradiated laser beams. Transmission characteristics of this polymer were examined by using laser pulses of 405 nm (violet), 450 nm (blue), or 532 nm (green) wavelength as photonic signals. When violet or green signal pulses (1 kHz or 1 kbps) were launched into this polymer, an optical path was formed in a self-organized manner, and consequently, the output signal intensity increased as time passed. By contrast, the intensity of blue pulses decreased gradually, since they erased their own optical path.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Compact framework for reproducible analysis of finite wordlength effects in linear digital networks.\n \n \n \n \n\n\n \n Luengo, D.; Osés, D.; and Cruz-Roldán, F.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1362-1366, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CompactPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081431,\n  author = {D. Luengo and D. Osés and F. Cruz-Roldán},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Compact framework for reproducible analysis of finite wordlength effects in linear digital networks},\n  year = {2017},\n  pages = {1362-1366},\n  abstract = {The analysis of finite wordlength effects in linear digital networks requires specifying the exact order in which all the internal computations are performed, as well as the type and resolution of all the quantizers. Popular digital filter descriptions (difference equations, transfer function, state space representation, etc.) are unable to provide a bit-true description valid for any filter structure, thus preventing most of the results in the literature from being truly reproducible. Furthermore, the quantizers are often not properly described. In this work, we introduce a novel and compact framework to describe unambiguously any single-input single-output (SISO) linear digital network. The proposed approach is simple, efficient and guarantees the reproducibility of the results obtained for any network by allowing us to describe in detail the data flow as well as the quantization of all the coefficients and operations performed. An example of a third order Butterworth filter with four different implementations is provided to show the descriptive power and flexibility of the proposed approach.},\n  keywords = {Butterworth filters;digital filters;quantisation (signal);roundoff errors;finite wordlength effects;quantizers;difference equations;bit-true description;filter structure;single-input single-output linear digital network;quantization;reproducible analysis;third order Butterworth filter;digital filter descriptions;Quantization (signal);Delays;Europe;MATLAB;Iron;Lattices},\n  doi = {10.23919/EUSIPCO.2017.8081431},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347141.pdf},\n}\n\n
\n
\n\n\n
\n The analysis of finite wordlength effects in linear digital networks requires specifying the exact order in which all the internal computations are performed, as well as the type and resolution of all the quantizers. Popular digital filter descriptions (difference equations, transfer function, state space representation, etc.) are unable to provide a bit-true description valid for any filter structure, thus preventing most of the results in the literature from being truly reproducible. Furthermore, the quantizers are often not properly described. In this work, we introduce a novel and compact framework to describe unambiguously any single-input single-output (SISO) linear digital network. The proposed approach is simple, efficient and guarantees the reproducibility of the results obtained for any network by allowing us to describe in detail the data flow as well as the quantization of all the coefficients and operations performed. An example of a third order Butterworth filter with four different implementations is provided to show the descriptive power and flexibility of the proposed approach.\n
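The kind of bit-true experiment such a framework is meant to make reproducible can be illustrated with a toy: the same direct-form I difference equation with every coefficient, product, and accumulation passed through an explicit quantizer. The round-to-nearest fixed-point quantizer and its placement here are assumptions of this example, and they are exactly the details the proposed framework forces one to state.

```python
import numpy as np

def q(x, frac_bits=12):
    """Round-to-nearest quantizer with `frac_bits` fractional bits (an assumption)."""
    return np.round(np.asarray(x) * 2**frac_bits) / 2**frac_bits

def direct_form1_quantized(b, a, x, frac_bits=12):
    """y[n] = sum_k b[k] x[n-k] - sum_{k>=1} a[k] y[n-k], a[0] assumed 1,
    quantizing every coefficient, product, and accumulation."""
    b, a = q(b, frac_bits), q(a, frac_bits)
    y = np.zeros(len(x))
    for n in range(len(x)):
        acc = 0.0
        for k in range(len(b)):
            if n - k >= 0:
                acc = q(acc + q(b[k] * x[n - k], frac_bits), frac_bits)
        for k in range(1, len(a)):
            if n - k >= 0:
                acc = q(acc - q(a[k] * y[n - k], frac_bits), frac_bits)
        y[n] = acc
    return y
```

Running the same filter with different `frac_bits`, or reordering the two inner loops, changes the output bit pattern, which is precisely why an unambiguous description of operation order and quantizer placement is needed.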
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The design of a homotopy-based 1-D seismic FIR F-X wavefield extrapolation filters.\n \n \n \n \n\n\n \n Mousa, W. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1367-1370, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081432,\n  author = {W. A. Mousa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {The design of a homotopy-based 1-D seismic FIR F-X wavefield extrapolation filters},\n  year = {2017},\n  pages = {1367-1370},\n  abstract = {This paper proposes a design of accurate non-causal complex-valued seismic FIR wavefield extrapolation digital filters using the homotopy based approach. The FIR filter design problem is solved using the scalar homotopy continuation method since the system of equations used to design the filters is over determined. Appropriate wavenumber responses were obtained for such an application but at the expense of a longer running design time compared to other existing methods (used to design FIR wavefield extrapolation digital filters) such as the WLSQ and the Li-norm methods. This design running time, however, resulted in better practical filters for this application when compared to that obtained using the WLSQ method and is of comparable result compared with the Li-norm method.},\n  keywords = {extrapolation;FIR filters;geophysical signal processing;seismic waves;seismology;WLSQ method;Li-norm method;seismic FIR wavefield extrapolation digital filters;scalar homotopy continuation method;1D seismic FIR F-X wavefield extrapolation filters;homotopy-based approach;Finite impulse response filters;Extrapolation;Algorithm design and analysis;Signal processing algorithms;Passband;Design methodology;Mathematical model},\n  doi = {10.23919/EUSIPCO.2017.8081432},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342004.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a design of accurate non-causal complex-valued seismic FIR wavefield extrapolation digital filters using the homotopy-based approach. The FIR filter design problem is solved using the scalar homotopy continuation method, since the system of equations used to design the filters is overdetermined. Appropriate wavenumber responses were obtained for this application, but at the expense of a longer design running time compared to other existing methods for designing FIR wavefield extrapolation digital filters, such as the WLSQ and L1-norm methods. This longer design time, however, resulted in filters that are better in practice for this application than those obtained using the WLSQ method, and comparable to those of the L1-norm method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The design of seismic migration complex-valued finite impulse response filters.\n \n \n \n \n\n\n \n Mousa, W. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1371-1374, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081433,\n  author = {W. A. Mousa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {The design of seismic migration complex-valued finite impulse response filters},\n  year = {2017},\n  pages = {1371-1374},\n  abstract = {This paper proposes a novel way to design seismic migration Finite Impulse Response (FIR) digital filters using the Newton minimization algorithm. The algorithm requires computing the inverse of the Jacobian matrix, which is non-square for the seismic migration filters problem. In this case, we suggest using the Moore-Penrose pseudo-inverse to obtain the inverse of the Jacobian matrix. The proposed design algorithm running time is about 8 times faster than the recently proposed Li-norm algorithm. Furthermore, the proposed method results in seismic migration filters that lead to practically stable seismic images.},\n  keywords = {FIR filters;geophysical techniques;iterative methods;Jacobian matrices;minimisation;Newton method;seismology;Newton minimization algorithm;Jacobian matrix;seismic migration filters problem;design algorithm running time;recently proposed Li-norm algorithm;practically stable seismic images;seismic migration complex;finite impulse response filters;seismic migration Finite Impulse Response digital filters;Li-norm algorithm;stable seismic images;Moore-Penrose pseudo-inverse;Finite impulse response filters;Algorithm design and analysis;Signal processing algorithms;Extrapolation;Imaging;Jacobian matrices;Geophysics},\n  doi = {10.23919/EUSIPCO.2017.8081433},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342007.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a novel way to design seismic migration Finite Impulse Response (FIR) digital filters using the Newton minimization algorithm. The algorithm requires computing the inverse of the Jacobian matrix, which is non-square for the seismic migration filter problem. In this case, we suggest using the Moore-Penrose pseudo-inverse to obtain the inverse of the Jacobian matrix. The proposed design algorithm runs about 8 times faster than the recently proposed L1-norm algorithm. Furthermore, the proposed method results in seismic migration filters that lead to practically stable seismic images.\n
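A hedged sketch of the core numerical step: a Newton-type iteration whose non-square Jacobian is inverted with the Moore-Penrose pseudo-inverse. As a stand-in for the paper's migration-filter objective, it fits complex FIR taps to a desired frequency response on a grid; for this linear toy residual the first step already reaches the least-squares solution, whereas the paper's objective is more involved.

```python
import numpy as np

def design_fir_pinv(D, omegas, n_taps, n_iter=5):
    """Find complex taps h with sum_k h[k] e^{-j w k} ~= D(w) on the grid `omegas`."""
    # Jacobian of the residual w.r.t. h (constant here because the model is linear).
    E = np.exp(-1j * np.outer(omegas, np.arange(n_taps)))
    h = np.zeros(n_taps, dtype=complex)
    for _ in range(n_iter):
        r = E @ h - D                        # residual on the frequency grid
        h = h - np.linalg.pinv(E) @ r        # Newton step via Moore-Penrose pinv
    return h
```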
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Classification of partial discharge EMI conditions using permutation entropy-based features.\n \n \n \n \n\n\n \n Mitiche, I.; Morison, G.; Nesbitt, A.; Boreham, P.; and Stewart, B. G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1375-1379, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ClassificationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081434,\n  author = {I. Mitiche and G. Morison and A. Nesbitt and P. Boreham and B. G. Stewart},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Classification of partial discharge EMI conditions using permutation entropy-based features},\n  year = {2017},\n  pages = {1375-1379},\n  abstract = {In this paper we investigate the application of feature extraction and machine learning techniques to fault identification in power systems. Specifically we implement the novel application of Permutation Entropy-based measures known as Weighted Permutation and Dispersion Entropy to field Electro-Magnetic Interference (EMI) signals for classification of discharge sources, also called conditions, such as partial discharge, arcing and corona which arise from various assets of different power sites. This work introduces two main contributions: the application of entropy measures in condition monitoring and the classification of real field EMI captured signals. The two simple and low dimension features are fed to a Multi-Class Support Vector Machine for the classification of different discharge sources contained in the EMI signals. Classification was performed to distinguish between the conditions observed within each site and between all sites. Results demonstrate that the proposed approach separated and identified the discharge sources successfully.},\n  keywords = {condition monitoring;electromagnetic interference;fault diagnosis;feature extraction;learning (artificial intelligence);partial discharge measurement;power engineering computing;power system faults;power system measurement;signal classification;support vector machines;feature extraction;condition monitoring;field EMI captured signals;EMI signals;dispersion entropy;multiclass support vector machine;partial discharge EMI condition classification;permutation entropy-based features;machine learning techniques;power system fault identification;electro-magnetic interference signals;weighted permutation;permutation entropy-based measures;discharge sources classification;partial discharge;Partial discharges;Electromagnetic interference;Feature extraction;Discharges (electric);Fault location;Support vector machines},\n  doi = {10.23919/EUSIPCO.2017.8081434},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347468.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we investigate the application of feature extraction and machine learning techniques to fault identification in power systems. Specifically, we implement the novel application of permutation entropy-based measures, known as Weighted Permutation Entropy and Dispersion Entropy, to field Electro-Magnetic Interference (EMI) signals for the classification of discharge sources, also called conditions, such as partial discharge, arcing and corona, which arise from various assets at different power sites. This work introduces two main contributions: the application of entropy measures in condition monitoring, and the classification of real EMI signals captured in the field. The two simple, low-dimensional features are fed to a multi-class Support Vector Machine for the classification of the different discharge sources contained in the EMI signals. Classification was performed to distinguish between the conditions observed within each site and between all sites. Results demonstrate that the proposed approach separated and identified the discharge sources successfully.\n
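The basic recipe behind the features is easy to state. Below is a standard implementation of ordinary permutation entropy for a 1-D signal; the weighted permutation and dispersion entropies used in the paper are refinements of this idea, and the classifier would be a separate multi-class SVM (e.g. scikit-learn's SVC).

```python
import numpy as np
from math import factorial
from itertools import permutations

def permutation_entropy(x, order=3, delay=1):
    """Shannon entropy of ordinal patterns of length `order` in x, normalized to [0, 1]."""
    x = np.asarray(x)
    patterns = {p: 0 for p in permutations(range(order))}
    n = len(x) - (order - 1) * delay
    for i in range(n):
        window = x[i : i + order * delay : delay]
        patterns[tuple(np.argsort(window))] += 1   # rank pattern of the window
    p = np.array([c for c in patterns.values() if c > 0]) / n
    return -np.sum(p * np.log2(p)) / np.log2(factorial(order))
```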
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wind turbine gearbox vibration signal signature and fault development through time.\n \n \n \n \n\n\n \n Koukoura, S.; Carroll, J.; Weiss, S.; and McDonald, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1380-1384, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"WindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081435,\n  author = {S. Koukoura and J. Carroll and S. Weiss and A. McDonald},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Wind turbine gearbox vibration signal signature and fault development through time},\n  year = {2017},\n  pages = {1380-1384},\n  abstract = {This paper aims to present a methodology for health monitoring wind turbine gearboxes using vibration data. Monitoring of wind turbines is a crucial aspect of maintenance optimisation that is required for wind farms to remain sustainable and profitable. The proposed methodology performs spectral line analysis and extracts health features from harmonic vibration spectra, at various time instants prior to a gear tooth failure. For this, the tachometer signal of the shaft is used to reconstruct the signal in the angular domain. The diagnosis approach is applied to detect gear faults affecting the intermediate stage of the gearbox. The health features extracted show the gradient deterioration of the gear at progressive time instants before the catastrophic failure. A classification model is trained for fault recognition and prognosis of time before failure. The effectiveness of the proposed fault diagnostic and prognostic approach has been tested with industrial data. The above will lay the groundwork of a robust framework for the early automatic detection of emerging gearbox faults. This will lead to minimisation of wind turbine downtime and increased revenue through operational enhancement.},\n  keywords = {condition monitoring;fault diagnosis;feature extraction;gears;maintenance engineering;spectral analysis;tachometers;vibrational signal processing;vibrations;wind power plants;wind turbines;gearbox fault automatic detection;wind turbine downtime minimization;operational enhancement;robust framework;industrial data;fault diagnostic approach;classification model;catastrophic failure;angular domain;signal reconstruction;shaft;health feature extraction;maintenance optimisation;wind turbine monitoring;gear faults;tachometer signal;gear tooth failure;harmonic vibration spectra;spectral line analysis;wind farms;vibration data;health monitoring wind turbine gearboxes;fault development;wind turbine downtime;prognostic approach;fault recognition;progressive time instants;Wind turbines;Vibrations;Gears;Amplitude modulation;Shafts;Frequency modulation},\n  doi = {10.23919/EUSIPCO.2017.8081435},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347822.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a methodology for health monitoring of wind turbine gearboxes using vibration data. Monitoring of wind turbines is a crucial aspect of the maintenance optimisation required for wind farms to remain sustainable and profitable. The proposed methodology performs spectral line analysis and extracts health features from harmonic vibration spectra at various time instants prior to a gear tooth failure. For this, the tachometer signal of the shaft is used to reconstruct the signal in the angular domain. The diagnosis approach is applied to detect gear faults affecting the intermediate stage of the gearbox. The extracted health features show the gradual deterioration of the gear at progressive time instants before the catastrophic failure. A classification model is trained for fault recognition and prognosis of the time before failure. The effectiveness of the proposed fault diagnostic and prognostic approach has been tested on industrial data. The above lays the groundwork for a robust framework for the early automatic detection of emerging gearbox faults, which will lead to minimisation of wind turbine downtime and increased revenue through operational enhancement.\n
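The angular-domain reconstruction step can be sketched as classic order tracking: tachometer pulse times pin down shaft angle versus time, and the vibration signal is re-interpolated on a uniform angle grid so that gear orders appear as fixed spectral lines regardless of speed variation. The once-per-revolution pulse assumption and all names below are ours.

```python
import numpy as np

def resample_to_angle(vib, fs, tacho_times, samples_per_rev=64):
    """vib: vibration samples at rate fs; tacho_times: one pulse per revolution (s)."""
    t = np.arange(len(vib)) / fs
    # Shaft angle is known exactly at each tacho pulse: k-th pulse -> 2*pi*k rad.
    pulse_angles = 2 * np.pi * np.arange(len(tacho_times))
    angle_of_t = np.interp(t, tacho_times, pulse_angles)     # angle as a function of time
    # Uniform angular grid spanning the observed rotation.
    angles = np.arange(angle_of_t[0], angle_of_t[-1], 2 * np.pi / samples_per_rev)
    return np.interp(angles, angle_of_t, vib), angles
```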
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed computational load balancing for real-time applications.\n \n \n \n \n\n\n \n Sthapit, S.; Hopgood, J. R.; and Thompson, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1385-1389, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081436,\n  author = {S. Sthapit and J. R. Hopgood and J. Thompson},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed computational load balancing for real-time applications},\n  year = {2017},\n  pages = {1385-1389},\n  abstract = {Mobile Cloud Computing or Fog computing refer to offloading computationally intensive algorithms from a mobile device to a cloud or a intermediate cloud in order to save resources (time and energy) in the mobile device. In this paper, we look at alternative solution when the cloud or fog is not available. We modelled sensors using network of queues and use linear programming to make scheduling decisions. We then propose novel algorithms which can improve efficiency of the overall system. Results show significant performance improvement at the cost of using some extra energy. Particularly, when incoming job rate is higher, we found our Proactive Centralised gives the best compromise between performance and energy whereas Reactive Distributed is more effective when job rate is lower.},\n  keywords = {cloud computing;mobile computing;resource allocation;scheduling;real-time applications;mobile device;intermediate cloud;fog computing;distributed computational load;computationally intensive algorithms;mobile cloud computing;proactive centralised;reactive distributed;Cloud computing;Wireless fidelity;Signal processing algorithms;Cameras;Central Processing Unit;Sensors;Drones;Offloading;Mobile Cloud Computing;Energy;IOT;Fog Computing;Edge Computing},\n  doi = {10.23919/EUSIPCO.2017.8081436},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346774.pdf},\n}\n\n
\n
\n\n\n
\n Mobile Cloud Computing and Fog Computing refer to offloading computationally intensive algorithms from a mobile device to a cloud or an intermediate cloud in order to save resources (time and energy) on the mobile device. In this paper, we look at an alternative solution for when the cloud or fog is not available. We model the sensors as a network of queues and use linear programming to make scheduling decisions. We then propose novel algorithms that can improve the efficiency of the overall system. Results show significant performance improvement at the cost of some extra energy. In particular, when the incoming job rate is higher, we found that our Proactive Centralised algorithm gives the best compromise between performance and energy, whereas the Reactive Distributed algorithm is more effective when the job rate is lower.\n
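A toy of the LP scheduling idea: split an incoming job rate across devices to minimize an energy cost while keeping each device's queue stable. The rates and costs are made-up numbers, and the formulation is only in the spirit of the paper's scheduler, not a reproduction of it.

```python
import numpy as np
from scipy.optimize import linprog

rate_in = 8.0                          # total job arrival rate (jobs/s)
mu = np.array([4.0, 5.0, 6.0])         # per-device service rates
energy = np.array([1.0, 2.5, 4.0])     # per-job energy cost of each device

# Variables x_i >= 0: job rate assigned to device i.
res = linprog(
    c=energy,                                   # minimize total energy rate
    A_eq=[np.ones(3)], b_eq=[rate_in],          # every job must be served somewhere
    bounds=[(0, 0.9 * m) for m in mu],          # keep utilisation below 90% (stability)
)
print(res.x)   # load is pushed onto the cheapest devices first
```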
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-intrusive condition monitoring for manufacturing systems.\n \n \n \n \n\n\n \n Suzuki, R.; Kohmoto, S.; and Ogatsu, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1390-1394, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Non-intrusivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081437,\n  author = {R. Suzuki and S. Kohmoto and T. Ogatsu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Non-intrusive condition monitoring for manufacturing systems},\n  year = {2017},\n  pages = {1390-1394},\n  abstract = {A non-intrusive method for monitoring conditions in manufacturing systems is proposed. The method requires only a single current sensor for the monitoring of multiple machine individually, which is done by means of disaggregating measured waveforms. For accurate disaggregation even in complicated systems with multiple identical machines, it employs a new model-combining factorial hidden Markov model (FHMM) with behavioral models derived from queuing theory. Experimental results with an actual system show that the proposed method achieves more accurate disaggregation than conventional methods and obtains such valuable information on productivity as the reasons for and timing of manufacturing process stoppages.},\n  keywords = {condition monitoring;hidden Markov models;machinery;manufacturing systems;nonintrusive condition monitoring;manufacturing systems;current sensor;machine monitoring;measured waveforms disaggregation;factorial hidden Markov model;FHMM;behavioral models;queuing theory;Hidden Markov models;Monitoring;Home appliances;Queueing analysis;Manufacturing processes;Power demand;non-intrusive monitoring;factorial hidden Markov model;queueing network},\n  doi = {10.23919/EUSIPCO.2017.8081437},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347798.pdf},\n}\n\n
\n
\n\n\n
\n A non-intrusive method for monitoring conditions in manufacturing systems is proposed. The method requires only a single current sensor to monitor multiple machines individually, which is done by disaggregating the measured waveforms. For accurate disaggregation, even in complicated systems with multiple identical machines, it employs a new model combining a factorial hidden Markov model (FHMM) with behavioral models derived from queuing theory. Experimental results on an actual system show that the proposed method achieves more accurate disaggregation than conventional methods and obtains valuable productivity information, such as the reasons for and the timing of manufacturing process stoppages.\n
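A hedged toy of FHMM disaggregation: each machine is a two-state (off/on) Markov chain with a known power draw, and exact inference runs Viterbi over the product state space against the single aggregate measurement. The paper additionally constrains behaviour with queueing-derived models; the exhaustive 2^K state enumeration below is only viable for a handful of machines.

```python
import numpy as np
from itertools import product

def fhmm_viterbi(agg, powers, p_stay=0.95, sigma=5.0):
    """agg: aggregate current/power signal; powers: per-machine on-power draws."""
    K = len(powers)
    states = list(product([0, 1], repeat=K))              # 2^K joint on/off states
    mean = np.array([np.dot(s, powers) for s in states])  # expected aggregate per state
    logA = np.array([[sum(np.log(p_stay if a == b else 1 - p_stay)
                          for a, b in zip(si, sj))
                      for sj in states] for si in states])
    logp = -0.5 * ((agg[0] - mean) / sigma) ** 2          # Gaussian emission, flat prior
    back = []
    for y in agg[1:]:
        cand = logp[:, None] + logA                       # cand[i, j]: via predecessor i
        back.append(np.argmax(cand, axis=0))
        logp = cand.max(axis=0) - 0.5 * ((y - mean) / sigma) ** 2
    path = [int(np.argmax(logp))]
    for bp in reversed(back):                             # backtrack best predecessors
        path.append(int(bp[path[-1]]))
    return [states[i] for i in reversed(path)]
```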
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A GLRT approach for detecting correlated signals in white noise in two MIMO channels.\n \n \n \n \n\n\n \n Santamaria, I.; Via, J.; Scharf, L. L.; and Wang, Y.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1395-1399, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081438,\n  author = {I. Santamaria and J. Via and L. L. Scharf and Y. Wang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A GLRT approach for detecting correlated signals in white noise in two MIMO channels},\n  year = {2017},\n  pages = {1395-1399},\n  abstract = {In this work, we consider a second-order detection problem where rank-p signals are structured by an unknown, but common, p-dimensional random vector and then received through unknown M × p matrices at each of two M-element arrays. The noises in each channel are independent with identical variances. We derive generalized likelihood ratio (GLR) tests for this problem when the noise variance is either known or unknown. The resulting detection problems may be phrased as two-channel factor analysis problems.},\n  keywords = {covariance matrices;Gaussian noise;maximum likelihood detection;MIMO radar;radar detection;radar signal processing;signal detection;statistical analysis;vectors;white noise;second-order detection problem;MIMO channels;white noise;correlated signals;GLRT approach;two-channel factor analysis problems;noise variance;generalized likelihood ratio tests;M-element arrays;unknown M × p matrices;p-dimensional random vector;rank-p signals;Covariance matrices;Surveillance;Antenna arrays;Maximum likelihood estimation;Load modeling;MIMO;Radar antennas;Passive detection;MIMO channels;passive radar;generalized likelihood ratio},\n  doi = {10.23919/EUSIPCO.2017.8081438},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346642.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we consider a second-order detection problem where rank-p signals are structured by an unknown, but common, p-dimensional random vector and then received through unknown M × p matrices at each of two M-element arrays. The noises in each channel are independent with identical variances. We derive generalized likelihood ratio (GLR) tests for this problem when the noise variance is either known or unknown. The resulting detection problems may be phrased as two-channel factor analysis problems.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low cost subspace tracking algorithms for sparse systems.\n \n \n \n \n\n\n \n Lassami, N.; Abed-Meraim, K.; and Aïssa-El-Bey, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1400-1404, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LowPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081439,\n  author = {N. Lassami and K. Abed-Meraim and A. Aïssa-El-Bey},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low cost subspace tracking algorithms for sparse systems},\n  year = {2017},\n  pages = {1400-1404},\n  abstract = {In this paper, we focus on tracking the signal subspace under a sparsity constraint. More specifically, we propose a two-step approach to solve the considered problem whether the sparsity constraint is on the system weight matrix or on the source signals. The first step uses the OPAST algorithm for an adaptive extraction of an orthonormal basis of the principal subspace, then an estimation of the desired weight matrix is done in the second step, taking into account the sparsity constraint. The resulting algorithms: SS-OPAST and DS-OPAST have low computational complexity (suitable in the adaptive context) and they achieve both good convergence and estimation performance as illustrated by our simulation experiments for different application scenarios.},\n  keywords = {blind source separation;channel estimation;computational complexity;feature extraction;matrix algebra;object tracking;sparsity constraint;two-step approach;system weight matrix;source signals;adaptive extraction;principal subspace;low computational complexity;sparse systems;signal subspace;low cost subspace tracking algorithms;orthonormal basis;weight matrix extraction;SS-OPAST;DS-OPAST;Signal processing algorithms;Sparse matrices;Approximation algorithms;Signal processing;Europe;Estimation;Radar tracking;Principal subspace tracking;sparse sub-space;adaptive estimation;sparse source separation},\n  doi = {10.23919/EUSIPCO.2017.8081439},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342264.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we focus on tracking the signal subspace under a sparsity constraint. More specifically, we propose a two-step approach to solve the considered problem, whether the sparsity constraint is on the system weight matrix or on the source signals. The first step uses the OPAST algorithm for adaptive extraction of an orthonormal basis of the principal subspace; an estimate of the desired weight matrix is then computed in the second step, taking the sparsity constraint into account. The resulting algorithms, SS-OPAST and DS-OPAST, have low computational complexity (suitable in the adaptive context) and achieve both good convergence and good estimation performance, as illustrated by our simulation experiments for different application scenarios.\n
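One plausible form of the second step, shown purely for illustration: soft-threshold the tracked orthonormal basis to promote sparsity, then re-orthonormalize. This is a hypothetical stand-in for the refinement in SS-OPAST, not the authors' exact update.

```python
import numpy as np

def soft_threshold(W, tau):
    """Element-wise shrinkage toward zero; the standard sparsity-promoting operator."""
    return np.sign(W) * np.maximum(np.abs(W) - tau, 0.0)

def sparse_subspace_step(W, tau=0.05):
    """W: (n, p) orthonormal basis from the tracker. Sparsify, then re-orthonormalize."""
    Q, _ = np.linalg.qr(soft_threshold(W, tau))
    return Q
```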
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Source enumeration in large arrays using corrected Rao's score test and relatively few samples.\n \n \n \n \n\n\n \n Liu, Y.; Sun, X.; and Liu, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1405-1406, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SourcePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081440,\n  author = {Y. Liu and X. Sun and G. Liu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Source enumeration in large arrays using corrected Rao's score test and relatively few samples},\n  year = {2017},\n  pages = {1405-1406},\n  abstract = {We focus on the problem of source enumeration in large arrays with relatively few samples, which is solved in this paper by using a statistic of corrected Rao's score test (CRST) via the generalized Bayesian information criterion (GBIC). Under the white noise assumption, the covariance matrix of the noise subspace components of the observations is proportional to an identity matrix, and this structure can be tested by the CRST statistic for the sphericity hypothesis test. The observations are decomposed into signal and noise subspace components by unitary coordinate transformation under a presumptive number of sources. Only when there is no signal in the presumptive noise subspace components, the corresponding CRST statistic is asymptotic normal distribution. The CRST statistic of the presumptive noise subspace components also is a statistic of the sample eigenvalues, and can be used as the statistic in the GBIC for estimating the number of sources. Simulation results demonstrate that the proposed method can achieve more accurate detection of the number of sources in the case of a large number of sensors with relatively few samples, especially when the number of samples is smaller than the number of sensors.},\n  keywords = {array signal processing;Bayes methods;covariance matrices;eigenvalues and eigenfunctions;normal distribution;signal sampling;source separation;white noise;source enumeration;generalized Bayesian information criterion;GBIC;white noise assumption;covariance matrix;identity matrix;sphericity hypothesis test;presumptive noise subspace components;corresponding CRST statistic;sample eigenvalues;corrected Rao score test;relatively few sample score test;unitary coordinate transformation;asymptotic normal distribution;Covariance matrices;Eigenvalues and eigenfunctions;Gaussian distribution;Sensors;Array signal processing;Estimation},\n  doi = {10.23919/EUSIPCO.2017.8081440},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342130.pdf},\n}\n\n
\n
\n\n\n
\n We focus on the problem of source enumeration in large arrays with relatively few samples, which is solved in this paper by using a statistic of the corrected Rao's score test (CRST) via the generalized Bayesian information criterion (GBIC). Under the white noise assumption, the covariance matrix of the noise subspace components of the observations is proportional to an identity matrix, and this structure can be tested by the CRST statistic for the sphericity hypothesis test. The observations are decomposed into signal and noise subspace components by a unitary coordinate transformation under a presumptive number of sources. Only when there is no signal in the presumptive noise subspace components does the corresponding CRST statistic follow an asymptotic normal distribution. The CRST statistic of the presumptive noise subspace components is also a statistic of the sample eigenvalues, and can be used as the statistic in the GBIC for estimating the number of sources. Simulation results demonstrate that the proposed method can achieve more accurate detection of the number of sources in the case of a large number of sensors with relatively few samples, especially when the number of samples is smaller than the number of sensors.\n
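The ingredient being tested can be illustrated with John's classical sphericity statistic computed from the sample eigenvalues of the presumed noise subspace; the paper's corrected Rao score test refines such a statistic for the large-array/few-samples regime and embeds it in the GBIC rule. The sketch below is the classical statistic only, not the corrected version.

```python
import numpy as np

def john_sphericity(eigvals):
    """John's U = (1/p) * tr[(S / (tr(S)/p) - I)^2], from the eigenvalues of S.
    U is near 0 when the covariance is proportional to the identity (sphericity)."""
    lam = np.asarray(eigvals, dtype=float)
    return np.sum((lam / lam.mean() - 1.0) ** 2) / lam.size
```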
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayesian restoration of reflectivity and range profiles from subsampled single-photon multispectral Lidar data.\n \n \n \n \n\n\n \n Altmann, Y.; Tobin, R.; Maccarone, A.; Ren, X.; McCarthy, A.; Buller, G. S.; and McLaughlin, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1410-1414, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"BayesianPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081441,\n  author = {Y. Altmann and R. Tobin and A. Maccarone and X. Ren and A. McCarthy and G. S. Buller and S. McLaughlin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Bayesian restoration of reflectivity and range profiles from subsampled single-photon multispectral Lidar data},\n  year = {2017},\n  pages = {1410-1414},\n  abstract = {In this paper, we investigate the recovery of range and spectral profiles associated with remote three-dimensional scenes sensed via single-photon multispectral Lidar (MSL). We consider different spatial/spectral sampling strategies and pare their performance for similar overall numbers of detected photons. For a regular spatial grid, the first strategy consists of sampling all the spatial locations of the grid for each of the wavelengths. Conversely, the three other strategies consist, for each spatial location, of acquiring a reduced number of wavelengths, chosen randomly or in a deterministic manner. We propose a fully automated computational method, adapted for the different sampling strategies in order to recover the target range profile, as well as the reflectivity profiles associated with the different wavelengths. The performance of the four sampling strategies is illustrated using a single photon MSL system with four wavelengths. The results presented demonstrate that although the first strategy usually provides more accurate results, the subsampling strategies do not exhibit a significant performance degradation, particularly for extremely photon-starved data (down to one photon per pixel on average).},\n  keywords = {Bayes methods;image reconstruction;image resolution;image sampling;optical radar;range profiles;single-photon multispectral Lidar data;spectral profiles;three-dimensional scenes;spatial location;fully automated computational method;target range profile;reflectivity profiles;single photon MSL system;subsampling strategies;significant performance degradation;spatial/spectral sampling strategies;Bayesian restoration;sampling strategies;Photonics;Estimation;Bayes methods;Correlation;Surface emitting lasers;Imaging;Laser radar},\n  doi = {10.23919/EUSIPCO.2017.8081441},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347407.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we investigate the recovery of range and spectral profiles associated with remote three-dimensional scenes sensed via single-photon multispectral Lidar (MSL). We consider different spatial/spectral sampling strategies and compare their performance for similar overall numbers of detected photons. For a regular spatial grid, the first strategy consists of sampling all the spatial locations of the grid for each of the wavelengths. Conversely, the three other strategies consist, for each spatial location, of acquiring a reduced number of wavelengths, chosen randomly or in a deterministic manner. We propose a fully automated computational method, adapted to the different sampling strategies, in order to recover the target range profile as well as the reflectivity profiles associated with the different wavelengths. The performance of the four sampling strategies is illustrated using a single-photon MSL system with four wavelengths. The results presented demonstrate that although the first strategy usually provides more accurate results, the subsampling strategies do not exhibit a significant performance degradation, particularly for extremely photon-starved data (down to one photon per pixel on average).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the use of tight frames for optimal sensor placement in time-difference of arrival localization.\n \n \n \n \n\n\n \n Rusu, C.; and Thompson, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1415-1419, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081442,\n  author = {C. Rusu and J. Thompson},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On the use of tight frames for optimal sensor placement in time-difference of arrival localization},\n  year = {2017},\n  pages = {1415-1419},\n  abstract = {In this paper we analyze the use of tight frames for the problem of localizing a source from noisy time-difference of arrival measurements. Based on the Fisher information matrix, we show that positioning the sensor network according to a tight frame that also obeys some internal symmetries provides the best average localization accuracy. We connect our result to previous approaches from the literature and show experimentally that near optimal accuracy can also be provided by random tight frames. We also make the assumption that the sensors are not fixed but placed on mobile units and we study the problem of bringing them to a tight configuration with the minimum energy consumption. Although our results hold for any dimension, for simplicity of exposition, the numerical experiments depicted are in the two dimensional case.},\n  keywords = {sensor placement;time-of-arrival estimation;wireless sensor networks;time-difference of arrival localization;tight configuration;random tight frames;optimal accuracy;average localization accuracy;sensor network;Fisher information matrix;arrival measurements;noisy time-difference;optimal sensor placement;Estimation;Europe;Signal processing;Noise measurement;Null space;Noise level;Mobile communication;time-difference of arrival localization;Fisher information matrix;finite frames;tight frames},\n  doi = {10.23919/EUSIPCO.2017.8081442},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346743.pdf},\n}\n\n
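
The tight-frame condition at the heart of this paper is easy to check numerically. The sketch below (my own illustration under assumed names, not the authors' code) measures how far a sensor configuration, encoded as the columns of A, is from tight, i.e. how far the frame operator S = sum_i a_i a_i^T is from a multiple of the identity; equally spaced unit vectors in the plane give an exactly tight frame.

import numpy as np

def tightness(A):
    # relative distance of the frame operator S = A A^T from the nearest
    # multiple of the identity; 0 means the columns of A form a tight frame
    d = A.shape[0]
    S = A @ A.T
    c = np.trace(S) / d
    return np.linalg.norm(S - c * np.eye(d)) / np.linalg.norm(S)

rng = np.random.default_rng(0)
print(tightness(rng.standard_normal((2, 100))))  # random frame: nearly tight

t = 2 * np.pi * np.arange(5) / 5                 # 5 equally spaced unit vectors
H = np.vstack([np.cos(t), np.sin(t)])            # in R^2: an exactly tight frame
print(tightness(H))                              # ~0 up to floating point
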
@InProceedings{8081443,\n  author = {G. Wang and G. B. Giannakis and J. Chen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Solving large-scale systems of random quadratic equations via stochastic truncated amplitude flow},\n  year = {2017},\n  pages = {1420-1424},\n  abstract = {This work develops a new iterative algorithm, called stochastic truncated amplitude flow (STAF), to recover an unknown signal x ∈ Rⁿ from m {"}phaseless{"} quadratic equations of the form ψᵢ = |aᵢᵀx|, 1 ≤ i ≤ m. This problem is also known as phase retrieval, which is NP-hard in general. Building on an amplitude-based nonconvex least-squares formulation, STAF proceeds in two stages: s1) orthogonality-promoting initialization computed using a stochastic variance reduced gradient algorithm; and s2) refinement of the initial point through truncated stochastic gradient-type iterations. Both stages handle a single equation per iteration, which makes STAF well suited to Big Data applications. Specifically, for independent Gaussian vectors {aᵢ}ᵢ₌₁ᵐ, STAF recovers any x exactly, at an exponential rate, when there are about as many equations as unknowns. Finally, numerical tests demonstrate that STAF improves upon its competing alternatives.},\n  keywords = {eigenvalues and eigenfunctions;gradient methods;large-scale systems;least squares approximations;linear systems;optimisation;stochastic processes;vectors;truncated stochastic gradient-type iterations;STAF;large-scale systems;random quadratic equations;stochastic truncated amplitude flow;iterative algorithm;m phaseless quadratic equations;stochastic variance;gradient algorithm;phase retrieval;NP-hard problem;Big Data applications;independent Gaussian vectors;numerical tests;amplitude-based nonconvex least-squares formulation;Stochastic processes;Signal processing algorithms;Runtime;Mathematical model;Computational complexity;Europe;Signal processing;Phase retrieval;stochastic nonconvex optimization;stochastic variance reduced gradient;linear convergence;global optimum},\n  doi = {10.23919/EUSIPCO.2017.8081443},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341714.pdf},\n}\n\n
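
For intuition, here is a hedged sketch of the stochastic refinement stage (s2) on simulated Gaussian data: one amplitude equation is processed per iteration with a Kaczmarz-style step of my own choosing. The orthogonality-promoting initialization and the truncation rule that make the full STAF algorithm provably reliable are omitted, so this is an illustration of the update, not the authors' implementation.

import numpy as np

rng = np.random.default_rng(1)
n, m = 100, 800
x = rng.standard_normal(n)              # ground-truth signal
A = rng.standard_normal((m, n))         # rows are the Gaussian vectors a_i
psi = np.abs(A @ x)                     # phaseless data psi_i = |a_i^T x|

z = rng.standard_normal(n)              # crude random init (STAF uses s1 instead)
for _ in range(50 * m):                 # one equation per iteration
    i = rng.integers(m)
    s = A[i] @ z
    # Kaczmarz-style step on the amplitude residual of equation i; with
    # enough measurements this typically converges up to a global sign
    z += (psi[i] * np.sign(s) - s) / (A[i] @ A[i]) * A[i]

err = min(np.linalg.norm(z - x), np.linalg.norm(z + x)) / np.linalg.norm(x)
print(err)                              # relative error up to the sign ambiguity
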
@InProceedings{8081444,\n  author = {V. Naumova and K. Schnass},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Dictionary learning from incomplete data for efficient image restoration},\n  year = {2017},\n  pages = {1425-1429},\n  abstract = {In real-world image processing applications, the data is high dimensional but the amount of high-quality data available to train the model is very limited. In this paper, we demonstrate applicability of a recently presented method for dictionary learning from incomplete data, the so-called Iterative Thresholding and K residual Means for Masked data, to deal with high-dimensional data in an efficient way. In particular, the proposed algorithm incorporates a corruption model directly at the dictionary learning stage, also enabling reconstruction of the low-rank component again from corrupted signals. These modifications circumvent some difficulties associated with the efficient dictionary learning procedure in the presence of limited or incomplete data. We choose an image inpainting problem as a guiding example, and further propose a procedure for automatic detection and reconstruction of the low-rank component from incomplete data and adaptive parameter selection for the sparse image reconstruction. We benchmark the efficacy and efficiency of our algorithm in terms of computing time and accuracy on colour, 3D medical, and hyperspectral images by comparing it to its dictionary learning counterparts.},\n  keywords = {image denoising;image reconstruction;image representation;image restoration;learning (artificial intelligence);real-world image processing applications;high-dimensional data;dictionary learning stage;low-rank component;image inpainting problem;sparse image reconstruction;dictionary learning counterparts;image restoration;masked data;dictionary learning procedure;adaptive parameter selection;Dictionaries;Signal processing algorithms;Machine learning;Manganese;Image color analysis;Image reconstruction;Matching pursuit algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081444},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346889.pdf},\n}\n\n
@InProceedings{8081445,\n  author = {P. V. Giampouras and A. A. Rontogiannis and K. D. Koutroumbas},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low-rank and sparse NMF for joint endmembers' number estimation and blind unmixing of hyperspectral images},\n  year = {2017},\n  pages = {1430-1434},\n  abstract = {Estimation of the number of endmembers existing in a scene constitutes a critical task in the hyperspectral unmixing process. The accuracy of this estimate plays a crucial role in subsequent unsupervised unmixing steps, i.e., the derivation of the spectral signatures of the endmembers (endmembers' extraction) and the estimation of the abundance fractions of the pixels. A common practice, amply followed in the literature, is to treat endmembers' number estimation and unmixing independently, as two separate tasks, providing the outcome of the former as input to the latter. In this paper, we go beyond this computationally demanding strategy. More precisely, we set forth a multiple constrained optimization framework, which encapsulates endmembers' number estimation and unsupervised unmixing in a single task. This is attained by suitably formulating the problem via a low-rank and sparse nonnegative matrix factorization rationale, where low-rankness is promoted with the use of a sophisticated ℓ1/ℓ2 norm penalty term. An alternating proximal algorithm is then proposed for minimizing the emerging cost function. The results obtained by simulated and real data experiments verify the effectiveness of the proposed approach.},\n  keywords = {hyperspectral imaging;image processing;matrix decomposition;optimisation;sparse matrices;blind unmixing;hyperspectral unmixing process;low-rankness;unsupervised unmixing;subsequent unsupervised unmixing steps;nonnegative matrix factorization rationale;alternating proximal algorithm;cost function;norm penalty term;Signal processing algorithms;Minimization;Estimation;Cost function;Hyperspectral imaging;NMF;sparse and low-rank;number of endmembers;unsupervised unmixing},\n  doi = {10.23919/EUSIPCO.2017.8081445},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347555.pdf},\n}\n\n
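
One plausible reading of an ℓ1/ℓ2 penalty in this setting is the sum of column-wise ℓ2 norms of a factor matrix, whose proximal operator is column-wise group soft-thresholding; the sketch below (my own naming and formulation, not necessarily the paper's exact penalty) shows how that prox zeroes out whole columns and thereby prunes superfluous endmembers inside an alternating proximal scheme.

import numpy as np

def prox_l1l2_columns(W, lam):
    # prox of lam * sum_j ||W[:, j]||_2: columns whose l2 norm falls below
    # lam are zeroed entirely, shrinking the estimated number of endmembers
    norms = np.linalg.norm(W, axis=0, keepdims=True)
    scale = np.maximum(0.0, 1.0 - lam / np.maximum(norms, 1e-12))
    return W * scale

W = np.abs(np.random.default_rng(2).standard_normal((5, 8)))
W_pruned = prox_l1l2_columns(W, lam=1.5)
print((np.linalg.norm(W_pruned, axis=0) > 0).sum(), "of 8 columns survive")
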
@InProceedings{8081446,\n  author = {R. Ammanouil and A. Ferrari and R. Flamary and C. Ferrari and D. Mary},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-frequency image reconstruction for radio-interferometry with self-tuned regularization parameters},\n  year = {2017},\n  pages = {1435-1439},\n  abstract = {As the world's largest radio telescope, the Square Kilometer Array (SKA) will provide radio interferometric data with unprecedented detail. Image reconstruction algorithms for radio interferometry are challenged to scale well with TeraByte image sizes never seen before. In this work, we investigate one such 3D image reconstruction algorithm known as MUFFIN (MUlti-Frequency image reconstruction For radio INterferometry). In particular, we focus on the challenging task of automatically finding the optimal regularization parameter values. In practice, finding the regularization parameters using classical grid search is computationally intensive and nontrivial due to the lack of ground-truth. We adopt a greedy strategy where, at each iteration, the optimal parameters are found by minimizing the predicted Stein unbiased risk estimate (PSURE). The proposed self-tuned version of MUFFIN involves parallel and computationally efficient steps, and scales well with large-scale data. Finally, numerical results on a 3D image are presented to showcase the performance of the proposed approach.},\n  keywords = {image reconstruction;interferometry;iterative methods;radiotelescopes;search problems;multifrequency image reconstruction;self-tuned regularization parameters;Square Kilometer Array;radio interferometric data;image reconstruction algorithms;radio interferometry;TeraByte image;3D image reconstruction algorithm;optimal regularization parameter values;MUFFIN;classical grid search;predicted Stein unbiased risk estimate;Signal processing algorithms;Jacobian matrices;Deconvolution;Image reconstruction;Radio interferometry;Antenna measurements;Estimation},\n  doi = {10.23919/EUSIPCO.2017.8081446},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347562.pdf},\n}\n\n
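
The core of such a self-tuning loop is compact: Stein's unbiased risk estimate needs only the noisy data, the noise level, and a divergence term, which a Monte-Carlo probe estimates without any ground truth. The toy below uses a soft-threshold denoiser as a stand-in for MUFFIN's iterates (all names are mine; this is a sketch of the SURE idea, not the PSURE machinery of the paper) and selects the regularization parameter by minimizing the estimate.

import numpy as np

def mc_sure(f, y, sigma, eps=1e-3, rng=np.random.default_rng(3)):
    # SURE = ||f(y) - y||^2 - N*sigma^2 + 2*sigma^2 * div f(y), with the
    # divergence term estimated by a single Monte-Carlo probe b
    b = rng.standard_normal(y.shape)
    div = b.ravel() @ (f(y + eps * b) - f(y)).ravel() / eps
    return np.sum((f(y) - y) ** 2) - y.size * sigma ** 2 + 2 * sigma ** 2 * div

rng = np.random.default_rng(4)
x = np.zeros(1000); x[:50] = 5.0                  # sparse ground truth
sigma = 1.0
y = x + sigma * rng.standard_normal(x.shape)
soft = lambda lam: (lambda v: np.sign(v) * np.maximum(np.abs(v) - lam, 0.0))
lams = np.linspace(0.5, 4.0, 15)
lam_best = min(lams, key=lambda lam: mc_sure(soft(lam), y, sigma))
print("SURE-selected threshold:", lam_best)
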
@InProceedings{8081447,\n  author = {A. Panousopoulou and S. Farrens and Y. Mastorakis and J. Starck and P. Tsakailides},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A distributed learning architecture for big imaging problems in astrophysics},\n  year = {2017},\n  pages = {1440-1444},\n  abstract = {Future challenges in Big Imaging problems will require that traditional {"}black-box{"} machine learning methods be revisited from the perspective of ongoing efforts in distributed computing. This paper proposes a distributed architecture for astrophysical imagery, which exploits the Apache Spark framework for the efficient parallelization of the learning problem at hand. The use case is related to the challenging problem of deconvolving a space variant point spread function from noisy galaxy images. We conduct benchmark studies considering relevant datasets and analyze the efficacy of the herein developed parallelization approaches. The experimental results report a 58% improvement in response time over conventional computing solutions, while useful insights into the computational trade-offs and the limitations of Spark are extracted.},\n  keywords = {astronomical image processing;astronomy computing;learning (artificial intelligence);optical transfer function;parallel processing;distributed learning architecture;big imaging problems;astrophysics;black-box machine;distributed computing;distributed architecture;astrophysical imagery;Apache Spark framework;space variant point spread function;noisy galaxy images;Imaging;Computer architecture;Optimization;Sparks;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081447},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343067.pdf},\n}\n\n
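
The Spark pattern such a pipeline exploits is, at its simplest, a parallel map over independent image stamps. A minimal PySpark sketch (deconvolve_one and the toy data are placeholders of mine, not the authors' pipeline):

import numpy as np
from pyspark import SparkContext

def deconvolve_one(stamp):
    # stand-in for deconvolving one galaxy stamp with a space-variant PSF
    return stamp - stamp.mean()

sc = SparkContext(appName="big-imaging-sketch")
stamps = [np.random.rand(41, 41) for _ in range(10000)]  # toy postage stamps
done = sc.parallelize(stamps, numSlices=64).map(deconvolve_one).count()
print(done)
sc.stop()
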
@InProceedings{8081448,\n  author = {O. Flasseur and L. Denis and C. Fournier and É. Thiébaut},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust object characterization from lensless microscopy videos},\n  year = {2017},\n  pages = {1445-1449},\n  abstract = {Lensless microscopy, also known as in-line digital holography, is a 3D quantitative imaging method used in various fields including microfluidics and biomedical imaging. To estimate the size and 3D location of microscopic objects in holograms, maximum likelihood methods have been shown to outperform traditional approaches based on 3D image reconstruction followed by 3D image analysis. However, the presence of objects other than the object of interest may bias maximum likelihood estimates. Using experimental videos of holograms, we show that replacing the maximum likelihood with a robust estimation procedure reduces this bias. We propose a criterion based on the intersection of confidence intervals in order to automatically set the level that distinguishes between inliers and outliers. We show that this criterion achieves a bias/variance trade-off. We also show that joint analysis of a sequence of holograms using the robust procedure further improves estimation accuracy.},\n  keywords = {estimation theory;holography;image reconstruction;maximum likelihood estimation;medical image processing;robust estimation procedure;robust object characterization;lensless microscopy videos;in-line digital holography;3D quantitative imaging method;biomedical imaging;microscopic objects;maximum likelihood methods;3D image reconstruction;3D image analysis;Estimation;Robustness;Microscopy;Videos;Three-dimensional displays;Diffraction},\n  doi = {10.23919/EUSIPCO.2017.8081448},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346926.pdf},\n}\n\n
@InProceedings{8081449,\n  author = {M. Gölz and V. Koivunen and A. Zoubir},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Nonparametric detection using empirical distributions and bootstrapping},\n  year = {2017},\n  pages = {1450-1454},\n  abstract = {This paper addresses the problem of decision making when there is no or very vague knowledge about the probability models associated with the hypotheses. Such scenarios occur for example in Internet of Things (IoT), environmental surveillance and data analytics. The probability models are learned from the data by empirical distributions that provide an accurate approximation of the true model. Hence, the approach is fully nonparametric. The bootstrap method is employed to approximate the distribution of the decision statistic. The actual test is based on the Anderson-Darling test that is shown to perform reliably even if the empirical distributions differ only slightly. The proposed detector allows controlling Type I and II error levels without specifying explicit probability models or performing tedious large sample analysis. It is also proved that the test can achieve the specified power. Numerical simulations validate the results.},\n  keywords = {data analysis;decision making;nonparametric statistics;probability;statistical distributions;statistical testing;empirical distributions;bootstrap method;decision statistic;Anderson-Darling test;explicit probability models;nonparametric detection;decision making;environmental surveillance;data analytics;Type II error level;Type I error level;numerical simulation;Probability;Testing;Signal processing;Training data;Signal processing algorithms;Reliability},\n  doi = {10.23919/EUSIPCO.2017.8081449},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346665.pdf},\n}\n\n
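
The detector can be prototyped directly with SciPy's k-sample Anderson-Darling statistic, with the bootstrap supplying the decision threshold. The sketch below (my own toy setup, not the authors' code) calibrates an alpha = 0.05 threshold from noise-only training data and then tests a mean-shifted sample.

import numpy as np
from scipy.stats import anderson_ksamp

rng = np.random.default_rng(5)
train = rng.standard_normal(2000)       # noise-only reference data (H0)
n, B = 200, 500

def ad_stat(a, b):
    return anderson_ksamp([a, b]).statistic

# bootstrap the null distribution of the statistic from the reference data
null = np.array([ad_stat(*np.split(rng.choice(train, 2 * n, replace=True), 2))
                 for _ in range(B)])
thr = np.quantile(null, 0.95)           # controls the Type I error level at 0.05

x = 0.3 + rng.standard_normal(n)        # new observations with a mean shift
print("detect:", ad_stat(train, x) > thr)
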
@InProceedings{8081450,\n  author = {M. L. Diong and E. Chaumette and F. Vincent},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On the efficiency of maximum-likelihood estimators of misspecified models},\n  year = {2017},\n  pages = {1455-1459},\n  abstract = {The key results on maximum-likelihood (ML) estimation of misspecified models have been introduced by statisticians (P.J. Huber, H. Akaike, H. White, Q. H. Vuong) resorting to a general probabilistic formalism somewhat difficult to rephrase into the formalism widespread in the signal processing literature. In particular, Vuong proposed two misspecified Cramer-Rao bounds (CRBs) to address, respectively, the situation where the true parametric probability model is known, or not known. In this communication, derivations of the existing results on the accuracy of ML estimation of misspecified models are outlined in an easily comprehensible manner. Simple alternative derivations of these two misspecified CRBs based on the seminal work of Barankin (which underlies all the lower bounds introduced in deterministic estimation) are provided. Since two distinct CRBs exist when the true parametric probability model is known, a quasi-efficiency denomination is introduced.},\n  keywords = {maximum likelihood estimation;probability;signal processing;maximum-likelihood estimators;misspecified models;general probabilistic formalism;signal processing literature;misspecified Cramer-Rao bounds;ML estimation;misspecified CRBs;deterministic estimation;true parametric probability model;quasi-efficiency denomination;Maximum likelihood estimation;Signal processing;Europe;Probabilistic logic;Probability distribution;Parametric statistics},\n  doi = {10.23919/EUSIPCO.2017.8081450},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341807.pdf},\n}\n\n
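
For reference, the sandwich form of the misspecified CRB from the Huber/White line of work reads as follows (standard notation, not transcribed from the paper): with f the assumed model, p the true distribution, and θ* the pseudo-true parameter minimizing KL(p‖f_θ),

\[
\mathbf{A}(\theta) = \mathbb{E}_{p}\!\left[\nabla_{\theta}^{2}\ln f(\mathbf{x};\theta)\right],
\qquad
\mathbf{B}(\theta) = \mathbb{E}_{p}\!\left[\nabla_{\theta}\ln f(\mathbf{x};\theta)\,
\nabla_{\theta}\ln f(\mathbf{x};\theta)^{\mathsf{T}}\right],
\]
\[
\operatorname{Cov}_{p}\bigl(\hat{\theta}\bigr)\;\succeq\;
\mathbf{A}(\theta^{*})^{-1}\,\mathbf{B}(\theta^{*})\,\mathbf{A}(\theta^{*})^{-1},
\]

which collapses to the classical CRB when the model is well specified, since then A = -B.
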
@InProceedings{8081451,\n  author = {M. Niedźwiecki and M. Ciołek},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On joint order and bandwidth selection for identification of nonstationary autoregressive processes},\n  year = {2017},\n  pages = {1460-1464},\n  abstract = {When identifying a nonstationary autoregressive process, e.g. for the purpose of signal prediction or parametric spectrum estimation, two important decisions must be taken. First, one should choose the appropriate order of the autoregressive model, i.e., the number of autoregressive coefficients that will be estimated. Second, if identification is carried out using the local estimation technique, such as the localized version of the method of least squares, one should select the most appropriate estimation bandwidth, i.e., the effective width of the local data window used for the purpose of parameter tracking. The paper presents the first unified treatment of the problem of joint order and bandwidth selection. Two solutions to this problem are examined, first based on the predictive least squares principle, and second exploiting the suitably modified Akaike's final prediction error statistic. It is shown that the best results are obtained if the two approaches mentioned above are combined.},\n  keywords = {autoregressive processes;least squares approximations;signal processing;nonstationary autoregressive process;signal prediction;parametric spectrum estimation;autoregressive coefficients;local estimation technique;local data window;predictive least squares principle;joint order and bandwidth selection;modified Akaike final prediction error statistic;Estimation;Bandwidth;Europe;Signal processing;Autoregressive processes;Adaptation models;Signal processing algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081451},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341447.pdf},\n}\n\n
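
The predictive least squares half of the proposed criterion is straightforward to prototype: for every candidate pair (order p, bandwidth k), re-fit a windowed AR model at each instant, accumulate the one-step-ahead prediction errors, and pick the minimizing pair. A sketch under my own naming (the paper additionally combines this with a modified FPE statistic, which is omitted here):

import numpy as np

def pls_score(y, p, k):
    # accumulated squared one-step-ahead prediction errors when an AR(p)
    # model is re-fitted at each instant from the last k samples
    err = 0.0
    for t in range(k + p, len(y)):
        rows = range(t - k + p, t)
        X = np.array([y[s - p:s][::-1] for s in rows])  # [y_{s-1}, ..., y_{s-p}]
        a, *_ = np.linalg.lstsq(X, y[t - k + p:t], rcond=None)
        err += (y[t] - a @ y[t - p:t][::-1]) ** 2
    return err

rng = np.random.default_rng(6)
y = np.zeros(600)                       # slowly varying AR(2) test signal
for t in range(2, 600):
    a1 = 1.5 * np.cos(2 * np.pi * t / 600)
    y[t] = a1 * y[t - 1] - 0.7 * y[t - 2] + rng.standard_normal()

grid = [(p, k) for p in (1, 2, 3, 4) for k in (30, 60, 120)]
print(min(grid, key=lambda pk: pls_score(y, *pk)))   # jointly selected (p, k)
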
@InProceedings{8081452,\n  author = {R. Parois and W. Hamidouche and J. Vieron and M. Raulet and O. Deforges},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Efficient parallel architecture for a real-time UHD scalable HEVC encoder},\n  year = {2017},\n  pages = {1465-1469},\n  abstract = {The scalable extension (SHVC) of High Efficiency Video Coding (HEVC) allows encoding a video in layers with multiple quality levels, such as resolution, bit-depth or Signal to Noise Ratio (SNR). Compared to the equivalent HEVC simulcast, the SHVC extension provides inter-layer prediction mechanisms enabling significant bit-rate savings. Moreover, these inter-layer prediction mechanisms are less complex than those of former standards. Therefore, SHVC seems a promising solution for both broadcast and storage applications and is considered as a video coding solution in ATSC 3.0. Indeed, spatial scalability is an application use-case considered in ATSC 3.0. This paper proposes a scalable multi-layer architecture combining pipelined software HEVC encoders. The proposed architecture provides a good trade-off between parallelism over layers and latency. Moreover, two configurations are proposed for Live or File encodings, with real-time or best coding efficiency targets, respectively. Results present a 2× spatial scalability application of this architecture, achieving real-time video encoding of 1080p60 and 1600p30 sequences in a low-delay configuration. The proposed SHVC solution also demonstrated real-time encoding of UHD content at an ATSC 3.0 meeting in a random-access configuration.},\n  keywords = {parallel architectures;real-time systems;video coding;pipelined software HEVC encoders;random-access configuration;real-time video encodings;low-delay configuration;file encodings;live encodings;spatial scalability application;ATSC 3 video coding solution;bit-rate savings;real-time UHD scalable HEVC encoder;broadcast applications;signal to noise ratio;high efficiency video coding;parallel architecture;SHVC solution;scalable multilayer architecture;application use-case;storage applications;inter-layer prediction mechanisms;SHVC extension;equivalent HEVC simulcast;multiple quality level;scalable extension;Encoding;Real-time systems;Video coding;Pipelines;Standards;Scalability;Computer architecture},\n  doi = {10.23919/EUSIPCO.2017.8081452},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347734.pdf},\n}\n\n
@InProceedings{8081453,\n  author = {Q. Yao and K. Kawamura and S. Naito},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Prediction mode based reference line synthesis for intra prediction of video coding},\n  year = {2017},\n  pages = {1470-1474},\n  abstract = {Intra prediction is a significant coding tool that allows a high level of video compression to be achieved in the current state-of-the-art video coding standard, High Efficiency Video Coding (HEVC), and the joint exploration model (JEM) developed by the Joint Video Exploration Team (JVET) of ITU-T VCEG and ISO/IEC MPEG for the next generation video coding standard. In intra prediction, the top and left adjacent lines to the current coding block in the neighboring reconstructed blocks are selected as the reference lines. However, it has been observed that the adjacent reference line might not always provide optimal prediction due to the quantization noise and object occlusions caused by straight lines. In this paper, we propose the synthesis of another reference line by integrating multiple lines in the neighboring reconstructed blocks based on the prediction mode. The synthesized line and the reconstructed adjacent line compete in the rate distortion optimization process, where the line that yields a minimum cost is finally selected. The proposed method is implemented on top of JEM 3.0, and the experimental results show that -0.29% (average), -1.15% (maximum) luma BD gain and -0.15% (average), -0.59% (maximum) luma BD gain can be achieved in all intra and random access conditions, respectively, among all the test sequences.},\n  keywords = {data compression;optimisation;video coding;optimal prediction;straight lines;neighboring reconstructed blocks;synthesized line;reconstructed adjacent line;random access conditions;prediction mode based reference line synthesis;intra prediction;significant coding tool;video compression;High Efficiency Video Coding;joint exploration model;Joint Video Exploration Team;adjacent lines;adjacent reference line;ISO-IEC MPEG;next generation video coding standard;coding block;HEVC;JEM;JVET;ITU-T VCEG;luma BD gain;rate distortion optimization process;Encoding;Video coding;Quantization (signal);Europe;Standards;Predictive models},\n  doi = {10.23919/EUSIPCO.2017.8081453},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343170.pdf},\n}\n\n
@InProceedings{8081454,\n  author = {H. Hamout and A. Elyousfi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low complexity intra mode decision algorithm for 3D-HEVC},\n  year = {2017},\n  pages = {1475-1479},\n  abstract = {3D High Efficiency Video Coding (3D-HEVC) has recently been approved by the Joint Collaborative Team on 3D Video coding (JCT-3V) and standardized as an extension of HEVC. To improve the intra coding efficiency of Multi-view Video plus Depth maps (MVD), 3D-HEVC provides a good intra coding solution, but the computational complexity increases significantly, which restricts the use of 3D-HEVC encoders in real-world applications. Therefore, an efficient intra mode decision algorithm is needed. To overcome the aforementioned problem, this paper presents a new mode prediction method enabling a high complexity reduction of 3D-HEVC intra coding. The experimental results show that the low complexity intra mode decision algorithm increases the speed of intra coding significantly with negligible loss of encoding efficiency.},\n  keywords = {computational complexity;video coding;low complexity intra mode decision algorithm;intra coding efficiency;good intra coding solution;3D-HEVC encoders;3D-HEVC intra coding;3D high efficiency video coding;Joint Collaborative Team on 3D Video coding;Encoding;Tensile stress;Signal processing algorithms;Prediction algorithms;Three-dimensional displays;Computational complexity;3D-HEVC;Intra coding;JCT-3V;Tensor structure;isotropic Gaussian filter},\n  doi = {10.23919/EUSIPCO.2017.8081454},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341630.pdf},\n}\n\n
@InProceedings{8081455,\n  author = {M. Azimi and R. Boitard and P. Nasiopoulos and M. T. Pourazad},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Visual color difference evaluation of standard color pixel representations for high dynamic range video compression},\n  year = {2017},\n  pages = {1480-1484},\n  abstract = {With the recent introduction of High Dynamic Range (HDR) and Wide Color Gamut (WCG) technologies, viewers' quality of experience is highly enriched. To distribute HDR videos over a transmission pipeline, color pixels need to be quantized into integer code-words. Linear quantization is not optimal since the Human Visual System (HVS) does not perceive light in a linear fashion. Thus, perceptual transfer functions (PTFs) and color pixel representations are used to convert linear light and color values into a non-linear domain, so that they correspond more closely to the response of the human eye. In this work, we measure the visual color differences caused by different PTFs and color representations with 10-bit quantization. Our study encompasses all the visible colors of the BT.2020 gamut at different representative luminance levels. Visual color differences are predicted using a perceptual color error metric (CIE ΔE2000). Results show that visible color distortion can already occur before any type of video compression is performed on the signal and that choosing the right PTF and color representation can greatly reduce these distortions and effectively enhance the quality of experience.},\n  keywords = {brightness;colour displays;data compression;display devices;image colour analysis;image representation;lighting;quantisation (signal);video coding;color representation;standard color pixel representations;high dynamic range video compression;Wide Color Gamut technologies;HDR videos;linear quantization;Human Visual System;linear light;color values;visual color differences;perceptual color error metric;visible color distortion;visual color difference evaluation;representative luminance levels;Color;Quantization (signal);Transfer functions;Pipelines;Standards;Video compression;Europe;HDR;Color difference;Perceptual transfer function;Color pixel representation;Quantization},\n  doi = {10.23919/EUSIPCO.2017.8081455},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347652.pdf},\n}\n\n
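
The measurement loop of such a study is compact enough to sketch: encode linear light with a PTF, round to 10-bit code-words, decode, and compare the two colors with CIEDE2000. Below, the SMPTE ST 2084 (PQ) curve stands in for the PTF and scikit-image computes ΔE2000; treating the toy values as sRGB-displayable is my simplification, since a faithful evaluation would work in BT.2020 at absolute luminance.

import numpy as np
from skimage.color import rgb2lab, deltaE_ciede2000

# SMPTE ST 2084 (PQ) constants
m1, m2 = 2610 / 16384, 2523 / 4096 * 128
c1, c2, c3 = 3424 / 4096, 2413 / 4096 * 32, 2392 / 4096 * 32

def pq_oetf(Y):                         # linear light in [0, 1]
    Ym = Y ** m1
    return ((c1 + c2 * Ym) / (1 + c3 * Ym)) ** m2

def pq_eotf(E):                         # inverse of pq_oetf
    Em = E ** (1 / m2)
    return (np.maximum(Em - c1, 0) / (c2 - c3 * Em)) ** (1 / m1)

def quantize(E, bits=10):               # integer code-words, then back to [0, 1]
    n = 2 ** bits - 1
    return np.round(E * n) / n

rgb = np.random.default_rng(7).random((64, 64, 3))      # toy linear colors
rgb_hat = pq_eotf(quantize(pq_oetf(rgb)))
dE = deltaE_ciede2000(rgb2lab(rgb), rgb2lab(rgb_hat))
print("max CIEDE2000 after 10-bit PQ quantization:", dE.max())
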
@InProceedings{8081456,\n  author = {I. Matsuda and T. Ishikawa and Y. Kameda and S. Itoh},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A machine learning approach to reducing image coding artifacts},\n  year = {2017},\n  pages = {1485-1489},\n  abstract = {In this paper, a method for reducing coding artifacts introduced by lossy image compression is proposed. The method is similar to sample adaptive offset (SAO), which is adopted in the H.265/HEVC video coding standard as one of its in-loop filtering tools. In SAO, samples of the reconstructed image are classified into several categories based on some simple algorithms, and an optimum offset value is then added to the samples belonging to each category. Since the classification algorithms are switched on a block-by-block basis, a non-negligible amount of side-information must be transmitted to the decoder in addition to the offset values. On the other hand, our method adopts a machine learning technique using a support vector machine (SVM) for the classification process. By applying the common SVM classifier to a whole image, the amount of side-information can be considerably reduced. Simulation results indicate that the proposed method provides bitrate savings of up to 1.0% for HD-size images degraded through intra frame coding of the H.265/HEVC standard.},\n  keywords = {data compression;image coding;learning (artificial intelligence);support vector machines;video coding;image coding artifacts;lossy image compression;SAO;H.265/HEVC video coding standard;in-loop filtering tools;reconstructed image;classification algorithms;block-by-block basis;offset values;support vector machine;classification process;HD size images;intra frame coding;H.265/HEVC standard;sample adaptive offset;SVM classifier;Support vector machines;Image reconstruction;Image coding;Encoding;Training;Standards;Quantization (signal);Post filtering;coding artifacts;machine learning;support vector machine (SVM);sample adaptive offset (SAO)},\n  doi = {10.23919/EUSIPCO.2017.8081456},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347565.pdf},\n}\n\n
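
A toy version of the idea, assuming my own features and a synthetic degradation rather than anything from the paper: classify each sample from its local neighborhood with one global SVM, then add a per-category offset fitted on the encoder side, which is exactly the SAO pattern but with a learned, image-wide classifier.

import numpy as np
from scipy.ndimage import uniform_filter
from sklearn.svm import SVC

rng = np.random.default_rng(8)
orig = rng.random((64, 64))
rec = uniform_filter(orig, size=3)          # toy coding degradation: low-pass
err = (orig - rec).ravel()                  # known at the encoder

def features(img):
    # differences to the 8 neighbors of every sample
    pad = np.pad(img, 1, mode="edge")
    H, W = img.shape
    return np.stack([pad[1 + dy:H + 1 + dy, 1 + dx:W + 1 + dx] - img
                     for dy in (-1, 0, 1) for dx in (-1, 0, 1)
                     if (dy, dx) != (0, 0)], axis=-1).reshape(-1, 8)

cats = np.digitize(err, np.quantile(err, [0.25, 0.5, 0.75]))   # 4 categories
F = features(rec)
clf = SVC(kernel="rbf").fit(F[::16], cats[::16])    # encoder-side training
pred = clf.predict(F)
# per predicted category, the mean residual is the offset to be signaled
offsets = np.array([err[pred == c].mean() if np.any(pred == c) else 0.0
                    for c in range(4)])
rec2 = rec.ravel() + offsets[pred]
print("MSE before / after:", np.mean(err ** 2),
      np.mean((orig.ravel() - rec2) ** 2))
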
@InProceedings{8081457,\n  author = {D. Tran and S. Li-Thiao-Té and M. Luong and T. Le-Tien and F. Dibos},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Patch-based image denoising: Probability distribution estimation vs. sparsity prior},\n  year = {2017},\n  pages = {1490-1494},\n  abstract = {Patch-based image denoising can be interpreted under the Bayesian framework which incorporates the image formation model and a prior image distribution. In the sparsity approach, the prior is often assumed to obey an arbitrarily chosen distribution. Our motivation is to estimate the probability directly from the distribution of image patches extracted from good quality images, thanks to a given dictionary and the redundancy of information between many images. In this paper, we provide a scheme to estimate the probability distribution and also an optimized algorithm for denoising. We demonstrate that using the estimated probability distribution as the image prior is more efficient than the state-of-the-art sparsity models for noise removal.},\n  keywords = {Bayes methods;image denoising;image reconstruction;image representation;probability;probability distribution estimation;patch-based image denoising;estimated probability distribution;good quality images;image patches;sparsity approach;prior image distribution;image formation model;Bayesian framework;Dictionaries;Probability distribution;Noise reduction;Databases;Estimation;Noise measurement;Standards;Patch-based;denoising;sparse representation;probability distribution estimation},\n  doi = {10.23919/EUSIPCO.2017.8081457},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347219.pdf},\n}\n\n
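
As a toy counterpart to the estimation step, under my own setup: gather patches from good-quality images and fit a density to them, so that log p(patch) can later act as the image prior inside the Bayesian restoration. The paper works with coefficients over a given dictionary rather than raw patches, so the KDE below is only a stand-in for that estimate.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.default_rng(9)
clean = rng.random((256, 256))          # stand-in for a good-quality image
P = 6
patches = np.stack([clean[i:i + P, j:j + P].ravel()
                    for i in range(0, 250, 5) for j in range(0, 250, 5)])
patches -= patches.mean(axis=1, keepdims=True)   # remove the DC component
prior = KernelDensity(bandwidth=0.1).fit(patches)
print(prior.score_samples(patches[:3]))          # log p(patch) under the prior
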
\n
\n\n\n
\n Patch-based image denoising can be interpreted under the Bayesian framework which incorporates the image formation model and a prior image distribution. In the sparsity approach, the prior is often assumed to obey an arbitrarily chosen distribution. Our motivation is to estimate the probability directly from the distribution of image patches extracted from good quality images, thanks to a given dictionary and the redundancy of information between many images. In this paper, we provide a scheme to estimate the probability distribution and also an optimized algorithm for denoising. We demonstrate that using the estimated probability distribution as the image prior is more efficient than the state-of-the-art sparsity models for noise removal.\n
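A minimal sketch of estimating the patch prior empirically rather than positing a sparsity law: sparse-code clean patches over a given dictionary and histogram the resulting coefficients. The patch size, OMP coding, and independent-per-atom histogram prior are assumptions for illustration.

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.decomposition import MiniBatchDictionaryLearning, sparse_encode

clean = np.random.rand(128, 128)            # stand-in for good-quality images
patches = extract_patches_2d(clean, (8, 8), max_patches=2000).reshape(2000, -1)
patches -= patches.mean(axis=1, keepdims=True)

dico = MiniBatchDictionaryLearning(n_components=64).fit(patches)
codes = sparse_encode(patches, dico.components_, algorithm='omp',
                      n_nonzero_coefs=5)

# Empirical prior: per-atom histogram of coefficient values.
bins = np.linspace(codes.min(), codes.max(), 65)
hist = [np.histogram(codes[:, k], bins=bins, density=True)[0] + 1e-8
        for k in range(codes.shape[1])]

def neg_log_prior(code_vec):
    """-log p(code) under the estimated (independent-atom) distribution."""
    idx = np.clip(np.digitize(code_vec, bins) - 1, 0, 63)
    return -sum(np.log(hist[k][i]) for k, i in enumerate(idx))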
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Parameter estimation in spike and slab variational inference for blind image deconvolution.\n \n \n \n \n\n\n \n Serra, J. G.; Mateos, J.; Molina, R.; and Katsaggelos, A. K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1495-1499, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ParameterPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081458,\n  author = {J. G. Serra and J. Mateos and R. Molina and A. K. Katsaggelos},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Parameter estimation in spike and slab variational inference for blind image deconvolution},\n  year = {2017},\n  pages = {1495-1499},\n  abstract = {Most current state of the art blind image deconvolution methods model the underlying image (either in the image or filter space) using sparsity promoting priors and perform inference, that is, image, blur, and parameter estimation using variational approximation. In this paper we propose the use of the spike-and-slab prior model in the filter space and a variational posterior approximation more expressive than mean field. The spike-and-slab prior model, which is the {"}gold-standard{"} in sparse machine learning, has the ability to selectively shrink irrelevant variables while relevant variables are mildly regularized. This allows to discard irrelevant information while preserving important features for the estimation of the blur which results in more precise and less noisy blur kernel estimates. In this paper we present a variational inference algorithm for estimating the blur in the filter space, which is both more efficient than MCMC and more accurate than the standard mean field variational approximation. The parameters of the prior model are automatically estimated together with the blur. Once the blur is estimated, a non-blind image restoration algorithm is used to obtain the sharp image. We prove the efficacy of our method on both synthetically generated and real images.},\n  keywords = {approximation theory;deconvolution;image restoration;inference mechanisms;learning (artificial intelligence);parameter estimation;parameter estimation;slab variational inference;variational posterior approximation;nonblind image restoration algorithm;blind image deconvolution;spike variational inference;sparsity promoting priors;variational approximation;spike-and-slab prior model;sparse machine learning;blur kernel estimates;Estimation;Deconvolution;Slabs;Approximation algorithms;Inference algorithms;Standards;Bayes methods},\n  doi = {10.23919/EUSIPCO.2017.8081458},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346461.pdf},\n}\n\n
\n
\n\n\n
\n Most current state-of-the-art blind image deconvolution methods model the underlying image (either in the image or the filter space) using sparsity-promoting priors and perform inference, that is, image, blur, and parameter estimation, using variational approximation. In this paper we propose the use of the spike-and-slab prior model in the filter space and a variational posterior approximation more expressive than mean field. The spike-and-slab prior model, the "gold standard" in sparse machine learning, has the ability to selectively shrink irrelevant variables while relevant variables are only mildly regularized. This makes it possible to discard irrelevant information while preserving the features important for blur estimation, which results in more precise and less noisy blur kernel estimates. We present a variational inference algorithm for estimating the blur in the filter space which is both more efficient than MCMC and more accurate than the standard mean-field variational approximation. The parameters of the prior model are estimated automatically together with the blur. Once the blur is estimated, a non-blind image restoration algorithm is used to obtain the sharp image. We demonstrate the efficacy of our method on both synthetically generated and real images.\n
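The selective-shrinkage behaviour this prior buys can be seen in closed form for a single coefficient observed in Gaussian noise; the snippet below is that textbook posterior mean, not the authors' variational algorithm. Small coefficients are driven to zero while large ones are only mildly regularized.

import numpy as np
from scipy.stats import norm

def spike_slab_shrink(y, pi=0.1, slab_var=1.0, noise_var=0.05):
    """E[x | y] for y = x + noise, x = 0 w.p. (1-pi), x ~ N(0, slab_var) w.p. pi."""
    like_slab = norm.pdf(y, scale=np.sqrt(slab_var + noise_var))
    like_spike = norm.pdf(y, scale=np.sqrt(noise_var))
    p_incl = pi * like_slab / (pi * like_slab + (1 - pi) * like_spike)
    return p_incl * (slab_var / (slab_var + noise_var)) * y

y = np.array([-2.0, -0.1, 0.02, 0.15, 1.5])
print(spike_slab_shrink(y))   # large entries barely shrunk, small ones ~ 0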
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Billboard deformation via 3D voxel by using optimization for free-viewpoint system.\n \n \n \n \n\n\n \n Nonaka, K.; Yao, Q.; Sabirin, H.; Chen, J.; Sankoh, H.; and Naito, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1500-1504, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"BillboardPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081459,\n  author = {K. Nonaka and Q. Yao and H. Sabirin and J. Chen and H. Sankoh and S. Naito},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Billboard deformation via 3D voxel by using optimization for free-viewpoint system},\n  year = {2017},\n  pages = {1500-1504},\n  abstract = {A free viewpoint application has been developed that yields an immersive user experience. The free viewpoint approach called the {"}billboard methodis{"} suitable for displaying a synthesized 3D view in a mobile device, but it suffers from the limitation that a billboard cannot present an accurate impression of depth for a foreground object, and it gives users an unacceptable impression from certain virtual viewpoints. To solve this problem, we propose the optimal deformation of the billboard. The deformation is designed as a mapping of grid points in the input billboard silhouette to produce an optimal silhouette from an accurate voxel model of the object. We formulate and solve this procedure as a nonlinear optimization problem based on a grid-point constraint and some a priori information. Our results show that the optimal deformation is produced by the proposed method, which generates a synthesized virtual image having a natural appearance.},\n  keywords = {deformation;human factors;nonlinear programming;virtual reality;immersive user experience;unacceptable impression;natural appearance;foreground object;mobile device;synthesized 3D view;free viewpoint approach;free viewpoint application;free-viewpoint system;billboard deformation;synthesized virtual image;grid-point constraint;nonlinear optimization problem;accurate voxel model;optimal silhouette;input billboard silhouette;grid points;optimal deformation;virtual viewpoints;Cameras;Three-dimensional displays;Solid modeling;Image edge detection;Computational modeling;Optimization;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081459},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345898.pdf},\n}\n\n
\n
\n\n\n
\n A free-viewpoint application has been developed that yields an immersive user experience. The free-viewpoint approach called the "billboard method" is suitable for displaying a synthesized 3D view on a mobile device, but it suffers from the limitation that a billboard cannot present an accurate impression of depth for a foreground object, and it gives users an unacceptable impression from certain virtual viewpoints. To solve this problem, we propose an optimal deformation of the billboard. The deformation is designed as a mapping of grid points in the input billboard silhouette that produces an optimal silhouette from an accurate voxel model of the object. We formulate and solve this procedure as a nonlinear optimization problem based on a grid-point constraint and some a priori information. Our results show that the proposed method produces the optimal deformation and generates a synthesized virtual image with a natural appearance.\n
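A toy one-dimensional analogue of the grid-deformation step, assuming a silhouette represented by column heights: grid-point displacements are optimized so the deformed billboard silhouette matches the target silhouette rendered from the voxel model, under a smoothness penalty. The residual design and weight are illustrative.

import numpy as np
from scipy.optimize import least_squares

x = np.linspace(0, 1, 50)
billboard = np.sin(np.pi * x)                  # input silhouette heights
target = np.sin(np.pi * x) ** 0.5              # silhouette from the voxel model

def residuals(d, lam=5.0):
    data = (billboard + d) - target            # match the target silhouette
    smooth = lam * np.diff(d)                  # penalize jagged deformations
    return np.concatenate([data, smooth])

d_opt = least_squares(residuals, np.zeros_like(x)).x
print(np.abs(billboard + d_opt - target).max())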
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A new adaptive video SRR algorithm with improved robustness to innovations.\n \n \n \n \n\n\n \n Borsoi, R. A.; Costa, G. H.; and Bermudez, J. C. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1505-1509, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081460,\n  author = {R. A. Borsoi and G. H. Costa and J. C. M. Bermudez},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A new adaptive video SRR algorithm with improved robustness to innovations},\n  year = {2017},\n  pages = {1505-1509},\n  abstract = {In this paper, a new video super-resolution reconstruction (SRR) method with improved robustness to outliers is proposed. By studying the proximal point cost function representation of the R-LMS iterative equation, a better understanding of its performance is attained, which allows us to devise a new algorithm with improved robustness, while maintaining comparable quality and computational cost. Monte Carlo simulation results illustrate that the proposed method outperforms the traditional and regularized versions of the LMS algorithm.},\n  keywords = {image reconstruction;image resolution;iterative methods;least mean squares methods;Monte Carlo methods;video signal processing;LMS algorithm;adaptive video SRR algorithm;video super-resolution reconstruction method;proximal point cost function representation;R-LMS iterative equation;Monte Carlo simulation;Signal processing algorithms;Robustness;Image reconstruction;Technological innovation;Cost function;Image resolution;Computational efficiency;Super-resolution;R-LMS;outliers},\n  doi = {10.23919/EUSIPCO.2017.8081460},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343669.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, a new video super-resolution reconstruction (SRR) method with improved robustness to outliers is proposed. By studying the proximal point cost function representation of the R-LMS iterative equation, a better understanding of its performance is attained, which allows us to devise a new algorithm with improved robustness, while maintaining comparable quality and computational cost. Monte Carlo simulation results illustrate that the proposed method outperforms the traditional and regularized versions of the LMS algorithm.\n
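A schematic of an R-LMS-style stochastic update, under the usual acquisition model of blur followed by decimation. The Gaussian blur, its reuse as an approximate adjoint, and the crude smoothness term are assumptions for illustration (x: current HR estimate, y_lr: newest LR frame, even image dimensions assumed).

import numpy as np
from scipy.ndimage import gaussian_filter

def downsample(img, f=2):
    return img[::f, ::f]

def upsample(img, f=2):
    out = np.zeros((img.shape[0] * f, img.shape[1] * f))
    out[::f, ::f] = img
    return out

def rlms_step(x, y_lr, mu=1.0, alpha=0.05):
    """One LMS-style update: data-term gradient plus a mild smoothing pull."""
    resid = y_lr - downsample(gaussian_filter(x, 1.0))
    grad_data = gaussian_filter(upsample(resid), 1.0)   # approx. adjoint of blur+decimation
    smooth_pull = gaussian_filter(x, 1.0) - x           # crude smoothness direction
    return x + mu * grad_data + mu * alpha * smooth_pull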
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Phase contrast computed tomography using continuous THz source.\n \n \n \n \n\n\n \n Suga, M.; Sasaki, Y.; Sasahara, T.; Yuasa, T.; and Otani, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1510-1513, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PhasePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081461,\n  author = {M. Suga and Y. Sasaki and T. Sasahara and T. Yuasa and C. Otani},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Phase contrast computed tomography using continuous THz source},\n  year = {2017},\n  pages = {1510-1513},\n  abstract = {We propose a novel computed tomography (CT) imaging method based on phase-contrast using a continuous wave (CW) THz source with a frequency of 0.54 THz. The system acquires phase-shift by phase modulation technique using Mach-Zehnder interferometer at each data point, and collects projections of the phase-shift from multiple directions over 360 degrees to finally reconstruct a spatial distribution of refractive index. We construct a preliminary system for proof of the concept, and perform an imaging experiment using a polystyrene foam phantom. It was shown that the THz-CT produces an artifact-free and quantitative reconstruction at a spatial resolution of a few mm.},\n  keywords = {biomedical optical imaging;computerised tomography;image reconstruction;image resolution;Mach-Zehnder interferometers;medical image processing;phantoms;phase modulation;refractive index;terahertz wave imaging;phase contrast computed tomography;continuous THz source;continuous wave;phase-shift;phase modulation technique;Mach-Zehnder interferometer;image reconstruction;refractive index;polystyrene foam phantom;Image reconstruction;Computed tomography;Refractive index;Optimized production technology;Europe;Signal processing;phase contrast;computed tomography;Mach-Zehnder interferometer;refractive index},\n  doi = {10.23919/EUSIPCO.2017.8081461},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340863.pdf},\n}\n\n
\n
\n\n\n
\n We propose a novel computed tomography (CT) imaging method based on phase contrast, using a continuous-wave (CW) THz source with a frequency of 0.54 THz. The system acquires the phase shift at each data point by a phase modulation technique using a Mach-Zehnder interferometer, and collects projections of the phase shift from multiple directions over 360 degrees to reconstruct the spatial distribution of the refractive index. We construct a preliminary system as a proof of concept and perform an imaging experiment using a polystyrene foam phantom. The results show that the THz-CT produces an artifact-free and quantitative reconstruction at a spatial resolution of a few mm.\n
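A hedged sketch of the processing chain: four-step phase extraction at each data point, phase unwrapping along the detector, then filtered back-projection. skimage's iradon stands in for the reconstruction; the array sizes are placeholders.

import numpy as np
from skimage.transform import iradon

def four_step_phase(I0, I1, I2, I3):
    """Wrapped phase from intensities at modulation phases 0, pi/2, pi, 3pi/2."""
    return np.arctan2(I3 - I1, I0 - I2)

# sino[d, a]: phase shift at detector position d, projection angle a
theta = np.linspace(0.0, 360.0, 360, endpoint=False)
sino = np.random.rand(64, 360)            # placeholder for measured phase data
sino = np.unwrap(sino, axis=0)            # remove residual 2*pi jumps
recon = iradon(sino, theta=theta)         # refractive-index map (up to scale)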
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Heuristics for tile parallelism in HEVC.\n \n \n \n \n\n\n \n Koziri, M.; Papadopoulos, P. K.; Tziritas, N.; Giachoudis, N.; Loukopoulos, T.; Khan, S. U.; and Stamoulis, G. I.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1514-1518, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HeuristicsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081462,\n  author = {M. Koziri and P. K. Papadopoulos and N. Tziritas and N. Giachoudis and T. Loukopoulos and S. U. Khan and G. I. Stamoulis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Heuristics for tile parallelism in HEVC},\n  year = {2017},\n  pages = {1514-1518},\n  abstract = {HEVC has emerged as the new video coding standard promising increased compression ratios compared to its predecessors. This performance improvement comes at a high computational cost. For this reason, HEVC offers three coarse grained parallelization potentials namely, wave front, slices and tiles. In this paper we focus on tile parallelism which is a relatively new concept with its effects not yet fully explored. Particularly, we investigate the problem of partitioning a frame into tiles so that in a resulting one on one tile-CPU core assignment the cores are load balanced, thus, maximum speedup can be achieved. We propose various heuristics for the problem with a focus on low delay coding and evaluate them against state of the art approaches. Results demonstrate that particular heuristic combinations clearly outperform their counterparts in the literature.},\n  keywords = {data compression;video coding;heuristics;tile parallelism;HEVC;video coding standard;performance improvement;tile-CPU core assignment the cores;low delay coding;coarse grained parallelization;compression ratios;Encoding;Parallel processing;Partitioning algorithms;Estimation;Video coding;Delays;Signal processing algorithms;video coding;tiles;parallelism;HEVC;partitioning},\n  doi = {10.23919/EUSIPCO.2017.8081462},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347698.pdf},\n}\n\n
\n
\n\n\n
\n HEVC has emerged as the new video coding standard, promising increased compression ratios compared to its predecessors. This performance improvement comes at a high computational cost. For this reason, HEVC offers three coarse-grained parallelization tools, namely wavefront, slices, and tiles. In this paper we focus on tile parallelism, a relatively new concept whose effects are not yet fully explored. In particular, we investigate the problem of partitioning a frame into tiles so that, in the resulting one-to-one tile-to-core assignment, the CPU cores are load balanced and maximum speedup can be achieved. We propose various heuristics for the problem, with a focus on low-delay coding, and evaluate them against state-of-the-art approaches. Results demonstrate that particular heuristic combinations clearly outperform their counterparts in the literature.\n
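An illustrative greedy heuristic in the spirit of the paper: choose tile-row and tile-column boundaries so that each tile collects roughly equal predicted coding cost. The per-CTU cost map below is a stand-in (in practice, e.g., previous-frame encoding times).

import numpy as np

def balanced_cuts(costs_1d, n_parts):
    """Greedy boundaries so each part's summed cost is ~ total / n_parts."""
    csum, total = np.cumsum(costs_1d), costs_1d.sum()
    targets = total * np.arange(1, n_parts) / n_parts
    return [int(np.searchsorted(csum, t)) + 1 for t in targets]

ctu_cost = np.random.rand(17, 30)                  # 17x30 CTUs (1080p-like grid)
row_cuts = balanced_cuts(ctu_cost.sum(axis=1), 3)  # 3 tile rows
col_cuts = balanced_cuts(ctu_cost.sum(axis=0), 2)  # 2 tile columns
print(row_cuts, col_cuts)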
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance and energy consumption analysis of the X265 video encoder.\n \n \n \n \n\n\n \n Silveira, D.; Porto, M.; and Bampi, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1519-1523, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081463,\n  author = {D. Silveira and M. Porto and S. Bampi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Performance and energy consumption analysis of the X265 video encoder},\n  year = {2017},\n  pages = {1519-1523},\n  abstract = {The ×265 video encoder aims at improving the speed and the computational efficiency of HEVC encoders implementation. In this paper we present a detailed energy consumption analysis, considering the consumption components of CPU, cache memories and main memory, for all ×265 presets executing in a multicore system. Ten HD 1080p test video sequences with different motion and brightness characteristics are used in the experiments. Three tools are used to obtain the results: CACTI, PCM and Perf. To get more reliable time/energy results, 10 executions were performed for each preset. The results show that fast presets are 47× faster than slower presets. However, slower presets use robust configurations and achieve large reductions in bitrate. Due to this, the ultrafast preset has a bitrate 45% higher than placebo preset. Furthermore, the system energy consumption increases 45×, from ultrafast preset to placebo preset. Our experiments clearly present the dependence between bitrate and energy consumption for all encoding presets, which allows us to choose the best bitrate/energy trade-off for each platform at hand.},\n  keywords = {energy consumption;high definition video;image sequences;power aware computing;video coding;ultrafast preset;placebo preset;system energy consumption analysis;HEVC encoders;CPU consumption components;X265 presets;CACTI;PCM;Perf;brightness characteristics;HD 1080p test video sequences;multicore system;main memory;cache memories;X265 video encoder;bitrate/energy trade-off;encoding presets;Energy consumption;Tools;Encoding;Bit rate;Cache memory;Phase change materials;Software;Video encoding;HEVC;x265;energy consumption;multicore system},\n  doi = {10.23919/EUSIPCO.2017.8081463},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347537.pdf},\n}\n\n
\n
\n\n\n
\n The x265 video encoder aims at improving the speed and computational efficiency of HEVC encoder implementations. In this paper we present a detailed energy consumption analysis, considering the consumption of the CPU, the cache memories, and the main memory, for all x265 presets executing on a multicore system. Ten HD 1080p test video sequences with different motion and brightness characteristics are used in the experiments. Three tools are used to obtain the results: CACTI, PCM, and Perf. To obtain more reliable time/energy results, 10 executions were performed for each preset. The results show that the fastest presets are 47× faster than the slowest ones. However, the slower presets use more robust configurations and achieve large reductions in bitrate; as a consequence, the ultrafast preset yields a bitrate 45% higher than the placebo preset, while the system energy consumption increases 45× from the ultrafast to the placebo preset. Our experiments clearly expose the dependence between bitrate and energy consumption for all encoding presets, which allows the best bitrate/energy trade-off to be chosen for the platform at hand.\n
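The speed/bitrate side of this trade-off is easy to reproduce (the paper's energy figures additionally require CACTI, PCM, and Perf). A minimal sketch, assuming an ffmpeg build with libx265 on the PATH and an input file in.y4m:

import os
import subprocess
import time

presets = ["ultrafast", "fast", "medium", "slow", "placebo"]
for p in presets:
    t0 = time.time()
    subprocess.run(["ffmpeg", "-y", "-i", "in.y4m", "-c:v", "libx265",
                    "-preset", p, f"out_{p}.hevc"],
                   check=True, capture_output=True)
    print(p, f"{time.time() - t0:.1f}s",
          os.path.getsize(f"out_{p}.hevc"), "bytes")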
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Edge-aware depth motion estimation — A complexity reduction scheme for 3D-HEVC.\n \n \n \n\n\n \n Sanchez, G.; Saldanha, M.; Zatt, B.; Porto, M.; Agostini, L.; and Marcon, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1524-1528, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081464,\n  author = {G. Sanchez and M. Saldanha and B. Zatt and M. Porto and L. Agostini and C. Marcon},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Edge-aware depth motion estimation — A complexity reduction scheme for 3D-HEVC},\n  year = {2017},\n  pages = {1524-1528},\n  abstract = {This work presents the Edge-Aware Depth Motion Estimation (E-ADME), a complexity reduction scheme developed for depth maps coding on 3D High Efficiency Video Coding (3D-HEVC). This scheme focuses on obtaining an execution time reduction while keeping a high quality of the encoded depth map. E-ADME starts classifying each encoding depth block as edge or homogeneous. If the block is classified as an edge, then the Test Zone Search (TZS) is applied because edges require expensive comparisons to find the best block match. Otherwise, the scheme applies an Iterative-Small Diamond Search Pattern (I-SDSP), which is a lightweight center-biased algorithm for efficient encoding of homogeneous blocks. The proposed solution was capable of achieving a time saving of 6.9% in depth maps coding, increasing less than 0.15% the BD-rate of the synthesized view.},\n  keywords = {computational complexity;motion estimation;video coding;execution time reduction;encoded depth map;E-ADME;encoding depth block;depth maps;complexity reduction scheme;3D-HEVC;3D High Efficiency Video Coding;edge-aware depth motion estimation;Encoding;Algorithm design and analysis;Signal processing algorithms;Complexity theory;Image edge detection;Motion estimation;Prediction algorithms;3D-HEVC;Motion Estimation;Depth Maps;Timesaving;Complexity Reduction},\n  doi = {10.23919/EUSIPCO.2017.8081464},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n This work presents Edge-Aware Depth Motion Estimation (E-ADME), a complexity reduction scheme developed for depth map coding in 3D High Efficiency Video Coding (3D-HEVC). The scheme aims to reduce execution time while keeping the encoded depth map at high quality. E-ADME starts by classifying each depth block to be encoded as edge or homogeneous. If the block is classified as an edge block, the Test Zone Search (TZS) is applied, because edges require expensive comparisons to find the best block match. Otherwise, the scheme applies an Iterative Small Diamond Search Pattern (I-SDSP), a lightweight center-biased algorithm for the efficient encoding of homogeneous blocks. The proposed solution achieves a time saving of 6.9% in depth map coding while increasing the BD-rate of the synthesized view by less than 0.15%.\n
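A sketch of the dispatch step only: classify a depth block as edge or homogeneous and route it to the expensive or the cheap search. The Sobel-magnitude test and threshold are assumptions; the paper's classifier may differ.

import numpy as np
from scipy.ndimage import sobel

def is_edge_block(block, thresh=40.0):
    b = block.astype(float)
    mag = np.hypot(sobel(b, 0), sobel(b, 1))
    return mag.max() > thresh

def choose_search(block):
    if is_edge_block(block):
        return "TZS"      # full Test Zone Search for edge blocks
    return "I-SDSP"       # iterative small-diamond search for flat blocks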
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Frequency spectrum regularization for pattern noise removal based on image decomposition.\n \n \n \n \n\n\n \n Shirai, K.; Ono, S.; and Okuda, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1529-1533, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"FrequencyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081465,\n  author = {K. Shirai and S. Ono and M. Okuda},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Frequency spectrum regularization for pattern noise removal based on image decomposition},\n  year = {2017},\n  pages = {1529-1533},\n  abstract = {This paper deals with a mixed norm of complex vectors, i.e., the sum of amplitude spectra, and its minimization problem. A combination of this mixed norm and image decomposition problem works well for reduction and decomposition of pattern noise that arises when scanning old photographs with granulated surface. Generally, the spectral distribution of natural images decreases smoothly from low frequency band toward high frequency band, while that of pattern noise is distributed sparsely. Therefore, we assume that an observed image consists of a latent image component and a pattern noise component, and characterize them by using the total variation function and the proposed function, respectively. This enables a reasonable decomposition of the two components. Compared to similar decomposition methods such as Robust PCA, our method has a good decomposition accuracy for this task, and low computational cost.},\n  keywords = {image denoising;frequency spectrum regularization;pattern noise removal;complex vectors;image decomposition problem;old photographs;granulated surface;spectral distribution;natural images;latent image component;pattern noise component;low-frequency band;high-frequency band;Mathematical model;Convex functions;Minimization;Signal processing algorithms;Europe;Signal processing;Data models},\n  doi = {10.23919/EUSIPCO.2017.8081465},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347198.pdf},\n}\n\n
\n
\n\n\n
\n This paper deals with a mixed norm of complex vectors, i.e., the sum of amplitude spectra, and its minimization problem. Combining this mixed norm with an image decomposition formulation works well for the reduction and decomposition of the pattern noise that arises when scanning old photographs with a granulated surface. Generally, the spectral distribution of natural images decreases smoothly from the low-frequency band toward the high-frequency band, while that of pattern noise is distributed sparsely. We therefore assume that an observed image consists of a latent image component and a pattern noise component, and characterize them by the total variation function and the proposed function, respectively. This enables a reasonable decomposition of the two components. Compared to similar decomposition methods such as Robust PCA, our method achieves good decomposition accuracy for this task at low computational cost.\n
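The key computational ingredient is the proximal operator of the mixed norm lambda * sum_f |X(f)|, an l1 norm on complex DFT coefficients, which amounts to soft-thresholding the spectrum's magnitudes. A decomposition solver would alternate this with a TV step on the latent image; only the prox is sketched here.

import numpy as np

def prox_amplitude_l1(img_component, lam):
    X = np.fft.fft2(img_component)
    mag = np.abs(X)
    shrink = np.maximum(mag - lam, 0.0) / np.maximum(mag, 1e-12)
    return np.real(np.fft.ifft2(X * shrink))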
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive colour-space selection in high efficiency video coding.\n \n \n \n \n\n\n \n Strutz, T.; and Leipnitz, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1534-1538, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081466,\n  author = {T. Strutz and A. Leipnitz},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Adaptive colour-space selection in high efficiency video coding},\n  year = {2017},\n  pages = {1534-1538},\n  abstract = {Recent developments in the standardisation of High Efficiency Video Coding (HEVC) have shown that the block-wise activation/deactivation of a colour transform can significantly improve the compression performance. This coding tool is based on a fixed colour space which is either YCgCo in lossy compression mode or YCgCo-R in the lossless mode. The proposed method shows that the performance can be increased even more when the colour space is not fixed but selected dependent on the image characteristic. Improvements of more than 2% can be achieved in lossless intra coding if the colour space is automatically chosen once for the entire image. In lossy intra compression, the performance can also be increased if a proper colour space is chosen.},\n  keywords = {data compression;image colour analysis;video coding;adaptive colour-space selection;high efficiency video coding;compression performance;coding tool;fixed colour space;lossy compression mode;lossless mode;lossless intra coding;lossy intra compression;block-wise activation-deactivation;colour transform;Image color analysis;Transforms;Image coding;Color;Tools;Standards;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081466},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346995.pdf},\n}\n\n
\n
\n\n\n
\n Recent developments in the standardisation of High Efficiency Video Coding (HEVC) have shown that the block-wise activation/deactivation of a colour transform can significantly improve the compression performance. This coding tool is based on a fixed colour space which is either YCgCo in lossy compression mode or YCgCo-R in the lossless mode. The proposed method shows that the performance can be increased even more when the colour space is not fixed but selected dependent on the image characteristic. Improvements of more than 2% can be achieved in lossless intra coding if the colour space is automatically chosen once for the entire image. In lossy intra compression, the performance can also be increased if a proper colour space is chosen.\n
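A minimal sketch of per-image selection for the lossless case: apply the reversible YCgCo-R lifting transform and keep whichever space has the lower total zeroth-order entropy. Using entropy as the coding-cost proxy is an assumption; the transform itself is the standard one.

import numpy as np

def ycgco_r(rgb):                       # lifting-based, integer-reversible
    R, G, B = (rgb[..., i].astype(np.int32) for i in range(3))
    Co = R - B
    t = B + (Co >> 1)
    Cg = G - t
    Y = t + (Cg >> 1)
    return np.stack([Y, Cg, Co], axis=-1)

def entropy(chan):
    _, counts = np.unique(chan, return_counts=True)
    p = counts / counts.sum()
    return -(p * np.log2(p)).sum()

def pick_space(rgb):
    cost_rgb = sum(entropy(rgb[..., i]) for i in range(3))
    ycc = ycgco_r(rgb)
    cost_ycc = sum(entropy(ycc[..., i]) for i in range(3))
    return "YCgCo-R" if cost_ycc < cost_rgb else "RGB"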
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n HDR image compression with optimized JPEG coding.\n \n \n \n \n\n\n \n Zaid, A. O.; and Houimli, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1539-1543, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HDRPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081467,\n  author = {A. O. Zaid and A. Houimli},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {HDR image compression with optimized JPEG coding},\n  year = {2017},\n  pages = {1539-1543},\n  abstract = {This paper presents an efficient compression system adapted to High Dynamic Range (HDR) images. First a Tone Mapping Operator (TMO) generates the Low Dynamic Range (LDR) version of the HDR content together with its extra information. The obtained LDR image is encoded using an optimized JPEG coding scheme, whereas the extra information is encoded as side data. Specifically, the optimized JPEG based algorithm constructs near-optimal rate-distortion quantization tables using DCT coefficient distribution statistics and Lagrangian optimization approach. To ensure accurate HDR reconstruction, the extra information is compressed with conventional JPEG encoder using the highest quality level. The aim of the proposed HDR coding system is twofold. First, it performs a bit allocation mechanism, to achieve near-optimal rate control. Second, it maintains the backward compatibility with the conventional JPEG. Experiments show that the compression performance of the proposed HDR coder outperforms that of the reference method.},\n  keywords = {data compression;discrete cosine transforms;image coding;image reconstruction;optimisation;quantisation (signal);rate distortion theory;statistical distributions;HDR coding system;near-optimal rate control;HDR image compression;High Dynamic Range images;Tone Mapping Operator;LDR image;optimized JPEG coding scheme;near-optimal rate-distortion quantization tables;DCT coefficient distribution statistics;Lagrangian optimization approach;Low Dynamic Range image;HDR reconstruction;bit allocation mechanism;Transform coding;Image coding;Discrete cosine transforms;Bit rate;Rate-distortion;Quantization (signal);Distortion},\n  doi = {10.23919/EUSIPCO.2017.8081467},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346606.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents an efficient compression system adapted to High Dynamic Range (HDR) images. First, a Tone Mapping Operator (TMO) generates the Low Dynamic Range (LDR) version of the HDR content together with extra information. The obtained LDR image is encoded using an optimized JPEG coding scheme, whereas the extra information is encoded as side data. Specifically, the optimized JPEG-based algorithm constructs near-optimal rate-distortion quantization tables using DCT coefficient distribution statistics and a Lagrangian optimization approach. To ensure accurate HDR reconstruction, the extra information is compressed with a conventional JPEG encoder at the highest quality level. The aim of the proposed HDR coding system is twofold. First, it performs a bit allocation mechanism to achieve near-optimal rate control. Second, it maintains backward compatibility with conventional JPEG. Experiments show that the compression performance of the proposed HDR coder outperforms that of the reference method.\n
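A toy version of the rate-distortion search over quantization tables: scale a base table, measure distortion (MSE) and a rate proxy (entropy of the quantized DCT coefficients), and keep the scale minimizing D + lambda*R. The scalar-scaling search and the entropy rate proxy are simplifying assumptions, and `base` is a hypothetical base table.

import numpy as np
from scipy.fft import dctn, idctn

def rd_cost(blocks, qtable, lam):
    D = R = 0.0
    for b in blocks:
        C = dctn(b, norm='ortho')
        q = np.round(C / qtable)
        D += ((b - idctn(q * qtable, norm='ortho')) ** 2).mean()
        _, cnt = np.unique(q, return_counts=True)
        p = cnt / cnt.sum()
        R += -(p * np.log2(p)).sum()
    return D + lam * R

# usage sketch: blocks = list of 8x8 image tiles, base = a base luma table
# best = min(np.linspace(0.2, 3.0, 15), key=lambda s: rd_cost(blocks, s * base, lam=0.1))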
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The iterative 2D windowed Fourier transform: A SOS approach to speckle noise reduction in digital holography.\n \n \n \n \n\n\n \n Montrésor, S.; Picart, P.; and Karray, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1544-1548, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081468,\n  author = {S. Montrésor and P. Picart and M. Karray},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {The iterative 2D windowed Fourier transform: A SOS approach to speckle noise reduction in digital holography},\n  year = {2017},\n  pages = {1544-1548},\n  abstract = {This paper addresses the problem of the reduction of speckle noise corrupting phase images from laser digital holography technique. It present an iterative denoising algorithm based on the 2-D windowed Fourier Transform. The algorithm is a new approach based on the SOS procedure recently proposed by Y. Romano and M. Elad adapted to phase processing. The approach is proposed in the case of the 2-D windowed Fourier Transform algorithm applied to phase maps which constitutes the state of the art in the field of digital holography. The reason is that the sum and difference operations used in the SOS procedure cannot be computed directly on phase maps but using a sine and cosine representation in order to avoid 2n phase jumps. Results on simulated phase maps including realistic noise conditions encountered in digital holography show the advantage of the proposed iterative approach. The paper proposes the application of the method to denoising of phase images from digital three color holography applied to cracks characterization of a composite materiel under mechanical strength test.},\n  keywords = {Fourier transforms;holography;iterative methods;signal denoising;speckle;simulated phase maps;realistic noise conditions;iterative approach;phase images;digital three color holography;SOS approach;noise reduction;speckle noise;laser digital holography technique;iterative denoising algorithm;SOS procedure;2n phase jumps;iterative 2D windowed Fourier Transform algorithm;Holography;Signal to noise ratio;Speckle;Signal processing algorithms;Noise reduction;Fourier transforms;Noise measurement;denoising;image processing;speckle noise;phase;iterative algorithms;windowed Fourier Transform;digital holography},\n  doi = {10.23919/EUSIPCO.2017.8081468},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345517.pdf},\n}\n\n
\n
\n\n\n
\n This paper addresses the problem of reducing the speckle noise that corrupts phase images obtained by laser digital holography. It presents an iterative denoising algorithm based on the 2-D windowed Fourier transform, which constitutes the state of the art for phase-map denoising in digital holography. The algorithm is a new approach that adapts the SOS procedure recently proposed by Y. Romano and M. Elad to phase processing. The adaptation is needed because the sum and difference operations used in the SOS procedure cannot be computed directly on phase maps; they are instead computed on a sine and cosine representation in order to avoid 2π phase jumps. Results on simulated phase maps, including realistic noise conditions encountered in digital holography, show the advantage of the proposed iterative approach. The paper also applies the method to denoising phase images from digital three-color holography used for crack characterization of a composite material under a mechanical strength test.\n
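A sketch of the adaptation that makes SOS usable on phase: run the strengthen-operate-subtract recursion on cos(phi) and sin(phi) images, where sums are well defined, and recombine with arctan2. A Gaussian filter stands in for the windowed-Fourier-transform denoiser, and rho is illustrative.

import numpy as np
from scipy.ndimage import gaussian_filter

def sos_phase_denoise(phi_noisy, rho=1.0, iters=5):
    c, s = np.cos(phi_noisy), np.sin(phi_noisy)
    xc, xs = c.copy(), s.copy()
    for _ in range(iters):
        xc = gaussian_filter(c + rho * xc, 2.0) - rho * xc   # SOS recursion
        xs = gaussian_filter(s + rho * xs, 2.0) - rho * xs
    return np.arctan2(xs, xc)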
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MAP tomographic reconstruction with a spatially adaptive hierarchical image model.\n \n \n \n \n\n\n \n Nikou, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1549-1553, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MAPPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081469,\n  author = {C. Nikou},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {MAP tomographic reconstruction with a spatially adaptive hierarchical image model},\n  year = {2017},\n  pages = {1549-1553},\n  abstract = {A method for penalized likelihood tomographic reconstruction is presented which is based on a spatially adaptive stochastic image model. The model imposes onto the image a smoothing Gaussian prior whose parameters follow a Gamma distribution. Three variations of the model are examined: (i) a stationary model, where the Gamma distribution has the same constant parameter for the entire image, (ii) a non stationary model, where this parameter varies with respect to location and (iii) a non stationary directional model where the parameter varies also with respect to orientation (horizontal or vertical direction). In all cases, the MAP criterion provides a closed form solution for both the unknown image and the parameters of the model. Numerical experiments showed that the reconstructions obtained using the proposed image priors outperform the state of the art EM based methods.},\n  keywords = {gamma distribution;image reconstruction;iterative methods;maximum likelihood estimation;stochastic processes;constant parameter;MAP criterion;image priors;MAP tomographic reconstruction;spatially adaptive hierarchical image model;penalized likelihood tomographic reconstruction;spatially adaptive stochastic image model;Gamma distribution;stationary model;image reconstructions;EM based methods;smoothing Gaussian prior;horizontal direction;vertical direction;closed form solution;Adaptation models;Image reconstruction;Computational modeling;Tomography;Mathematical model;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081469},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570348171.pdf},\n}\n\n
\n
\n\n\n
\n A method for penalized-likelihood tomographic reconstruction is presented which is based on a spatially adaptive stochastic image model. The model imposes on the image a smoothing Gaussian prior whose parameters follow a Gamma distribution. Three variations of the model are examined: (i) a stationary model, where the Gamma distribution has the same constant parameter for the entire image; (ii) a non-stationary model, where this parameter varies with location; and (iii) a non-stationary directional model, where the parameter also varies with orientation (horizontal or vertical direction). In all cases, the MAP criterion provides a closed-form solution for both the unknown image and the parameters of the model. Numerical experiments showed that the reconstructions obtained using the proposed image priors outperform state-of-the-art EM-based methods.\n
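A generic alternating scheme in the spirit of the model, not the paper's closed-form updates: gradient steps on the tomographic data term (skimage's radon/iradon as an approximate projector pair) interleaved with re-estimation of per-pixel smoothing weights from a Gamma posterior mean a / (b + |grad x|^2). All constants are assumptions.

import numpy as np
from skimage.transform import radon, iradon

def map_recon(sino, theta, iters=10, mu=1e-3, a=2.0, b=0.1):
    x = iradon(sino, theta=theta)                  # FBP initialization
    for _ in range(iters):
        resid = sino - radon(x, theta=theta)
        x = x + mu * iradon(resid, theta=theta, filter_name=None)  # data step
        gx, gy = np.gradient(x)
        w = a / (b + gx ** 2 + gy ** 2)            # adaptive Gamma-prior weights
        div = np.gradient(w * gx, axis=0) + np.gradient(w * gy, axis=1)
        x = x + mu * div                           # weighted-smoothness step
    return x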
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Topographical pattern analysis using wavelet based coherence connectivity estimation in the distinction of meditation and non-meditation EEG.\n \n \n \n \n\n\n \n Shaw, L.; and Routray, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1554-1558, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"TopographicalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081470,\n  author = {L. Shaw and A. Routray},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Topographical pattern analysis using wavelet based coherence connectivity estimation in the distinction of meditation and non-meditation EEG},\n  year = {2017},\n  pages = {1554-1558},\n  abstract = {Classification of EEG signal involved in a particular cognitive activity has found many application in brain-computer interface (BCI). In specific, use of classification algorithms to highly multivariate non-stationary recordings like EEG is a challenging and promising task. This study investigated two sub-stantial novelty of the topics, (1) Distinction between meditation (Kriya Yoga) and non-meditation state allied EEG, (2) Characterization of the underlying mechanism of cognitive process that is associated with meditation using topographical analysis. The topographic wavelet coherence based brain connectivity between two different groups is shown. Two groups of data, one with 23 meditators (meditator group) and other with ten non-meditators (controlled group) are analyzed. The spatial distribution between two groups can be well distinguished by the topographical approach. The quantification has been done by the colour intensity embedded in the topographical plots. The wavelet coherence is found to be a different parameter to represent the distinctiveness between two groups. The time-frequency quantification regarding wavelet coherence spectrum is shown the unique patterns among meditators and non-meditators. Thus time-frequency based wavelet coherence has found to be an unusual brain pattern in the distinction between meditators and non-meditators.},\n  keywords = {bioelectric potentials;brain-computer interfaces;cognition;electroencephalography;medical signal processing;neurophysiology;signal classification;time-frequency analysis;wavelet transforms;topographical pattern analysis;brain-computer interface;EEG signal classification;multivariate nonstationary recordings;cognitive activity estimation;topographic wavelet coherence based brain connectivity estimation;time-frequency based wavelet coherence connectivity estimation;Coherence;Electroencephalography;Wavelet analysis;Time series analysis;Wavelet transforms;Time-frequency analysis;Estimation},\n  doi = {10.23919/EUSIPCO.2017.8081470},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347702.pdf},\n}\n\n
\n
\n\n\n
\n Classification of the EEG signals involved in a particular cognitive activity has found many applications in brain-computer interfaces (BCI). In particular, applying classification algorithms to highly multivariate non-stationary recordings such as EEG is a challenging and promising task. This study investigates two substantial novelties: (1) the distinction between EEG associated with a meditation (Kriya Yoga) state and a non-meditation state, and (2) the characterization, using topographical analysis, of the underlying cognitive mechanism associated with meditation. Topographic, wavelet-coherence-based brain connectivity is computed for two groups of data, one with 23 meditators (meditator group) and the other with ten non-meditators (control group). The spatial distributions of the two groups can be well distinguished by the topographical approach, with the quantification given by the colour intensity embedded in the topographical plots. Wavelet coherence proves to be a distinguishing parameter for representing the difference between the two groups: the time-frequency quantification of the wavelet coherence spectrum reveals patterns unique to meditators versus non-meditators. Time-frequency-based wavelet coherence is thus found to expose distinctive brain patterns that separate meditators from non-meditators.\n
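Wavelet coherence itself takes some machinery; the sketch below computes the simpler Fourier analogue, magnitude-squared coherence per channel pair in a band, reduced to one value per electrode for a topography-style map. The sampling rate, band, and window length are illustrative.

import numpy as np
from scipy.signal import coherence

def band_coherence_map(eeg, fs=256.0, band=(8.0, 12.0)):
    """eeg: (n_channels, n_samples) -> per-channel mean coherence in `band`."""
    n = eeg.shape[0]
    M = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            f, C = coherence(eeg[i], eeg[j], fs=fs, nperseg=512)
            sel = (f >= band[0]) & (f <= band[1])
            M[i, j] = M[j, i] = C[sel].mean()
    return M.mean(axis=1)    # one value per electrode, ready for a scalp plot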
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Person re-identification based on deep multi-instance learning.\n \n \n \n \n\n\n \n Varga, D.; and Szirányi, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1559-1563, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PersonPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081471,\n  author = {D. Varga and T. Szirányi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Person re-identification based on deep multi-instance learning},\n  year = {2017},\n  pages = {1559-1563},\n  abstract = {Person re-identification is one of the widely studied research topic in the fields of computer vision and pattern recognition. In this paper, we present a deep multi-instance learning approach for person re-identification. Since most publicly available databases for pedestrian re-identification are not enough big, over-fitting problems occur in deep learning architectures. To tackle this problem, person re-identification is expressed as a deep multi-instance learning issue. Therefore, a multi-scale feature learning process is introduced which is driven by optimizing a novel cost function. We report on experiments and comparisons to other state-of-the-art algorithms using publicly available databases such as VIPeR and ETHZ.},\n  keywords = {computer vision;convolution;feature extraction;learning (artificial intelligence);neural net architecture;pedestrian re-identification;deep learning architectures;person re-identification;computer vision;pattern recognition;deep multiinstance learning approach;multiscale feature learning;CNN architecture;Feature extraction;Training;Measurement;Computer architecture;Cameras;Probes;Machine learning},\n  doi = {10.23919/EUSIPCO.2017.8081471},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347386.pdf},\n}\n\n
\n
\n\n\n
\n Person re-identification is one of the most widely studied research topics in the fields of computer vision and pattern recognition. In this paper, we present a deep multi-instance learning approach for person re-identification. Since most publicly available databases for pedestrian re-identification are not big enough, over-fitting problems occur in deep learning architectures. To tackle this problem, person re-identification is expressed as a deep multi-instance learning issue, and a multi-scale feature learning process is introduced which is driven by optimizing a novel cost function. We report on experiments and comparisons to other state-of-the-art algorithms using publicly available databases such as VIPeR and ETHZ.\n
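The multi-instance formulation, stripped of the deep network: a bag is positive if its best instance is, so a linear scorer can be trained through bag-level max pooling with a logistic loss. This illustrates only the MIL mechanism the paper builds its deep variant on; all settings are assumptions.

import numpy as np

def mil_sgd(bags, labels, dim, lr=0.1, epochs=50):
    w = np.zeros(dim)
    for _ in range(epochs):
        for X, y in zip(bags, labels):      # X: (n_instances, dim), y in {0, 1}
            i = np.argmax(X @ w)            # max-pooled (witness) instance
            p = 1.0 / (1.0 + np.exp(-(X[i] @ w)))
            w += lr * (y - p) * X[i]        # logistic gradient through the witness
    return w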
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SparkDict: A fast dictionary learning algorithm.\n \n \n \n \n\n\n \n Schnier, T.; Bockelmann, C.; and Dekorsy, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1564-1568, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SparkDict:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081472,\n  author = {T. Schnier and C. Bockelmann and A. Dekorsy},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {SparkDict: A fast dictionary learning algorithm},\n  year = {2017},\n  pages = {1564-1568},\n  abstract = {For the always increasing amount of data new tools are needed to effectively harvest important information out of them. One of the core fields for data mining is Dictionary Learning, the search for a sparse representation of given data, which is widely used in signal processing and machine learning. In this paper we present a new algorithm in this field that is based on random projections of the data. In particular, we show that our proposition needs a lot less training samples and is a lot faster to achieve the same dictionary accuracy as state of the art algorithms, especially in the medium to high sparsity regions. As the spark, the minimum number of linear dependent columns of a matrix, plays an important role in the design of our contribution, we coined our contribution SparkDict.},\n  keywords = {data mining;image representation;learning (artificial intelligence);sparse matrices;data mining;sparse representation;signal processing;machine learning;high sparsity regions;SparkDict;fast dictionary learning algorithm;random data projections;Dictionaries;Signal processing algorithms;Machine learning;Sparks;Signal processing;Sparse matrices;Algorithm design and analysis;Dictionary Learning;Spark;Sparsity},\n  doi = {10.23919/EUSIPCO.2017.8081472},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341620.pdf},\n}\n\n
\n
\n\n\n
\n For the ever-increasing amount of data, new tools are needed to effectively harvest the important information it contains. One of the core techniques for data mining is dictionary learning, the search for a sparse representation of given data, which is widely used in signal processing and machine learning. In this paper we present a new algorithm in this field that is based on random projections of the data. In particular, we show that our proposal needs far fewer training samples and is much faster in reaching the same dictionary accuracy as state-of-the-art algorithms, especially in the medium-to-high sparsity regions. As the spark, the minimum number of linearly dependent columns of a matrix, plays an important role in the design of our contribution, we named it SparkDict.\n
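One plausible reading of "random projections of the data", sketched with sklearn: compress the training set into a few random mixtures of samples before dictionary learning. The authors' exact construction, which is tied to the spark of the dictionary, is in the paper.

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.default_rng(0)
Y = rng.standard_normal((5000, 64))           # many training samples, dim 64
S = rng.standard_normal((400, 5000)) / np.sqrt(5000)
Y_small = S @ Y                               # 400 random mixtures of the samples
dico = MiniBatchDictionaryLearning(n_components=32, random_state=0).fit(Y_small)
D = dico.components_                          # dictionary learned from far less data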
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Penalty learning for changepoint detection.\n \n \n \n \n\n\n \n Truong, C.; Gudre, L.; and Vayatis, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1569-1573, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PenaltyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081473,\n  author = {C. Truong and L. Gudre and N. Vayatis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Penalty learning for changepoint detection},\n  year = {2017},\n  pages = {1569-1573},\n  abstract = {We consider the problem of signal segmentation in the setup of supervised learning. The supervision lies here in the existence of labelled change points in a historical database of similar signals. Typical segmentation techniques rely on a penalized least square procedure where the smoothing parameter is fixed arbitrarily. We introduce the alpin (Adaptive Linear Penalty INference) algorithm to tune automatically the smoothing parameter. ALPIN has linear complexity with respect to the sample size and turns out to be robust with respect to noise and diverse annotation strategies. Numerical experiments reveal the efficiency of ALPIN compared to state-of-the-art methods.},\n  keywords = {inference mechanisms;learning (artificial intelligence);least squares approximations;signal processing;smoothing methods;penalized least square procedure;smoothing parameter;linear complexity;diverse annotation strategies;changepoint detection;signal segmentation;supervised learning;penalty learning;adaptive linear penalty inference;ALPIN algorithm;Smoothing methods;Signal processing algorithms;Complexity theory;Measurement;Signal processing;Databases},\n  doi = {10.23919/EUSIPCO.2017.8081473},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346413.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of signal segmentation in a supervised learning setup, where the supervision lies in the existence of labelled change points in a historical database of similar signals. Typical segmentation techniques rely on a penalized least-squares procedure in which the smoothing parameter is fixed arbitrarily. We introduce the ALPIN (Adaptive Linear Penalty INference) algorithm to tune the smoothing parameter automatically. ALPIN has linear complexity with respect to the sample size and turns out to be robust with respect to noise and to diverse annotation strategies. Numerical experiments demonstrate the efficiency of ALPIN compared to state-of-the-art methods.\n
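What penalty learning buys can be mimicked crudely with an off-the-shelf segmenter: choose the penalty that best reproduces the labelled change points on the training database. ruptures' Pelt and the simple error count below are stand-ins; ALPIN infers the penalty directly rather than grid-searching it.

import numpy as np
import ruptures as rpt

def best_penalty(signals, true_bkps, grid=np.logspace(-1, 3, 20)):
    def n_errors(pen):
        err = 0
        for sig, truth in zip(signals, true_bkps):
            pred = rpt.Pelt(model="l2").fit(sig).predict(pen=pen)
            err += abs(len(pred) - len(truth))   # crude annotation-error count
        return err
    return min(grid, key=n_errors)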
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An effective feature selection method based on pair-wise feature proximity for high dimensional low sample size data.\n \n \n \n \n\n\n \n Happy, S. L.; Mohanty, R.; and Routray, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1574-1578, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081474,\n  author = {S. L. Happy and R. Mohanty and A. Routray},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An effective feature selection method based on pair-wise feature proximity for high dimensional low sample size data},\n  year = {2017},\n  pages = {1574-1578},\n  abstract = {Feature selection has been studied widely in the literature. However, the efficacy of the selection criteria for low sample size applications is neglected in most cases. Most of the existing feature selection criteria are based on the sample similarity. However, the distance measures become insignificant for high dimensional low sample size (HDLSS) data. Moreover, the variance of a feature with a few samples is pointless unless it represents the data distribution efficiently. Instead of looking at the samples in groups, we evaluate their efficiency based on pair-wise fashion. In our investigation, we noticed that considering a pair of samples at a time and selecting the features that bring them closer or put them far away is a better choice for feature selection. Experimental results on benchmark data sets demonstrate the effectiveness of the proposed method with low sample size, which outperforms many other state-of-the-art feature selection methods.},\n  keywords = {feature selection;pattern classification;high dimensional low sample size data;sample similarity;data distribution;feature selection methods;pairwise feature proximity;feature selection criteria;HDLSS data;Feature extraction;Euclidean distance;Europe;Signal processing;Electronic mail;Electrical engineering;Feature selection;pair-wise feature proximity;high dimensional low sample size data},\n  doi = {10.23919/EUSIPCO.2017.8081474},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347049.pdf},\n}\n\n
\n
\n\n\n
\n Feature selection has been studied widely in the literature. However, the efficacy of the selection criteria for low-sample-size applications is neglected in most cases. Most of the existing feature selection criteria are based on sample similarity, but distance measures become insignificant for high dimensional low sample size (HDLSS) data. Moreover, the variance of a feature computed from only a few samples is meaningless unless the feature represents the data distribution efficiently. Instead of looking at the samples in groups, we evaluate their efficiency in a pair-wise fashion. In our investigation, we noticed that considering a pair of samples at a time and selecting the features that bring them closer or push them farther apart is a better choice for feature selection. Experimental results on benchmark data sets demonstrate the effectiveness of the proposed method at low sample sizes, where it outperforms many other state-of-the-art feature selection methods.\n
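A sketch of the pair-wise scoring idea: for every sample pair, credit the features that keep same-class pairs close and different-class pairs far apart, then select the top-scoring features. The quartile-based credit rule is an assumed concrete instantiation.

import numpy as np
from itertools import combinations

def pfp_scores(X, y):
    n, d = X.shape
    k = max(1, d // 4)                            # assumed credited fraction
    score = np.zeros(d)
    for i, j in combinations(range(n), 2):
        ranks = np.argsort(np.abs(X[i] - X[j]))   # features ordered near -> far
        if y[i] == y[j]:
            score[ranks[:k]] += 1.0               # reward keeping the pair close
        else:
            score[ranks[-k:]] += 1.0              # reward separating the pair
    return score                                  # keep np.argsort(score)[-n_sel:]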
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dictionary learning for spontaneous neural activity modeling.\n \n \n \n \n\n\n \n Troullinou, E.; Tsagkatakis, G.; Palagina, G.; Papadopouli, M.; Smirnakis, S. M.; and Tsakalides, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1579-1583, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DictionaryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081475,\n  author = {E. Troullinou and G. Tsagkatakis and G. Palagina and M. Papadopouli and S. M. Smirnakis and P. Tsakalides},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Dictionary learning for spontaneous neural activity modeling},\n  year = {2017},\n  pages = {1579-1583},\n  abstract = {Modeling the activity of an ensemble of neurons can provide critical insights into the workings of the brain. In this work we examine if learning based signal modeling can contribute to a high quality modeling of neuronal signal data. To that end, we employ the sparse coding and dictionary learning schemes for capturing the behavior of neuronal responses into a small number of representative prototypical signals. Performance is measured by the reconstruction quality of clean and noisy test signals, which serves as an indicator of the generalization and discrimination capabilities of the learned dictionaries. To validate the merits of the proposed approach, a novel dataset of the actual recordings from 183 neurons from the primary visual cortex of a mouse in early postnatal development was developed and investigated. The results demonstrate that high quality modeling of testing data can be achieved from a small number of training examples and that the learned dictionaries exhibit significant specificity when introducing noise.},\n  keywords = {brain;dictionaries;learning (artificial intelligence);medical signal processing;neurophysiology;signal reconstruction;signal representation;spontaneous neural activity modeling;neuronal signal data;sparse coding;neuronal responses;dictionary learning;learning based signal modeling;clean test signal reconstruction quality;noisy test signal reconstruction quality;mouse primary visual cortex;postnatal development;Dictionaries;Training;Neurons;Machine learning;Data models;Encoding;Visualization},\n  doi = {10.23919/EUSIPCO.2017.8081475},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347608.pdf},\n}\n\n
\n
\n\n\n
\n Modeling the activity of an ensemble of neurons can provide critical insights into the workings of the brain. In this work we examine whether learning-based signal modeling can contribute to high-quality modeling of neuronal signal data. To that end, we employ sparse coding and dictionary learning schemes for capturing the behavior of neuronal responses into a small number of representative prototypical signals. Performance is measured by the reconstruction quality of clean and noisy test signals, which serves as an indicator of the generalization and discrimination capabilities of the learned dictionaries. To validate the merits of the proposed approach, a novel dataset of actual recordings from 183 neurons from the primary visual cortex of a mouse in early postnatal development was developed and investigated. The results demonstrate that high-quality modeling of testing data can be achieved from a small number of training examples and that the learned dictionaries exhibit significant specificity when noise is introduced.\n
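For readers who want to try the general recipe, here is a minimal sparse-coding and dictionary-learning sketch on surrogate traces. It uses scikit-learn, which the paper does not necessarily use; the data, the number of atoms, and the sparsity level are made up:

```python
# Hedged sketch: learn a dictionary from synthetic "neuronal" traces
# and measure reconstruction quality, in the spirit of the paper.
import numpy as np
from sklearn.decomposition import DictionaryLearning

rng = np.random.default_rng(1)
protos = rng.normal(size=(5, 40))                 # 5 prototypical signals
codes = rng.random((200, 5)) * (rng.random((200, 5)) < 0.3)
X = codes @ protos + 0.05 * rng.normal(size=(200, 40))

dico = DictionaryLearning(n_components=5, transform_algorithm="omp",
                          transform_n_nonzero_coefs=2, random_state=0)
A = dico.fit_transform(X)                         # sparse codes
X_hat = A @ dico.components_                      # reconstruction
print("relative error:", np.linalg.norm(X - X_hat) / np.linalg.norm(X))
```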
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed particle filtering under real-time constraints.\n \n \n \n \n\n\n \n Bozkurt, A. K.; and Cemgil, A. T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1584-1588, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081476,\n  author = {A. K. Bozkurt and A. T. Cemgil},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed particle filtering under real-time constraints},\n  year = {2017},\n  pages = {1584-1588},\n  abstract = {Particle filters are powerful methods for state estimation in nonlinear/non-Gaussian dynamical systems. However due to the heavy computational requirements, they may not satisfy the real-time constraints in many applications requiring a large number of particles. By means of distributed implementation, real-time particle filtering can be achieved. However, the resampling stage in particle filters requires particle interaction which causes communication overhead. In this work, we propose a distributed resampling algorithm based on Butterfly Resampling previously described in the literature. We describe three interaction schemes (i) the complete interaction, (ii) the pairwise interaction where the nodes are constrained to communicate in pairs and (iii) the partial pairwise interaction in which only one pair is allowed to communicate. The goal is to diminish the communication cost in exchange for negligible loss of effective sample size. We conduct experiments on a cluster environment and compare our methods in terms of execution time, communication time and effective sample size. We find that the sparse interaction schemes show better performance for distributed systems and they keep the effective sample size nearly as high as the complete interaction scheme does.},\n  keywords = {particle filtering (numerical methods);signal sampling;state estimation;communication cost;execution time;communication time;sparse interaction schemes;distributed systems;complete interaction scheme;real-time constraints;state estimation;distributed implementation;real-time particle filtering;resampling stage;particle interaction;communication overhead;distributed resampling algorithm;Butterfly Resampling;partial pairwise interaction;distributed particle filtering;nonlinear-non-Gaussian dynamical systems;Sparse matrices;Real-time systems;Europe;Signal processing;Signal processing algorithms;Algorithm design and analysis;RNA},\n  doi = {10.23919/EUSIPCO.2017.8081476},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347786.pdf},\n}\n\n
\n
\n\n\n
\n Particle filters are powerful methods for state estimation in nonlinear/non-Gaussian dynamical systems. However, due to their heavy computational requirements, they may not satisfy the real-time constraints in many applications requiring a large number of particles. By means of distributed implementation, real-time particle filtering can be achieved. However, the resampling stage in particle filters requires particle interaction, which causes communication overhead. In this work, we propose a distributed resampling algorithm based on Butterfly Resampling, previously described in the literature. We describe three interaction schemes: (i) complete interaction; (ii) pairwise interaction, where the nodes are constrained to communicate in pairs; and (iii) partial pairwise interaction, in which only one pair is allowed to communicate. The goal is to diminish the communication cost in exchange for a negligible loss of effective sample size. We conduct experiments in a cluster environment and compare our methods in terms of execution time, communication time and effective sample size. We find that the sparse interaction schemes show better performance for distributed systems while keeping the effective sample size nearly as high as the complete interaction scheme does.\n
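The two quantities traded off above are easy to state in code. A self-contained sketch of the effective sample size and a plain (single-node) systematic resampling step follows; the distributed and butterfly variants are not reproduced here:

```python
# Hedged sketch: effective sample size and basic systematic resampling.
import numpy as np

def effective_sample_size(w):
    """ESS = 1 / sum(normalized weights squared); equals N when uniform."""
    w = w / w.sum()
    return 1.0 / np.sum(w ** 2)

def systematic_resample(particles, w, rng):
    """Draw N particles with probabilities w using one uniform offset."""
    n = len(w)
    positions = (rng.random() + np.arange(n)) / n
    return particles[np.searchsorted(np.cumsum(w / w.sum()), positions)]

rng = np.random.default_rng(0)
particles = rng.normal(size=1000)
w = np.exp(-0.5 * particles ** 2)          # toy likelihood weights
print("ESS before resampling:", effective_sample_size(w))
particles = systematic_resample(particles, w, rng)
print("ESS after  (uniform) :", effective_sample_size(np.ones(len(particles))))
```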
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An approximate minimum variance filter for nonlinear systems with randomly delayed observations.\n \n \n \n \n\n\n \n Date, P.; and Allahyani, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1589-1593, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081477,\n  author = {P. Date and S. Allahyani},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An approximate minimum variance filter for nonlinear systems with randomly delayed observations},\n  year = {2017},\n  pages = {1589-1593},\n  abstract = {In this paper, we extend our earlier results on minimum variance filter for systems with additive multiplicative noise in two different ways. Firstly, we propose a novel characterization of the linearization error in terms of multiplicative noise. Secondly, we also allow for random delay of up to one time step in the measurement. The delay is modelled by Bernoulli random variables. We derive a closed-form expression for the minimum variance filter for the resulting system with a linearized state transition equation, accounting for both the linearization error as well as the random delay. The utility of the proposed filtering algorithm is demonstrated through numerical experiments.},\n  keywords = {error analysis;filtering theory;Kalman filters;random processes;filtering algorithm;approximate minimum variance filter;nonlinear systems;randomly delayed observations;additive multiplicative noise;linearization error;Bernoulli random variables;linearized state transition equation;Delays;Mathematical model;Signal processing algorithms;Uncertainty;Kalman filters;Covariance matrices;Multiplicative noise;nonlinear filtering;delayed measurements},\n  doi = {10.23919/EUSIPCO.2017.8081477},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570338603.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we extend our earlier results on the minimum variance filter for systems with additive and multiplicative noise in two different ways. Firstly, we propose a novel characterization of the linearization error in terms of multiplicative noise. Secondly, we also allow for a random delay of up to one time step in the measurement. The delay is modelled by Bernoulli random variables. We derive a closed-form expression for the minimum variance filter for the resulting system with a linearized state transition equation, accounting for both the linearization error and the random delay. The utility of the proposed filtering algorithm is demonstrated through numerical experiments.\n
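A toy simulation of the delayed-measurement model makes the Bernoulli mechanism concrete; the state equation, noise levels, and delay probability below are illustrative only and are not taken from the paper:

```python
# Hedged sketch: with probability p the filter receives the previous
# measurement instead of the current one (one-step Bernoulli delay).
import numpy as np

rng = np.random.default_rng(0)
n, p = 200, 0.3                          # time steps, delay probability
x = np.zeros(n)
for k in range(1, n):                    # a mildly nonlinear state recursion
    x[k] = 0.9 * x[k-1] + 0.1 * np.sin(x[k-1]) + 0.1 * rng.normal()
z = x + 0.2 * rng.normal(size=n)         # on-time measurements
beta = rng.random(n) < p                 # Bernoulli delay indicators
y = np.where(beta, np.r_[z[0], z[:-1]], z)   # received: delayed where beta
print("fraction delayed:", beta.mean())
```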
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Online learning in L2 space with multiple Gaussian kernels.\n \n \n \n \n\n\n \n Ohnishi, M.; and Yukawa, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1594-1598, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnlinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081478,\n  author = {M. Ohnishi and M. Yukawa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Online learning in L2 space with multiple Gaussian kernels},\n  year = {2017},\n  pages = {1594-1598},\n  abstract = {We present a novel online learning paradigm for nonlinear function estimation based on iterative orthogonal projections in an L2 space reflecting the stochastic property of input signals. An online algorithm is built upon the fact that any finite dimensional subspace has a reproducing kernel, which is given in terms of the Gram matrix of its basis. The basis used in the present study involves multiple Gaussian kernels. The sequence generated by the algorithm is expected to approach towards the best approximation, in the L2-norm sense, of the nonlinear function to be estimated. This is in sharp contrast to the conventional kernel adaptive filtering paradigm because the best approximation in the reproducing kernel Hilbert space generally differs from the minimum mean squared error estimator over the subspace (Yukawa and Müller 2016). Numerical examples show the efficacy of the proposed approach.},\n  keywords = {approximation theory;Gaussian processes;Hilbert spaces;iterative methods;learning (artificial intelligence);least mean squares methods;nonlinear functions;multiple Gaussian kernels;nonlinear function estimation;iterative orthogonal projections;stochastic property;online algorithm;finite dimensional subspace;Gram matrix;conventional kernel adaptive;reproducing kernel Hilbert space;online learning paradigm;minimum mean squared error estimator;Kernel;Dictionaries;Signal processing algorithms;Approximation algorithms;Measurement;Convergence;Complexity theory},\n  doi = {10.23919/EUSIPCO.2017.8081478},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342892.pdf},\n}\n\n
\n
\n\n\n
\n We present a novel online learning paradigm for nonlinear function estimation based on iterative orthogonal projections in an L2 space reflecting the stochastic property of the input signals. An online algorithm is built upon the fact that any finite dimensional subspace has a reproducing kernel, which is given in terms of the Gram matrix of its basis. The basis used in the present study involves multiple Gaussian kernels. The sequence generated by the algorithm is expected to approach the best approximation, in the L2-norm sense, of the nonlinear function to be estimated. This is in sharp contrast to the conventional kernel adaptive filtering paradigm, because the best approximation in the reproducing kernel Hilbert space generally differs from the minimum mean squared error estimator over the subspace (Yukawa and Müller 2016). Numerical examples show the efficacy of the proposed approach.\n
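A minimal online sketch in the same spirit, assuming a fixed dictionary of Gaussian kernels of two widths and a normalized-LMS coefficient update; the paper's L2-space projections and multikernel design are richer than this toy:

```python
# Hedged sketch: online nonlinear regression over a fixed multikernel
# Gaussian dictionary with a normalized-LMS update.
import numpy as np

rng = np.random.default_rng(0)
centers = np.linspace(-3, 3, 25)        # shared kernel centres
widths = (0.3, 1.0)                     # two Gaussian kernel widths

def phi(x):
    """Feature vector: Gaussians of both widths at all centres."""
    return np.concatenate([np.exp(-(x - centers) ** 2 / (2 * s ** 2))
                           for s in widths])

w = np.zeros(2 * centers.size)
mu = 0.5                                # step size
for _ in range(5000):                   # streaming (input, target) samples
    x = rng.uniform(-3, 3)
    d = np.sinc(x) + 0.05 * rng.normal()
    k = phi(x)
    w += mu * (d - w @ k) * k / (k @ k + 1e-9)   # normalized-LMS update

grid = np.linspace(-3, 3, 50)
print(np.mean([(np.sinc(t) - w @ phi(t)) ** 2 for t in grid]))  # small MSE
```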
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maxout filter networks referencing morphological filters.\n \n \n \n \n\n\n \n Nakashizuka, M.; Kobayashi, K.; Ishikawa, T.; and Itoi, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1599-1603, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MaxoutPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081479,\n  author = {M. Nakashizuka and K. Kobayashi and T. Ishikawa and K. Itoi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Maxout filter networks referencing morphological filters},\n  year = {2017},\n  pages = {1599-1603},\n  abstract = {This paper presents nonlinear filters that are obtained from extensions of morphological filters. The proposed nonlinear filter consists of a convex and concave filter that are extensions of the dilation and erosion of morphological filter with the maxout activation function. Maxout can approximate arbitrary convex functions as piecewise linear functions, including the max function of the morphological filters. The class of the convex function hence includes the morphological dilation and can be trained for specific image processing tasks. In this paper, the closing filter is extended to a convex-concave filter with maxout. The convex-concave filter is trained for noise and mask removal with a training set. The examples of noise and mask removal show that the convex-concave filter can obtain a recovered image, whose quality is comparable to in-painting by using the total variation minimization with reduced computational cost without mask information of the corrupted pixels.},\n  keywords = {convex programming;image denoising;image filtering;image processing;mathematical morphology;minimisation;nonlinear filters;piecewise linear techniques;morphological dilation;closing filter;convex-concave filter;maxout filter networks;morphological filter;nonlinear filter;maxout activation function;approximate arbitrary convex functions;image processing;mask removal;noise removal;total variation minimization;corrupted pixels;Training;Convex functions;Neural networks;Image processing;Morphology;Noise reduction;Mathematical morphology;maxout;noise removal;nonlinear filter;neural network},\n  doi = {10.23919/EUSIPCO.2017.8081479},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347768.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents nonlinear filters that are obtained from extensions of morphological filters. The proposed nonlinear filter consists of convex and concave filters that extend the dilation and erosion of morphological filtering with the maxout activation function. Maxout can approximate arbitrary convex functions as piecewise linear functions, including the max function of the morphological filters. The class of convex functions hence includes the morphological dilation and can be trained for specific image processing tasks. In this paper, the closing filter is extended to a convex-concave filter with maxout. The convex-concave filter is trained for noise and mask removal with a training set. The examples of noise and mask removal show that the convex-concave filter can obtain a recovered image whose quality is comparable to in-painting using total variation minimization, at reduced computational cost and without mask information about the corrupted pixels.\n
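Both ingredients can be demonstrated in a few lines: a grey-scale dilation/erosion (via scipy.ndimage) and a maxout unit that reproduces the 3×3 dilation when its weights are one-hot. The weights below are fixed by hand rather than trained, so this only illustrates the containment argument, not the paper's learned filters:

```python
# Hedged sketch: morphological dilation/erosion, and a maxout unit
# whose one-hot weights reproduce the dilation exactly.
import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

img = np.random.default_rng(0).random((8, 8))
dil = grey_dilation(img, size=(3, 3))   # max over each 3x3 neighbourhood
ero = grey_erosion(img, size=(3, 3))    # min over each 3x3 neighbourhood

def maxout(patch, W, b):
    """Maxout unit: max over several affine responses of the patch."""
    return np.max(W @ patch.ravel() + b)

# With one-hot rows in W and b = 0, maxout equals the 3x3 dilation:
W, b = np.eye(9), np.zeros(9)
print(np.isclose(maxout(img[:3, :3], W, b), dil[1, 1]))   # True
```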
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Solution of the inverse Frobenius-Perron problem for semi-Markov chaotic maps via recursive Markov state disaggregation.\n \n \n \n \n\n\n \n McDonald, A.; and van Wyk, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1604-1608, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SolutionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081480,\n  author = {A. McDonald and M. {van Wyk}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Solution of the inverse Frobenius-Perron problem for semi-Markov chaotic maps via recursive Markov state disaggregation},\n  year = {2017},\n  pages = {1604-1608},\n  abstract = {A novel solution of the inverse Frobenius-Perron problem for constructing semi-Markov chaotic maps with prescribed statistical properties is presented. The proposed solution uses recursive Markov state disaggregation to construct an ergodic map with a piecewise constant invariant density function that approximates an arbitrary probability distribution over a compact interval. The solution is novel in the sense that it provides greater freedom, as compared to existing analytic solutions, in specifying the autocorrelation function of the semi-Markov map during its construction. The proposed solution is demonstrated by constructing multiple chaotic maps with invariant densities that provide an increasingly accurate approximation of the asymmetric beta probability distribution over the unit interval. It is demonstrated that normalised autocorrelation functions with components having different rates of decay and which alternate in sign between consecutive delays may be specified. It is concluded that the flexibility of the proposed solution facilitates its application towards modelling of random signals in various contexts.},\n  keywords = {chaos;Markov processes;probability;random processes;semiMarkov chaotic maps;recursive Markov state disaggregation;ergodic map;piecewise constant invariant density function;multiple chaotic maps;asymmetric beta probability distribution;normalised autocorrelation functions;statistical properties;arbitrary probability distribution;inverse Frobenius-Perron problem;Eigenvalues and eigenfunctions;Markov processes;Probability distribution;Signal processing;Probability density function;Europe;Correlation},\n  doi = {10.23919/EUSIPCO.2017.8081480},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347846.pdf},\n}\n\n
\n
\n\n\n
\n A novel solution of the inverse Frobenius-Perron problem for constructing semi-Markov chaotic maps with prescribed statistical properties is presented. The proposed solution uses recursive Markov state disaggregation to construct an ergodic map with a piecewise constant invariant density function that approximates an arbitrary probability distribution over a compact interval. The solution is novel in the sense that it provides greater freedom, as compared to existing analytic solutions, in specifying the autocorrelation function of the semi-Markov map during its construction. The proposed solution is demonstrated by constructing multiple chaotic maps with invariant densities that provide an increasingly accurate approximation of the asymmetric beta probability distribution over the unit interval. It is demonstrated that normalised autocorrelation functions with components having different rates of decay and which alternate in sign between consecutive delays may be specified. It is concluded that the flexibility of the proposed solution facilitates its application towards modelling of random signals in various contexts.\n
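This is not the paper's construction, but a quick look at the forward problem it inverts: iterating a chaotic map and histogramming the orbit recovers its invariant density. The logistic map is used here because its invariant density 1/(π√(x(1-x))) is known in closed form:

```python
# Hedged sketch: empirical invariant density of the logistic map.
import numpy as np

x, orbit = 0.2345, np.empty(100000)
for k in range(100000):                 # iterate x -> 4x(1-x)
    x = 4.0 * x * (1.0 - x)
    orbit[k] = x
hist, edges = np.histogram(orbit, bins=10, range=(0, 1), density=True)
print(np.round(hist, 2))                # U-shaped: high near 0 and 1,
                                        # matching 1/(pi*sqrt(x(1-x)))
```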
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Flexible and scalable transform-domain codebook for high bit rate CELP coders.\n \n \n \n \n\n\n \n Eksler, V.; Bessette, B.; Jelínek, M.; and Vaillancourt, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1609-1613, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"FlexiblePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081481,\n  author = {V. Eksler and B. Bessette and M. Jelínek and T. Vaillancourt},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Flexible and scalable transform-domain codebook for high bit rate CELP coders},\n  year = {2017},\n  pages = {1609-1613},\n  abstract = {The Code-Excited Linear Prediction (CELP) model is very efficient in coding speech at low bit rates. However, if the bit rate of the coder is increased, the CELP model does not gain in quality as quickly as other approaches. Moreover, the computational complexity of the CELP model generally increases significantly at higher bit rates. In this paper we focus on a technique that aims to overcome these limitations by means of a special transform-domain codebook within the CELP model. We show by the example of the AMR-WB codec that the CELP model with the new flexible and scalable codebook improves the quality at high bit rates at no additional complexity cost.},\n  keywords = {linear predictive coding;speech coding;transform-domain codebook;high bit rate CELP coders;code-excited linear prediction;speech coding;computational complexity;CELP model;AMR-WB codec;Bit rate;Technological innovation;Complexity theory;Codecs;Discrete cosine transforms;Speech;Computational modeling;Speech coding;ACELP;AMR-WB},\n  doi = {10.23919/EUSIPCO.2017.8081481},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570339044.pdf},\n}\n\n
\n
\n\n\n
\n The Code-Excited Linear Prediction (CELP) model is very efficient in coding speech at low bit rates. However, if the bit rate of the coder is increased, the CELP model does not gain in quality as quickly as other approaches. Moreover, the computational complexity of the CELP model generally increases significantly at higher bit rates. In this paper we focus on a technique that aims to overcome these limitations by means of a special transform-domain codebook within the CELP model. We show by the example of the AMR-WB codec that the CELP model with the new flexible and scalable codebook improves the quality at high bit rates at no additional complexity cost.\n
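The intuition behind a transform-domain codebook can be caricatured in a few lines: represent a frame in a transform domain and spend bits only on the strongest coefficients. The sketch below uses a DCT and coarse scalar quantization on a stand-in excitation frame; none of the AMR-WB/ACELP details are modelled:

```python
# Hedged sketch: keep-the-largest-coefficients coding in the DCT domain.
import numpy as np
from scipy.fft import dct, idct

rng = np.random.default_rng(0)
excitation = rng.normal(size=64)            # stand-in excitation frame

c = dct(excitation, norm="ortho")
keep = np.argsort(np.abs(c))[-16:]          # "codebook": 16 largest coeffs
c_q = np.zeros_like(c)
c_q[keep] = np.round(c[keep] * 8) / 8       # coarse scalar quantisation
decoded = idct(c_q, norm="ortho")

snr = 10 * np.log10(np.sum(excitation**2) / np.sum((excitation - decoded)**2))
print(round(snr, 1), "dB")
```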
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance analysis of several pitch detection algorithms on simulated and real noisy speech data.\n \n \n \n \n\n\n \n Jouvet, D.; and Laprie, Y.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1614-1618, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081482,\n  author = {D. Jouvet and Y. Laprie},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Performance analysis of several pitch detection algorithms on simulated and real noisy speech data},\n  year = {2017},\n  pages = {1614-1618},\n  abstract = {This paper analyses the performance of a large bunch of pitch detection algorithms on clean and noisy speech data. Two sets of noisy speech data are considered. One corresponds to simulated noisy data, and is obtained by adding several types of noise signals at various levels on the clean speech data of the Pitch-Tracking Database from Graz University of Technology (PTDB-TUG). The second one, SPEECON, was recorded in several different acoustic environments. The paper discusses the performance of pitch detection algorithms on the simulated noisy data, and on the real noisy data of the SPEECON corpus. Also, an analysis of the performance of the best pitch detection algorithm with respect to estimated signal-to-noise ratio (SNR) shows that very similar performance is observed on the real noisy data recorded in public places, and on the clean data with addition of babble noise.},\n  keywords = {speech enhancement;speech recognition;performance analysis;pitch detection algorithm;noisy speech data;simulated noisy data;clean speech data;Pitch-Tracking Database;Speech;Noise measurement;Detection algorithms;Signal to noise ratio;Signal processing algorithms;Frequency-domain analysis;Databases;Pitch;fundamental frequency;clean speech data;noisy speech data},\n  doi = {10.23919/EUSIPCO.2017.8081482},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342455.pdf},\n}\n\n
\n
\n\n\n
\n This paper analyses the performance of a large set of pitch detection algorithms on clean and noisy speech data. Two sets of noisy speech data are considered. One corresponds to simulated noisy data, and is obtained by adding several types of noise signals at various levels to the clean speech data of the Pitch-Tracking Database from Graz University of Technology (PTDB-TUG). The second one, SPEECON, was recorded in several different acoustic environments. The paper discusses the performance of pitch detection algorithms on the simulated noisy data, and on the real noisy data of the SPEECON corpus. Also, an analysis of the performance of the best pitch detection algorithm with respect to estimated signal-to-noise ratio (SNR) shows that very similar performance is observed on the real noisy data recorded in public places and on the clean data with added babble noise.\n
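The simulated-noise construction reduces to scaling a noise signal to a target SNR before adding it to the clean signal. A self-contained version with synthetic signals (the databases themselves are not reproduced):

```python
# Hedged sketch: mix noise into a clean signal at a prescribed SNR.
import numpy as np

def add_noise(clean, noise, snr_db):
    noise = noise[:len(clean)]
    p_clean = np.mean(clean ** 2)
    p_noise = np.mean(noise ** 2)
    g = np.sqrt(p_clean / (p_noise * 10 ** (snr_db / 10)))
    return clean + g * noise

rng = np.random.default_rng(0)
t = np.arange(16000) / 16000.0
clean = np.sin(2 * np.pi * 200 * t)            # stand-in for speech
noisy = add_noise(clean, rng.normal(size=t.size), snr_db=5.0)
snr = 10 * np.log10(np.mean(clean**2) / np.mean((noisy - clean)**2))
print(round(snr, 2))                           # 5.0 dB by construction
```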
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modeling formant dynamics in speech spectral envelopes.\n \n \n \n \n\n\n \n Craciun, A.; Paulus, J.; Sevkin, G.; and Bäckström, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1619-1623, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ModelingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081483,\n  author = {A. Craciun and J. Paulus and G. Sevkin and T. Bäckström},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Modeling formant dynamics in speech spectral envelopes},\n  year = {2017},\n  pages = {1619-1623},\n  abstract = {The spectral envelope of a speech signal encodes information about the characteristics of the speech source. As a result, spectral envelope modeling is a central task in speech applications, where tracking temporal transitions in diphones and triphones is essential for efficient speech synthesis and recognition algorithms. Temporal changes in the envelope structure are often derived from estimated formant tracks, an approach which is sensitive to estimation errors. In this paper we propose a speech source model which estimates frequency and amplitude movements in the spectral envelopes of speech signals and does not rely on formant tracking. The proposed model estimates the amplitude and frequency shifts for each sub-band and time frame of a speech signal using the information from the previous time frame. Our experiments demonstrate that the model captures temporal structures of spectral envelopes with high precision. The proposed model can thus be applied as an accurate low-order representation of temporal dynamics in speech spectral envelopes.},\n  keywords = {speech recognition;speech synthesis;speech spectral envelopes;speech signal;spectral envelope modeling;speech applications;tracking temporal transitions;recognition algorithms;envelope structure;speech source model;formant tracking;temporal dynamics;speech synthesis;formant dynamics modeling;model capture temporal structures;Speech;Tracking;Frequency estimation;Integrated circuit modeling;Time-frequency analysis;Estimation},\n  doi = {10.23919/EUSIPCO.2017.8081483},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342512.pdf},\n}\n\n
\n
\n\n\n
\n The spectral envelope of a speech signal encodes information about the characteristics of the speech source. As a result, spectral envelope modeling is a central task in speech applications, where tracking temporal transitions in diphones and triphones is essential for efficient speech synthesis and recognition algorithms. Temporal changes in the envelope structure are often derived from estimated formant tracks, an approach which is sensitive to estimation errors. In this paper we propose a speech source model which estimates frequency and amplitude movements in the spectral envelopes of speech signals and does not rely on formant tracking. The proposed model estimates the amplitude and frequency shifts for each sub-band and time frame of a speech signal using the information from the previous time frame. Our experiments demonstrate that the model captures temporal structures of spectral envelopes with high precision. The proposed model can thus be applied as an accurate low-order representation of temporal dynamics in speech spectral envelopes.\n
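One frame-to-frame shift estimate of the kind the model relies on can be sketched with plain spectra: the frequency shift between consecutive frames is read off the cross-correlation of their log-magnitude spectra. The paper's per-sub-band amplitude and frequency model is richer than this toy:

```python
# Hedged sketch: estimate a spectral frequency shift between two frames
# from the cross-correlation of their log-magnitude spectra.
import numpy as np

fs = 8000
t = np.arange(256) / fs
f1, f2 = 800.0, 860.0                          # "formant" moved upwards
frame1 = np.sin(2 * np.pi * f1 * t)
frame2 = np.sin(2 * np.pi * f2 * t)

S1 = np.log(np.abs(np.fft.rfft(frame1 * np.hanning(256))) + 1e-9)
S2 = np.log(np.abs(np.fft.rfft(frame2 * np.hanning(256))) + 1e-9)
S1, S2 = S1 - S1.mean(), S2 - S2.mean()

xc = np.correlate(S2, S1, mode="full")
shift_bins = int(np.argmax(xc)) - (len(S1) - 1)
print(shift_bins * fs / 256, "Hz")             # about 2 bins = 62.5 Hz
```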
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A novel filterbank for epoch estimation.\n \n \n \n \n\n\n \n Bachhav, P.; and Patil, H. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1624-1628, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081484,\n  author = {P. Bachhav and H. A. Patil},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A novel filterbank for epoch estimation},\n  year = {2017},\n  pages = {1624-1628},\n  abstract = {We present a novel approach for epoch estimation from the simple observation of the speech spectrum. Fundamental frequency (F0) of the speech signal and local variations around F0 are the characteristics of glottal excitation source. Extraction of this information from the speech spectrum can be used to estimate epochs (since higher harmonics interact with the vocal tract characteristics, they no longer represent the true glottal source). In this paper, we bandpass filter the speech signal through a novel Gaussian filterbank followed by simple peak detection to extract epochs. We do not attempt any post processing to study the effectiveness of F0 on epoch estimation in the proposed method. The algorithm is validated on various databases and compared with four state-of-the-art methods. The method has shown better or comparable results on the clean speech and found to be highly robust to the additive white noise giving highest IDR at various SNR levels.},\n  keywords = {channel bank filters;Gaussian processes;speech processing;white noise;epoch estimation;clean speech;speech spectrum;speech signal;vocal tract characteristics;glottal source;Speech;Estimation;Filter banks;Harmonic analysis;Power harmonic filters;Databases;Glottal closure instant (GCI);epoch;fundamental frequency(F0);spectrum},\n  doi = {10.23919/EUSIPCO.2017.8081484},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341646.pdf},\n}\n\n
\n
\n\n\n
\n We present a novel approach for epoch estimation from the simple observation of the speech spectrum. The fundamental frequency (F0) of the speech signal and local variations around F0 are characteristics of the glottal excitation source. Extraction of this information from the speech spectrum can be used to estimate epochs (since higher harmonics interact with the vocal tract characteristics, they no longer represent the true glottal source). In this paper, we bandpass filter the speech signal through a novel Gaussian filterbank followed by simple peak detection to extract epochs. We do not attempt any post-processing, in order to study the effectiveness of F0 on epoch estimation in the proposed method. The algorithm is validated on various databases and compared with four state-of-the-art methods. The method shows better or comparable results on clean speech and is found to be highly robust to additive white noise, giving the highest identification rate (IDR) at various SNR levels.\n
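A rough sketch of the pipeline: band-pass the signal with a Gaussian-shaped frequency response around F0, then pick peaks as epoch candidates. The centre frequency, bandwidth, and test signal below are illustrative, not the paper's filterbank design:

```python
# Hedged sketch: Gaussian band-pass around F0 + peak picking.
import numpy as np
from scipy.signal import find_peaks

fs, f0 = 8000, 120.0
t = np.arange(fs) / fs                          # one second of signal
rng = np.random.default_rng(0)
x = np.sign(np.sin(2 * np.pi * f0 * t)) + 0.3 * rng.normal(size=fs)

X = np.fft.rfft(x)
f = np.fft.rfftfreq(len(x), 1 / fs)
H = np.exp(-0.5 * ((f - f0) / 30.0) ** 2)       # Gaussian band-pass at F0
y = np.fft.irfft(X * H, n=len(x))

peaks, _ = find_peaks(y, distance=int(0.5 * fs / f0))
print("estimated F0:", fs / np.median(np.diff(peaks)), "Hz")  # close to 120
```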
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Pitch prediction from Mel-generalized cepstrum — a computationally efficient pitch modeling approach for speech synthesis.\n \n \n \n\n\n \n Rao, M. V. A.; and Ghosh, P. K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1629-1633, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081485,\n  author = {M. V. A. Rao and P. K. Ghosh},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Pitch prediction from Mel-generalized cepstrum — a computationally efficient pitch modeling approach for speech synthesis},\n  year = {2017},\n  pages = {1629-1633},\n  abstract = {Text-to-speech (TTS) systems are often used as part of the user interface in wearable devices. Due to limited memory and computational/battery power in wearable devices, it could be useful to have a TTS system which requires less memory and is less computationally intensive. Conventional speech synthesis systems has separate modeling for pitch (FO-model) and spectral representation, namely Mel generalized coefficients (MGC) (MGC-model). In this paper we estimate pitch from the MGC estimated using MGC-model instead of having a separate FO-model. Pitch is obtained from the estimated MGC using a statistical mapping through Gaussian mixture model (GMM). Experiments using CMU-ARCTIC database demonstrate that the proposed GMM based FO-model, even with a single mixture, results in no significant loss in the naturalness of the synthesized speech while the proposed FO-model, in addition to reducing computational complexity, results in ~93% reduction in the number of parameters compared to that of the F0-model.},\n  keywords = {cepstral analysis;computational complexity;Gaussian processes;mixture models;signal representation;speech processing;speech synthesis;speech-based user interfaces;pitch prediction;Mel-generalized cepstrum;computationally efficient pitch modeling approach;user interface;wearable devices;TTS system;spectral representation;Mel generalized coefficients;MGC-model;estimated MGC;Gaussian mixture model;GMM based FO-model;computational complexity;F0-model;text-to-speech synthesis systems;statistical mapping;CMU-ARCTIC database;pitch estimation;Hidden Markov models;High-temperature superconductors;Speech;Computational modeling;Speech synthesis;Training;Covariance matrices},\n  doi = {10.23919/EUSIPCO.2017.8081485},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Text-to-speech (TTS) systems are often used as part of the user interface in wearable devices. Due to limited memory and computational/battery power in wearable devices, it could be useful to have a TTS system which requires less memory and is less computationally intensive. Conventional speech synthesis systems have separate models for pitch (F0-model) and spectral representation, namely Mel-generalized coefficients (MGC) (MGC-model). In this paper we estimate pitch from the MGC estimated using the MGC-model instead of having a separate F0-model. Pitch is obtained from the estimated MGC using a statistical mapping through a Gaussian mixture model (GMM). Experiments using the CMU-ARCTIC database demonstrate that the proposed GMM-based F0-model, even with a single mixture, results in no significant loss in the naturalness of the synthesized speech, while the proposed F0-model, in addition to reducing computational complexity, results in a ~93% reduction in the number of parameters compared to the conventional F0-model.\n
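The statistical mapping can be sketched compactly: fit a joint Gaussian over (MGC-like features, F0) and predict pitch as the conditional mean, which is exactly the single-mixture GMM case mentioned above. The features and the linear ground truth below are synthetic:

```python
# Hedged sketch: GMM (single-component) regression from MGC-like
# features to F0 via the Gaussian conditional mean.
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
mgc = rng.normal(size=(500, 4))                   # stand-in MGC vectors
f0 = 5.0 + mgc @ np.array([0.5, -0.2, 0.1, 0.0]) + 0.05 * rng.normal(size=500)

gmm = GaussianMixture(n_components=1, covariance_type="full").fit(
    np.column_stack([mgc, f0]))
mu, S = gmm.means_[0], gmm.covariances_[0]
mu_x, mu_y = mu[:4], mu[4]
Sxx, Sxy = S[:4, :4], S[:4, 4]

def predict_f0(x):                                # E[f0 | mgc = x]
    return mu_y + Sxy @ np.linalg.solve(Sxx, x - mu_x)

print(predict_f0(mgc[0]), f0[0])                  # prediction vs. truth
```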
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Multi-channel estimation of power spectral density matrix using inter-frame and inter-band information.\n \n \n \n\n\n \n Ranjbaryan, R.; and Abutalebi, H. R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1634-1638, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081486,\n  author = {R. Ranjbaryan and H. R. Abutalebi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-channel estimation of power spectral density matrix using inter-frame and inter-band information},\n  year = {2017},\n  pages = {1634-1638},\n  abstract = {In this paper, we address the estimation of power spectral density (PSD) matrix. The accurate estimation of PSD matrix plays an important role in many speech enhancement methods. In traditional PSD estimation methods, only the information of previous frames is employed through a forgetting factor. In the current research, we consider the correlation of inter-band components and incorporate their information to compute the PSD matrix more accurately. The simulation results are presented to confirm the efficiency of this method. They show that the performance of the speech enhancement method is substantially improved by using the proposed PSD estimation technique.},\n  keywords = {channel estimation;matrix algebra;spectral analysis;speech enhancement;multichannel estimation;inter-band information;speech enhancement method;PSD estimation technique;inter-frame information;power spectral density matrix estimation;PSD matrix estimation;forgetting factor;inter-band component correlation;Estimation;Correlation;Time-frequency analysis;Speech;Microphones;Speech enhancement;Signal to noise ratio},\n  doi = {10.23919/EUSIPCO.2017.8081486},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper, we address the estimation of the power spectral density (PSD) matrix. Accurate estimation of the PSD matrix plays an important role in many speech enhancement methods. In traditional PSD estimation methods, only the information of previous frames is employed, through a forgetting factor. In the current research, we consider the correlation of inter-band components and incorporate their information to compute the PSD matrix more accurately. Simulation results are presented to confirm the efficiency of this method. They show that the performance of the speech enhancement method is substantially improved by using the proposed PSD estimation technique.\n
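For reference, the traditional recursion the paper improves on is a one-liner per frame: an exponentially weighted outer-product estimate of the PSD matrix, shown here for one frequency bin of toy two-channel data:

```python
# Hedged sketch: forgetting-factor PSD matrix recursion for one bin.
import numpy as np

rng = np.random.default_rng(0)
n_frames, n_ch = 100, 2
Y = (rng.normal(size=(n_frames, n_ch)) +
     1j * rng.normal(size=(n_frames, n_ch)))   # STFT frames of one bin

lam = 0.9                                      # forgetting factor
Phi = np.zeros((n_ch, n_ch), dtype=complex)
for y in Y:
    Phi = lam * Phi + (1 - lam) * np.outer(y, y.conj())
print(np.round(Phi, 2))                        # approaches E[y y^H] = 2*I
```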
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal high-dimensional shrinkage covariance estimation for elliptical distributions.\n \n \n \n \n\n\n \n Ollila, E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1639-1643, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081487,\n  author = {E. Ollila},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal high-dimensional shrinkage covariance estimation for elliptical distributions},\n  year = {2017},\n  pages = {1639-1643},\n  abstract = {We derive an optimal shrinkage sample covariance matrix (SCM) estimator which is suitable for high dimensional problems and when sampling from an unspecified elliptically symmetric distribution. Specifically, we derive the optimal (oracle) shrinkage parameters that obtain the minimum mean-squared error (MMSE) between the shrinkage SCM and the true covariance matrix when sampling from an elliptical distribution. Subsequently, we show how the oracle shrinkage parameters can be consistently estimated under the random matrix theory regime. Simulations show the advantage of the proposed estimator over the conventional shrinkage SCM estimator due to Ledoit and Wolf (2004). The proposed shrinkage SCM estimator often provides significantly better performance than the Ledoit-Wolf estimator and has the advantage that consistency is guaranteed over the whole class of elliptical distributions with finite 4th order moments.},\n  keywords = {covariance matrices;least mean squares methods;matrix algebra;mean square error methods;sampling methods;shrinkage;elliptical distribution;oracle shrinkage parameters;random matrix theory regime;conventional shrinkage SCM estimator;Ledoit-Wolf estimator;optimal high-dimensional shrinkage covariance estimation;optimal shrinkage sample covariance matrix estimator;unspecified elliptically symmetric distribution;optimal shrinkage parameters;high-dimensional problems;minimum mean-squared error;MMSE;Covariance matrices;Symmetric matrices;Signal processing;Gaussian distribution;Sociology;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081487},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347644.pdf},\n}\n\n
\n
\n\n\n
\n We derive an optimal shrinkage sample covariance matrix (SCM) estimator which is suitable for high dimensional problems and when sampling from an unspecified elliptically symmetric distribution. Specifically, we derive the optimal (oracle) shrinkage parameters that obtain the minimum mean-squared error (MMSE) between the shrinkage SCM and the true covariance matrix when sampling from an elliptical distribution. Subsequently, we show how the oracle shrinkage parameters can be consistently estimated under the random matrix theory regime. Simulations show the advantage of the proposed estimator over the conventional shrinkage SCM estimator due to Ledoit and Wolf (2004). The proposed shrinkage SCM estimator often provides significantly better performance than the Ledoit-Wolf estimator and has the advantage that consistency is guaranteed over the whole class of elliptical distributions with finite 4th order moments.\n
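For context, the Gaussian-design baseline mentioned above is available in scikit-learn; the sketch below compares the plain SCM with Ledoit-Wolf shrinkage in a p-close-to-n regime. Ollila's elliptical variant is not in scikit-learn and is not shown:

```python
# Hedged sketch: sample covariance vs. Ledoit-Wolf shrinkage.
import numpy as np
from sklearn.covariance import LedoitWolf

rng = np.random.default_rng(0)
p, n = 40, 60
true_cov = np.diag(np.linspace(1, 5, p))
X = rng.multivariate_normal(np.zeros(p), true_cov, size=n)

scm = np.cov(X, rowvar=False)
lw = LedoitWolf().fit(X)
err = lambda C: np.linalg.norm(C - true_cov, "fro")
print("SCM error:", round(err(scm), 2),
      " LW error:", round(err(lw.covariance_), 2))  # LW typically smaller
```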
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Misspecified Cramér-Rao bounds for complex unconstrained and constrained parameters.\n \n \n \n\n\n \n Fortunati, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1644-1648, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081488,\n  author = {S. Fortunati},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Misspecified Cramér-Rao bounds for complex unconstrained and constrained parameters},\n  year = {2017},\n  pages = {1644-1648},\n  abstract = {In this paper, a generalization of the Misspecified Cramér-Rao Bound (MCRB) and of the Constrained MCRB (CMCRB) to complex parameter vectors is presented. Our derivation aims at providing lower bounds on the Mean Square Error (MSE) for both circular and non-circular, MS-unbiased, mismatched estimators. A simple toy example is also presented to clarify the theoretical findings.},\n  keywords = {direction-of-arrival estimation;mean square error methods;MIMO radar;complex unconstrained parameters;complex parameter vectors;mean square error;misspecified Cramer-Rao bound;constrained MCRB;CMCRB;MSE;noncircular estimator;MS-unbiased estimator;mismatched estimators;Covariance matrices;Signal processing;Data models;Linear matrix inequalities;Europe;Estimation;Probability density function},\n  doi = {10.23919/EUSIPCO.2017.8081488},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this paper, a generalization of the Misspecified Cramér-Rao Bound (MCRB) and of the Constrained MCRB (CMCRB) to complex parameter vectors is presented. Our derivation aims at providing lower bounds on the Mean Square Error (MSE) for both circular and non-circular, MS-unbiased, mismatched estimators. A simple toy example is also presented to clarify the theoretical findings.\n
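As background, the real-parameter sandwich form that such bounds generalise can be recalled as follows (notation from the misspecification literature, e.g. Richmond and Horowitz, stated here from memory rather than quoted from this paper):

```latex
% Sandwich form of the misspecified CRB for a real parameter vector,
% evaluated at the pseudo-true parameter theta* (the KL-closest point
% of the assumed model f to the true density p):
\[
  \mathrm{MCRB}(\boldsymbol{\theta}^{*})
    = \mathbf{A}_{\boldsymbol{\theta}^{*}}^{-1}
      \mathbf{B}_{\boldsymbol{\theta}^{*}}
      \mathbf{A}_{\boldsymbol{\theta}^{*}}^{-1},
  \qquad
  \mathbf{A}_{\boldsymbol{\theta}^{*}}
    = \mathrm{E}_{p}\!\bigl[\nabla_{\boldsymbol{\theta}}^{2}
        \ln f(\mathbf{x};\boldsymbol{\theta}^{*})\bigr],
  \qquad
  \mathbf{B}_{\boldsymbol{\theta}^{*}}
    = \mathrm{E}_{p}\!\bigl[\nabla_{\boldsymbol{\theta}}
        \ln f(\mathbf{x};\boldsymbol{\theta}^{*})\,
        \nabla_{\boldsymbol{\theta}}
        \ln f(\mathbf{x};\boldsymbol{\theta}^{*})^{T}\bigr].
\]
```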
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The outlier-corrected-data-adaptive Lasso: A new robust estimator for the independent contamination model.\n \n \n \n \n\n\n \n Machkour, J.; Alt, B.; Muma, M.; and Zoubir, A. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1649-1653, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081489,\n  author = {J. Machkour and B. Alt and M. Muma and A. M. Zoubir},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {The outlier-corrected-data-adaptive Lasso: A new robust estimator for the independent contamination model},\n  year = {2017},\n  pages = {1649-1653},\n  abstract = {Many of today's signal processing tasks consider sparse models where the number of explanatory variables exceeds the sample size. When dealing with real-world data, the presence of impulsive noise and outliers must also be accounted for. Accurate and robust parameter estimation and consistent variable selection are needed simultaneously. Recently, some popular robust methods have been adapted to such complex settings. Especially, in high dimensional settings, however, it is possible to have a single contaminated predictor being responsible for many outliers. The amount of outliers introduced by this predictor easily exceeds the breakdown point of any existing robust estimator. Therefore, we propose a new robust and sparse estimator, the Outlier-Corrected-Data-(Adaptive) Lasso (OCD-(A) Lasso). It simultaneously handles highly contaminated predictors in the dataset and performs well under the classical contamination model. In a numerical study, it outperforms competing Lasso estimators, at a largely reduced computational complexity compared to its robust counterparts.},\n  keywords = {computational complexity;estimation theory;impulse noise;parameter estimation;signal processing;computational complexity;OCD-A-Lasso;explanatory variables;contaminated predictors;Lasso estimators;sparse estimator;single contaminated predictor;high dimensional settings;consistent variable selection;robust parameter estimation;impulsive noise;real-world data;sparse models;signal processing tasks;independent contamination model;outlier-corrected-data-adaptive Lasso;Robustness;Contamination;Signal processing algorithms;Signal processing;Electric breakdown;Prediction algorithms;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081489},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346975.pdf},\n}\n\n
\n
\n\n\n
\n Many of today's signal processing tasks consider sparse models where the number of explanatory variables exceeds the sample size. When dealing with real-world data, the presence of impulsive noise and outliers must also be accounted for. Accurate and robust parameter estimation and consistent variable selection are needed simultaneously. Recently, some popular robust methods have been adapted to such complex settings. In high-dimensional settings especially, however, it is possible to have a single contaminated predictor being responsible for many outliers. The amount of outliers introduced by this predictor easily exceeds the breakdown point of any existing robust estimator. Therefore, we propose a new robust and sparse estimator, the Outlier-Corrected-Data-(Adaptive) Lasso (OCD-(A) Lasso). It simultaneously handles highly contaminated predictors in the dataset and performs well under the classical contamination model. In a numerical study, it outperforms competing Lasso estimators at a largely reduced computational complexity compared to its robust counterparts.\n
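The sketch below is a caricature of the outlier-corrected-data idea, not the authors' OCD-(A)Lasso: clip each predictor column at robust (median ± 3 MAD) limits, then fit an ordinary Lasso. The data, contamination, and thresholds are invented:

```python
# Hedged sketch: column-wise robust clipping followed by a plain Lasso.
import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.default_rng(0)
n, p = 50, 200
X = rng.normal(size=(n, p))
X[:, 3] += np.where(rng.random(n) < 0.1, 50.0, 0.0)  # contaminated column
beta = np.zeros(p); beta[:3] = [2.0, -1.5, 1.0]       # true sparse effect
y = X @ beta + 0.1 * rng.normal(size=n)

med = np.median(X, axis=0)
mad = np.median(np.abs(X - med), axis=0) * 1.4826     # robust scale
X_corr = np.clip(X, med - 3 * mad, med + 3 * mad)     # "corrected" data

fit = Lasso(alpha=0.1).fit(X_corr, y)
print(np.nonzero(fit.coef_)[0][:10])                  # should include 0,1,2
```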
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On characterization and application of oscillatory almost-cyclostationary processes.\n \n \n \n \n\n\n \n Napolitano, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1654-1658, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081490,\n  author = {A. Napolitano},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On characterization and application of oscillatory almost-cyclostationary processes},\n  year = {2017},\n  pages = {1654-1658},\n  abstract = {The class of the second-order oscillatory almost-cyclostationary processes is characterized. These processes have autocorrelation function which is the superposition of amplitude- and angle-modulated complex sinewaves, where the modulating functions, referred to as evolutionary cyclic autocorrelation functions, depend on both time and lag parameter. This class of processes includes that of the almost-cyclostationary processes. The problem of statistical function measurements is addressed for the special case of amplitude-modulated time-warped almost-cyclostationary processes. These processes are shown to be a suitable model for the electrocardiogram.},\n  keywords = {amplitude modulation;correlation methods;electrocardiography;oscillations;signal processing;electrocardiogram model;complex sine waves superposition;angle-modulated complex sinewave;amplitude modulated sine wave;second-order oscillatory processes;oscillatory almost-cyclostationary processes;amplitude-modulated time;statistical function measurements;evolutionary cyclic autocorrelation functions;modulating functions;autocorrelation function;Correlation;Frequency modulation;Europe;Bandwidth;Spectral analysis;Oscillatory almost-cyclostationary processes;evolutionary spectral analysis;electrocardiogram},\n  doi = {10.23919/EUSIPCO.2017.8081490},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347382.pdf},\n}\n\n
\n
\n\n\n
\n The class of second-order oscillatory almost-cyclostationary processes is characterized. These processes have an autocorrelation function that is the superposition of amplitude- and angle-modulated complex sinewaves, where the modulating functions, referred to as evolutionary cyclic autocorrelation functions, depend on both the time and the lag parameter. This class of processes includes that of the almost-cyclostationary processes. The problem of statistical function measurements is addressed for the special case of amplitude-modulated time-warped almost-cyclostationary processes. These processes are shown to be a suitable model for the electrocardiogram.\n
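The basic (non-evolutionary) cyclic autocorrelation behind this discussion has a direct estimator. A toy version applied to a noisy sinusoid, whose cycle frequency 2f0 shows up clearly, while the paper's time-warped generalisation is not shown:

```python
# Hedged sketch: estimate the cyclic autocorrelation R(alpha, lag).
import numpy as np

def cyclic_autocorr(x, alpha, lag):
    """<x(t+lag) x*(t) exp(-j 2 pi alpha t)>, alpha in cycles/sample."""
    t = np.arange(len(x) - lag)
    return np.mean(x[t + lag] * np.conj(x[t]) * np.exp(-2j * np.pi * alpha * t))

fs, f0 = 1000, 50.0
t = np.arange(4000) / fs
rng = np.random.default_rng(0)
x = np.cos(2 * np.pi * f0 * t) + 0.1 * rng.normal(size=t.size)

# A real sinusoid is cyclostationary with cycle frequency 2*f0:
print(abs(cyclic_autocorr(x, alpha=2 * f0 / fs, lag=0)))   # large (~0.25)
print(abs(cyclic_autocorr(x, alpha=0.123, lag=0)))         # near zero
```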
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Jeffrey's divergence between complex-valued sinusoidal processes.\n \n \n \n \n\n\n \n Grivel, E.; Saleh, M.; and Omar, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1659-1663, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Jeffrey'sPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081491,\n  author = {E. Grivel and M. Saleh and S. Omar},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Jeffrey's divergence between complex-valued sinusoidal processes},\n  year = {2017},\n  pages = {1659-1663},\n  abstract = {Like other divergences, Jeffrey's divergence (JD) is used for change detection, for model comparison, etc. Recently, a great deal of interest has been paid to this symmetric version of the Kullback-Leibler (KL) divergence. This led to analytical expressions of the JD between autoregressive (AR) processes, moving-average (MA) processes, either noise-free or disturbed by additive white noises, as well as ARMA processes. In this paper, we propose to study the JD between processes that are defined as sums of complex-valued sinusoidal processes disturbed by additive white noises. We show that the JD tends to a stationary behavior when the number of variates becomes large. The derivative of the JD becomes a constant that depends on the parameters defining the processes. The convergence speed towards this stationary regime depends on the differences between the normalized angular frequencies. The smaller the difference, the slower the convergence. This result can be obtained by interpreting some steps to compute the JD as orthogonal projections. Some examples illustrate the theoretical analysis.},\n  keywords = {autoregressive moving average processes;signal detection;white noise;JD;complex-valued sinusoidal processes;Kullback-Leibler divergence;autoregressive processes;ARMA processes;Jeffrey divergence;additive white noise;change detection;Additive white noise;Convergence;Europe;Covariance matrices;Estimation;Physics;Jeffrey's divergence;Kullback-Leibler divergence;change detection;model comparison},\n  doi = {10.23919/EUSIPCO.2017.8081491},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346868.pdf},\n}\n\n
\n
\n\n\n
\n Like other divergences, Jeffrey's divergence (JD) is used for change detection, for model comparison, etc. Recently, a great deal of interest has been paid to this symmetric version of the Kullback-Leibler (KL) divergence. This led to analytical expressions of the JD between autoregressive (AR) processes, moving-average (MA) processes, either noise-free or disturbed by additive white noises, as well as ARMA processes. In this paper, we propose to study the JD between processes that are defined as sums of complex-valued sinusoidal processes disturbed by additive white noises. We show that the JD tends to a stationary behavior when the number of variates becomes large. The derivative of the JD becomes a constant that depends on the parameters defining the processes. The convergence speed towards this stationary regime depends on the differences between the normalized angular frequencies. The smaller the difference, the slower the convergence. This result can be obtained by interpreting some steps to compute the JD as orthogonal projections. Some examples illustrate the theoretical analysis.\n
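For zero-mean Gaussian vectors the JD has a closed form in the two covariance matrices (the log-determinant terms of the two KL directions cancel). The numeric sketch below applies it to covariances of noisy random-phase sinusoids and illustrates the near-linear growth with the number of variates discussed above; the paper derives analytic expressions rather than this brute-force evaluation:

```python
# Hedged sketch: Jeffrey's divergence between zero-mean Gaussians with
# sinusoid-plus-noise covariance matrices.
import numpy as np
from scipy.linalg import toeplitz

def jeffreys(S1, S2):
    """KL(N(0,S1)||N(0,S2)) + KL(N(0,S2)||N(0,S1)); log-dets cancel."""
    k = S1.shape[0]
    return 0.5 * (np.trace(np.linalg.solve(S2, S1))
                  + np.trace(np.linalg.solve(S1, S2)) - 2 * k)

def sine_cov(n, f, power=1.0, noise=0.1):
    """Covariance of a random-phase sinusoid plus additive white noise."""
    tau = np.arange(n)
    return toeplitz(power * np.cos(2 * np.pi * f * tau)) + noise * np.eye(n)

for n in (10, 50, 200):        # JD grows with n; its increments settle
    print(n, round(jeffreys(sine_cov(n, 0.10), sine_cov(n, 0.12)), 2))
```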
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A kernel density-based particle filter for state and time-varying parameter estimation in nonlinear state-space models.\n \n \n \n \n\n\n \n Cheng, C.; and Tourneret, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1664-1668, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081492,\n  author = {C. Cheng and J. Tourneret},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A kernel density-based particle filter for state and time-varying parameter estimation in nonlinear state-space models},\n  year = {2017},\n  pages = {1664-1668},\n  abstract = {In linear/nonlinear dynamical systems, there are many situations where model parameters cannot be obtained a priori or vary with time. As a consequence, the estimation algorithms that are based on the exact knowledge of these model parameters cannot be accurate in this context. In this work, a kernel density-based particle filter is investigated to jointly estimate the states and unknown time-varying parameters of a dynamical system described by nonlinear state and measurement equations. The approach combines an auxiliary particle filter with the kernel smoothing method so as to obtain a stationary kernel density for the unknown parameters. The performance of the proposed approach is investigated for positioning using measurements from a global navigation satellite system that are possibly contaminated by multipath interferences.},\n  keywords = {nonlinear dynamical systems;parameter estimation;particle filtering (numerical methods);smoothing methods;state-space methods;time-varying systems;estimation algorithms;unknown time-varying parameters;dynamical system;measurement equations;auxiliary particle filter;kernel smoothing method;stationary kernel density;global navigation satellite system;nonlinear state-space models;linear/nonlinear dynamical systems;multipath interferences;kernel density-based particle filter;Kernel;Estimation;Global navigation satellite system;Atmospheric measurements;Particle measurements;Smoothing methods;Bayes methods},\n  doi = {10.23919/EUSIPCO.2017.8081492},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347390.pdf},\n}\n\n
\n
\n\n\n
\n In linear/nonlinear dynamical systems, there are many situations where model parameters cannot be obtained a priori or vary with time. As a consequence, the estimation algorithms that are based on the exact knowledge of these model parameters cannot be accurate in this context. In this work, a kernel density-based particle filter is investigated to jointly estimate the states and unknown time-varying parameters of a dynamical system described by nonlinear state and measurement equations. The approach combines an auxiliary particle filter with the kernel smoothing method so as to obtain a stationary kernel density for the unknown parameters. The performance of the proposed approach is investigated for positioning using measurements from a global navigation satellite system that are possibly contaminated by multipath interferences.\n
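The combination of an auxiliary particle filter with kernel smoothing described above is commonly realized with the Liu-West shrinkage kernel; the sketch below applies that generic construction to an invented scalar toy model (not the authors' GNSS positioning setup) to jointly track the state and a slowly drifting parameter.

```python
import numpy as np

rng = np.random.default_rng(0)

# invented toy model: x_t = th_t * x_{t-1} + w_t,  y_t = x_t**2 / 20 + v_t
def propagate(x, th):  return th * x + rng.normal(0.0, 0.5, x.shape)
def loglik(y, x):      return -0.5 * ((y - x**2 / 20.0) / 0.3) ** 2

T, N = 200, 2000
x_true, ys = 0.1, []
for t in range(T):                         # simulate data; theta drifts slowly
    th_true = 0.8 + 0.1 * np.sin(0.02 * t)
    x_true = th_true * x_true + rng.normal(0, 0.5)
    ys.append(x_true**2 / 20.0 + rng.normal(0, 0.3))

x = rng.normal(0, 1, N)                    # state particles
th = rng.uniform(0.2, 1.2, N)              # parameter particles
w = np.full(N, 1.0 / N)
delta = 0.98                               # Liu-West discount factor (assumed)
a = (3 * delta - 1) / (2 * delta)
h2 = 1.0 - a**2                            # kernel bandwidth factor

for y in ys:
    # kernel shrinkage of the parameter cloud (keeps its mean and variance)
    mbar = np.sum(w * th)
    var = np.sum(w * (th - mbar) ** 2)
    m = a * th + (1 - a) * mbar
    # first-stage (auxiliary) weights from the predicted mean state
    mu = m * x
    lw = np.log(w + 1e-300) + loglik(y, mu)
    g = np.exp(lw - lw.max()); g /= g.sum()
    idx = rng.choice(N, N, p=g)
    # jitter parameters with the stationary kernel, then propagate the states
    th = m[idx] + rng.normal(0, np.sqrt(h2 * var), N)
    x = propagate(x[idx], th)
    # second-stage weights correct for the auxiliary approximation
    lw = loglik(y, x) - loglik(y, mu[idx])
    w = np.exp(lw - lw.max()); w /= w.sum()

print("posterior mean of theta:", np.sum(w * th))
```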
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Graph clustering for localization within a sensor array.\n \n \n \n \n\n\n \n Riahi, N.; Gerstoft, P.; and Mecklenbräuker, C. F.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1669-1673, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"GraphPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081493,\n  author = {N. Riahi and P. Gerstoft and C. F. Mecklenbräuker},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Graph clustering for localization within a sensor array},\n  year = {2017},\n  pages = {1669-1673},\n  abstract = {We develop a model-free technique to identify weak sources within dense sensor arrays using graph clustering. No knowledge about the propagation medium is needed except that signal strengths decay to insignificant levels within a scale that is shorter than the aperture. We then reinterpret the spatial coherence matrix of a wave field as a matrix whose support is a connectivity matrix of a graph with sensors as vertices. In a dense network, well-separated sources induce clusters in this graph. The support of the covariance matrix is estimated from limited-time data using a hypothesis test with a robust phase-only coherence test statistic combined with a physical distance criterion. The method is applied to a dense 5200 element geophone array that blanketed 7 km × 10 km of the city of Long Beach (CA). The analysis exposes a helicopter traversing the array.},\n  keywords = {covariance matrices;graph theory;pattern clustering;seismometers;sensor arrays;statistical testing;graph clustering;model-free technique;propagation medium;spatial coherence matrix;dense 5200 element geophone array;signal strength decay;covariance matrix estimation;sensor array localization;weak source identification;wave field matrix;connectivity graph matrix;vertices;well-separated source clustering;robust phase-only coherence test statistics;physical distance criterion;Long Beach city;CA;helicopter;Coherence;Sensor arrays;Probability density function;Array signal processing;Matrix decomposition;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081493},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343237.pdf},\n}\n\n
\n
\n\n\n
\n We develop a model-free technique to identify weak sources within dense sensor arrays using graph clustering. No knowledge about the propagation medium is needed except that signal strengths decay to insignificant levels within a scale that is shorter than the aperture. We then reinterpret the spatial coherence matrix of a wave field as a matrix whose support is a connectivity matrix of a graph with sensors as vertices. In a dense network, well-separated sources induce clusters in this graph. The support of the covariance matrix is estimated from limited-time data using a hypothesis test with a robust phase-only coherence test statistic combined with a physical distance criterion. The method is applied to a dense 5200 element geophone array that blanketed 7 km × 10 km of the city of Long Beach (CA). The analysis exposes a helicopter traversing the array.\n
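A minimal sketch of the pipeline this abstract describes, on simulated placeholder data: phase-only coherence, a detection threshold derived here from a simple exponential null model (an assumption standing in for the paper's calibrated test statistic), a physical distance criterion, and connected components of the resulting graph as clusters.

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

rng = np.random.default_rng(0)

# placeholder "array data": n sensors, T complex snapshots, random positions
n, T = 100, 400
pos = rng.uniform(0, 1000.0, (n, 2))
X = rng.normal(size=(n, T)) + 1j * rng.normal(size=(n, T))
near = np.linalg.norm(pos - pos[0], axis=1) < 120       # plant one weak local source
X[near] += (rng.normal(size=T) + 1j * rng.normal(size=T)) * 3.0

# phase-only coherence matrix (robust to amplitude outliers)
P = X / np.abs(X)
C = (P @ P.conj().T) / T

# under H0 (independent phases), |C_ij|^2 is approx. Exp with mean 1/T,
# so a false-alarm level alpha gives the threshold below (sketch assumption)
alpha = 1e-4
gamma = np.sqrt(np.log(1.0 / alpha) / T)

D = np.linalg.norm(pos[:, None] - pos[None, :], axis=2)
A = (np.abs(C) > gamma) & (D < 200.0)                   # coherent AND nearby
np.fill_diagonal(A, False)

ncomp, labels = connected_components(csr_matrix(A), directed=False)
sizes = np.bincount(labels)
print("clusters with more than 3 sensors:", sizes[sizes > 3])
```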
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Graph sampling with determinantal processes.\n \n \n \n \n\n\n \n Tremblay, N.; Amblard, P.; and Barthelmé, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1674-1678, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"GraphPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081494,\n  author = {N. Tremblay and P. Amblard and S. Barthelmé},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Graph sampling with determinantal processes},\n  year = {2017},\n  pages = {1674-1678},\n  abstract = {We present a new random sampling strategy for k-bandlimited signals defined on graphs, based on determinantal point processes (DPP). For small graphs, i.e., in cases where the spectrum of the graph is accessible, we exhibit a DPP sampling scheme that enables perfect recovery of bandlimited signals. For large graphs, i.e., in cases where the graph's spectrum is not accessible, we investigate, both theoretically and empirically, a sub-optimal but much faster DPP based on loop-erased random walks on the graph. Preliminary experiments show promising results especially in cases where the number of measurements should stay as small as possible and for graphs that have a strong community structure. Our sampling scheme is efficient and can be applied to graphs with up to 106 nodes.},\n  keywords = {graph theory;random processes;stochastic processes;random sampling strategy;determinantal point processes;DPP sampling scheme;graph sampling;k-band limited signals;loop-erased random walks;Laplace equations;Signal processing;Symmetric matrices;Europe;Signal processing algorithms;Greedy algorithms;Approximation algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081494},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347039.pdf},\n}\n\n
\n
\n\n\n
\n We present a new random sampling strategy for k-bandlimited signals defined on graphs, based on determinantal point processes (DPP). For small graphs, i.e., in cases where the spectrum of the graph is accessible, we exhibit a DPP sampling scheme that enables perfect recovery of bandlimited signals. For large graphs, i.e., in cases where the graph's spectrum is not accessible, we investigate, both theoretically and empirically, a sub-optimal but much faster DPP based on loop-erased random walks on the graph. Preliminary experiments show promising results especially in cases where the number of measurements should stay as small as possible and for graphs that have a strong community structure. Our sampling scheme is efficient and can be applied to graphs with up to 10^6 nodes.\n
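For the small-graph regime, where the Laplacian spectrum is available, a projection DPP built from the first k Laplacian eigenvectors can be sampled with the standard sequential algorithm sketched below (a generic construction, not necessarily the paper's implementation).

```python
import numpy as np

def sample_projection_dpp(V, rng):
    """Draw one sample from the projection DPP with marginal kernel V V^H,
    where V has k orthonormal columns (standard sequential algorithm)."""
    V = V.copy()
    chosen = []
    for _ in range(V.shape[1]):
        p = (np.abs(V) ** 2).sum(axis=1)
        p /= p.sum()
        i = rng.choice(len(p), p=p)
        chosen.append(i)
        # eliminate one column so row i becomes zero, then re-orthonormalize
        j = int(np.argmax(np.abs(V[i])))
        col = V[:, j].copy()
        V = np.delete(V, j, axis=1)
        V -= np.outer(col, V[i] / col[i])
        if V.shape[1]:
            V, _ = np.linalg.qr(V)
    return sorted(chosen)

# toy example: ring graph, sample k nodes for a k-bandlimited signal
rng = np.random.default_rng(0)
n, k = 60, 6
A = np.roll(np.eye(n), 1, axis=1) + np.roll(np.eye(n), -1, axis=1)
L = np.diag(A.sum(axis=1)) - A
_, U = np.linalg.eigh(L)                 # eigenvectors, ascending frequency
print("sampled nodes:", sample_projection_dpp(U[:, :k], rng))
```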
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Inference of spatiotemporal processes over graphs via kernel kriged Kalman filtering.\n \n \n \n \n\n\n \n Ioannidis, V. N.; Romero, D.; and Giannakis, G. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1679-1683, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"InferencePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081495,\n  author = {V. N. Ioannidis and D. Romero and G. B. Giannakis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Inference of spatiotemporal processes over graphs via kernel kriged Kalman filtering},\n  year = {2017},\n  pages = {1679-1683},\n  abstract = {Inference of space-time signals evolving over graphs emerges naturally in a number of network science related applications. A frequently encountered challenge pertains to reconstructing such dynamic processes given their values over a subset of vertices and time instants. The present paper develops a graph-aware kernel-based kriged Kalman filtering approach that leverages the spatio-temporal dynamics to allow for efficient online reconstruction, while also coping with dynamically evolving network topologies. Laplacian kernels are employed to perform kriging over the graph when spatial second-order statistics are unknown, as is often the case. Numerical tests with synthetic and real data illustrate the superior reconstruction performance of the proposed approach.},\n  keywords = {graph theory;Kalman filters;spatiotemporal phenomena;statistical analysis;spatiotemporal processes;graphs;space-time signals;time instants;graph-aware kernel;Kalman filtering approach;spatio-temporal dynamics;network topologies;kernel kriged Kalman filtering;signal inference;Laplacian kernels;spatial second-order statistics;Kernel;Kalman filters;Laplace equations;Delays;Europe;Spatiotemporal phenomena;Graph signal reconstruction;time series on graphs;kriged Kalman filtering;Laplacian kernels},\n  doi = {10.23919/EUSIPCO.2017.8081495},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347100.pdf},\n}\n\n
\n
\n\n\n
\n Inference of space-time signals evolving over graphs emerges naturally in a number of network science related applications. A frequently encountered challenge pertains to reconstructing such dynamic processes given their values over a subset of vertices and time instants. The present paper develops a graph-aware kernel-based kriged Kalman filtering approach that leverages the spatio-temporal dynamics to allow for efficient online reconstruction, while also coping with dynamically evolving network topologies. Laplacian kernels are employed to perform kriging over the graph when spatial second-order statistics are unknown, as is often the case. Numerical tests with synthetic and real data illustrate the superior reconstruction performance of the proposed approach.\n
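The kriging half of the kriged Kalman filter can be sketched in a few lines: with a Laplacian kernel standing in for the unknown spatial second-order statistics, unobserved vertices are interpolated by Gaussian-process regression on the graph. The full method wraps a Kalman recursion over time around this step; the kernel choice and regularizer below are assumptions of this sketch.

```python
import numpy as np

rng = np.random.default_rng(0)

# toy graph and a kernel-smooth signal on it (placeholders for network data)
n = 50
A = (rng.uniform(size=(n, n)) < 0.1).astype(float)
A = np.triu(A, 1); A = A + A.T
L = np.diag(A.sum(1)) - A

# Laplacian (diffusion-type) kernel; regularizer 0.1 is an assumed value
K = np.linalg.inv(L + 0.1 * np.eye(n))

x = K @ rng.normal(size=n)                   # ground-truth smooth graph signal
S = rng.choice(n, 20, replace=False)         # observed vertices
U = np.setdiff1d(np.arange(n), S)            # unobserved vertices
sigma2 = 0.01
y = x[S] + rng.normal(0, np.sqrt(sigma2), len(S))

# kriging = GP regression with the graph kernel
x_hat_U = K[np.ix_(U, S)] @ np.linalg.solve(
    K[np.ix_(S, S)] + sigma2 * np.eye(len(S)), y)
print("interpolation RMSE:", np.sqrt(np.mean((x_hat_U - x[U]) ** 2)))
```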
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal sampling strategies for adaptive learning of graph signals.\n \n \n \n \n\n\n \n Di Lorenzo, P.; Banelli, P.; and Barbarossa, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1684-1688, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081496,\n  author = {P. {Di Lorenzo} and P. Banelli and S. Barbarossa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal sampling strategies for adaptive learning of graph signals},\n  year = {2017},\n  pages = {1684-1688},\n  abstract = {The aim of this paper is to propose optimal sampling strategies for adaptive learning of signals defined over graphs. Introducing a novel least mean square (LMS) estimation strategy with probabilistic sampling, we propose two different methods to select the sampling probability at each node, with the aim of optimizing the sampling rate, or the mean-square performance, while at the same time guaranteeing a prescribed learning rate. The resulting solutions naturally lead to sparse sampling probability vectors that optimize the tradeoff between graph sampling rate, steady-state performance, and learning rate of the LMS algorithm. Numerical simulations validate the proposed approach, and assess the performance of the proposed sampling strategies for adaptive learning of graph signals.},\n  keywords = {adaptive filters;graph theory;learning (artificial intelligence);least mean squares methods;mean square error methods;optimisation;sampling methods;signal sampling;numerical simulation;graph sampling rate;sparse sampling probability vectors;prescribed learning rate;mean-square performance;probabilistic sampling;mean square estimation strategy;graph signals;adaptive learning;optimal sampling strategies;Signal processing algorithms;Optimization;Algorithm design and analysis;Estimation;Probabilistic logic;Laplace equations;Adaptive LMS estimation;graph signal processing;sampling;successive convex approximation},\n  doi = {10.23919/EUSIPCO.2017.8081496},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347378.pdf},\n}\n\n
\n
\n\n\n
\n The aim of this paper is to propose optimal sampling strategies for adaptive learning of signals defined over graphs. Introducing a novel least mean square (LMS) estimation strategy with probabilistic sampling, we propose two different methods to select the sampling probability at each node, with the aim of optimizing the sampling rate, or the mean-square performance, while at the same time guaranteeing a prescribed learning rate. The resulting solutions naturally lead to sparse sampling probability vectors that optimize the tradeoff between graph sampling rate, steady-state performance, and learning rate of the LMS algorithm. Numerical simulations validate the proposed approach, and assess the performance of the proposed sampling strategies for adaptive learning of graph signals.\n
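A minimal sketch of the underlying LMS recursion with probabilistic sampling (sampling probabilities are fixed by hand here, rather than optimized as in the paper): each node is observed with probability p_i at every step, and the update projects onto the bandlimited subspace.

```python
import numpy as np

rng = np.random.default_rng(0)

# ring graph; true signal bandlimited to the first F Laplacian eigenvectors
n, F = 60, 8
A = np.roll(np.eye(n), 1, 1) + np.roll(np.eye(n), -1, 1)
L = np.diag(A.sum(1)) - A
_, Uall = np.linalg.eigh(L)
UF = Uall[:, :F]
B = UF @ UF.T                          # projector onto the bandlimited space
x0 = UF @ rng.normal(size=F)           # true graph signal

p = np.full(n, 0.3)                    # per-node sampling probabilities (assumed)
mu, sigma = 0.5, 0.1                   # step size and noise level (assumed)
xh = np.zeros(n)
for t in range(2000):
    d = (rng.uniform(size=n) < p).astype(float)      # random sampling mask
    y = x0 + rng.normal(0, sigma, n)                 # noisy observations
    xh = B @ (xh + mu * d * (y - xh))                # projected LMS update
print("steady-state MSD:", np.mean((xh - x0) ** 2))
```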
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Coordinate descent accelerations for signal recovery on scale-free graphs based on total variation minimization.\n \n \n \n \n\n\n \n Berger, P.; Hannak, G.; and Matz, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1689-1693, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CoordinatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081497,\n  author = {P. Berger and G. Hannak and G. Matz},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Coordinate descent accelerations for signal recovery on scale-free graphs based on total variation minimization},\n  year = {2017},\n  pages = {1689-1693},\n  abstract = {We extend our previous work on learning smooth graph signals from a small number of noisy signal samples. Minimizing the signal's total variation amounts to a non-smooth convex optimization problem. We propose to solve this problem using a combination of Nesterov's smoothing technique and accelerated coordinate descent. The resulting algorithm converges substantially faster, specifically for graphs with vastly varying node degrees (e.g., scale-free graphs).},\n  keywords = {convex programming;graph theory;minimisation;signal processing;signal recovery;scale-free graphs;total variation minimization;smooth graph signals;noisy signal samples;nonsmooth convex optimization problem;coordinate descent accelerations;Nesterov smoothing technique;Signal processing algorithms;TV;Convergence;Noise measurement;Smoothing methods;Signal processing;Acceleration},\n  doi = {10.23919/EUSIPCO.2017.8081497},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347628.pdf},\n}\n\n
\n
\n\n\n
\n We extend our previous work on learning smooth graph signals from a small number of noisy signal samples. Minimizing the signal's total variation amounts to a non-smooth convex optimization problem. We propose to solve this problem using a combination of Nesterov's smoothing technique and accelerated coordinate descent. The resulting algorithm converges substantially faster, specifically for graphs with vastly varying node degrees (e.g., scale-free graphs).\n
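A sketch of the smoothed problem, without the acceleration that is the paper's contribution: the graph total variation is replaced by its Huber (Nesterov-smoothed) surrogate, and plain randomized coordinate descent uses the exact per-coordinate Lipschitz constants. The graph, weights, and iteration count below are invented.

```python
import numpy as np

rng = np.random.default_rng(0)

# toy path graph with a piecewise-constant signal observed at a few noisy nodes
n = 80
edges = [(i, i + 1) for i in range(n - 1)]
x_true = np.where(np.arange(n) < n // 2, 0.0, 1.0)
S = rng.choice(n, 20, replace=False)
y = x_true[S] + rng.normal(0, 0.05, len(S))

mu, lam = 0.01, 10.0                     # smoothing parameter / data-fit weight
mask = np.zeros(n); mask[S] = 1.0
yfull = np.zeros(n); yfull[S] = y
nbrs = [[] for _ in range(n)]
for i, j in edges:
    nbrs[i].append(j); nbrs[j].append(i)

def hprime(t):                            # derivative of the Huber smoothing
    return np.clip(t / mu, -1.0, 1.0)

# objective: sum_{(i,j) in E} Huber_mu(x_i - x_j) + (lam/2) * ||x_S - y||^2
x = np.zeros(n)
for it in range(50000):
    i = rng.integers(n)                   # random coordinate
    g = sum(hprime(x[i] - x[j]) for j in nbrs[i]) \
        + lam * mask[i] * (x[i] - yfull[i])
    Li = len(nbrs[i]) / mu + lam * mask[i]   # coordinate Lipschitz constant
    x[i] -= g / Li
print("recovery RMSE:", np.sqrt(np.mean((x - x_true) ** 2)))
```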
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Collaborative filtering via graph signal processing.\n \n \n \n \n\n\n \n Huang, W.; Marques, A. G.; and Ribeiro, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1694-1698, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CollaborativePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081498,\n  author = {W. Huang and A. G. Marques and A. Ribeiro},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Collaborative filtering via graph signal processing},\n  year = {2017},\n  pages = {1694-1698},\n  abstract = {This paper develops new designs for recommender systems inspired by recent advances in graph signal processing. Recommender systems aim to predict unknown ratings by exploiting the information revealed in a subset of user-item observed ratings. Leveraging the notions of graph frequency and graph filters, we demonstrate that a common collaborative filtering method - k-nearest neighbors - can be modeled as a specific band-stop graph filter on networks describing similarities between users or items. These new interpretations pave the way to new methods for enhanced rating prediction. For collaborative filtering, we develop more general band stop graph filters. The performance of our algorithms is assessed in the MovieLens-100k dataset, showing that our designs reduce the root mean squared error (up to a 6.20% improvement) compared to one incurred by the benchmark collaborative filtering approach.},\n  keywords = {band-stop filters;collaborative filtering;graph theory;mean square error methods;pattern classification;recommender systems;signal processing;graph signal processing;recommender systems;user-item observed ratings;graph frequency;common collaborative filtering method;general band stop graph filters;benchmark collaborative filtering approach;k-nearest neighbors;root mean squared error;Signal processing algorithms;Artificial neural networks;Signal processing;Collaboration;Algorithm design and analysis;Europe;Recommender systems;Collaborative filtering;recommender systems;graph signal processing;bandlimited graph signals;graph filters},\n  doi = {10.23919/EUSIPCO.2017.8081498},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347735.pdf},\n}\n\n
\n
\n\n\n
\n This paper develops new designs for recommender systems inspired by recent advances in graph signal processing. Recommender systems aim to predict unknown ratings by exploiting the information revealed in a subset of user-item observed ratings. Leveraging the notions of graph frequency and graph filters, we demonstrate that a common collaborative filtering method - k-nearest neighbors - can be modeled as a specific band-stop graph filter on networks describing similarities between users or items. These new interpretations pave the way to new methods for enhanced rating prediction. For collaborative filtering, we develop more general band-stop graph filters. The performance of our algorithms is assessed on the MovieLens-100k dataset, showing that our designs reduce the root mean squared error (up to a 6.20% improvement) compared to that incurred by the benchmark collaborative filtering approach.\n
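The kNN-as-graph-filter interpretation can be made concrete in a few lines: with a row-normalized, sparsified item-item similarity matrix W as the graph shift, item-based kNN prediction is exactly the one-hop filter X W^T; the paper's band-stop designs generalize this to higher-order polynomials in W. The data and neighbourhood size below are placeholders.

```python
import numpy as np

rng = np.random.default_rng(0)

# toy user-item rating matrix with zeros marking missing entries
n_users, n_items, k = 200, 50, 5
X = rng.integers(0, 6, (n_users, n_items)).astype(float)
X[rng.uniform(size=X.shape) < 0.8] = 0.0          # ~80% missing

# item-item cosine similarity graph
norms = np.linalg.norm(X, axis=0) + 1e-12
S = (X.T @ X) / np.outer(norms, norms)
np.fill_diagonal(S, 0.0)

# keep the k strongest neighbours per item and row-normalize -> shift W
W = np.zeros_like(S)
for i in range(n_items):
    top = np.argsort(S[i])[-k:]
    W[i, top] = S[i, top]
W /= W.sum(axis=1, keepdims=True) + 1e-12

# item-kNN prediction == applying the one-hop graph filter W to each user
X_hat = X @ W.T
print("predicted rating, user 0 / item 3:", X_hat[0, 3])
```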
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tunable load MIMO with quantized loads.\n \n \n \n \n\n\n \n Li, A.; Masouros, C.; Sellathurai, M.; and Papadias, C. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1699-1703, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"TunablePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081499,\n  author = {A. Li and C. Masouros and M. Sellathurai and C. B. Papadias},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Tunable load MIMO with quantized loads},\n  year = {2017},\n  pages = {1699-1703},\n  abstract = {In this paper, we study the application of precoding schemes on practical electronically steerable parasitic array radiators (ESPARs), where quantized load impedances are considered for each antenna element. The presence of quantization in the loads results in a performance loss for practical ESPARs. To alleviate the performance loss, we propose to approximate the ideal current vector with convex optimization, where it is further shown that the optimality is achieved by optimizing the feeding voltages only. Specifically, we obtain the closed-form expression when single-fed ESPARs are assumed. Numerical results show that the proposed quantization-robust scheme can achieve a significant performance gain over ESPARs with quantized loads.},\n  keywords = {antenna arrays;antenna feeds;beam steering;concave programming;MIMO communication;precoding;quantisation (signal);tunable load MIMO;quantized load impedances;antenna element;electronically steerable parasitic array radiators;ESPAR;quantization-robust scheme;convex optimization;Antenna arrays;Impedance;Quantization (signal);MIMO;Precoding;Load modeling;Optimization;MIMO;ESPARs;quantization;optimization},\n  doi = {10.23919/EUSIPCO.2017.8081499},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340007.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we study the application of precoding schemes on practical electronically steerable parasitic array radiators (ESPARs), where quantized load impedances are considered for each antenna element. The presence of quantization in the loads results in a performance loss for practical ESPARs. To alleviate the performance loss, we propose to approximate the ideal current vector with convex optimization, where it is further shown that the optimality is achieved by optimizing the feeding voltages only. Specifically, we obtain the closed-form expression when single-fed ESPARs are assumed. Numerical results show that the proposed quantization-robust scheme can achieve a significant performance gain over ESPARs with quantized loads.\n
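A toy numerical sketch of the single-fed case described above (the impedance matrix and load grid are invented placeholders, not a real ESPAR model): the loads are quantized to the nearest grid point, and the feeding voltage is then obtained from the closed-form least-squares fit of the achievable current vector to the ideal one.

```python
import numpy as np

rng = np.random.default_rng(0)

M = 5                                    # 1 active + 4 parasitic elements
# mutual impedance matrix: an invented placeholder (in practice obtained
# from electromagnetic simulation or measurement of the array)
Zm = rng.normal(size=(M, M)) + 1j * rng.normal(size=(M, M))
Z = (Zm + Zm.T) / 2 + 50 * np.eye(M)

i_ideal = rng.normal(size=M) + 1j * rng.normal(size=M)   # target currents

# quantize continuous load reactances to a coarse grid (assumed values)
x_cont = rng.uniform(-100, 100, M - 1)
grid = np.arange(-100, 101, 25.0)
x_q = grid[np.argmin(np.abs(x_cont[:, None] - grid[None, :]), axis=1)]

Zq = Z + np.diag(np.concatenate(([0.0], 1j * x_q)))
a = np.linalg.inv(Zq)[:, 0]              # currents per unit feeding voltage

# single-fed: only v0 is free, so min_v0 ||a*v0 - i_ideal||^2 has the
# closed-form solution below
v0 = (a.conj() @ i_ideal) / (a.conj() @ a)
print("residual current MSE:", np.mean(np.abs(a * v0 - i_ideal) ** 2))
```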
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-way massive MIMO with maximum-ratio processing and imperfect CSI.\n \n \n \n \n\n\n \n Ho, C. D.; Ngo, H. Q.; Matthaiou, M.; and Duong, T. Q.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1704-1708, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-wayPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081500,\n  author = {C. D. Ho and H. Q. Ngo and M. Matthaiou and T. Q. Duong},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-way massive MIMO with maximum-ratio processing and imperfect CSI},\n  year = {2017},\n  pages = {1704-1708},\n  abstract = {This paper considers a multi-way massive multiple-input multiple-output amplify-and-forward relaying system, where single-antenna users exchange their information-bearing signals with the assistance of one relay station equipped with unconventionally many antennas. The relay first estimates the channels to all users through the pilot signals transmitted from them. Then, the relay uses maximum-ratio processing (i.e. maximum-ratio combining in the multiple-access phase and maximum-ratio transmission in the broadcast phase) to process the signals. A rigorous closed-form expression for the spectral efficiency is derived. We show that by deploying massive antenna arrays at the relay and simple maximum-ratio processing, we can serve many users in the same time-frequency resource, while maintaining a given quality-of-service for each user.},\n  keywords = {antenna arrays;MIMO communication;relay networks (telecommunication);wireless channels;multiway massive MIMO;maximum-ratio processing;multiway massive multiple-input multiple-output;information-bearing signals;relay station;maximum-ratio combining;maximum-ratio transmission;massive antenna arrays;antennas;Relays;MIMO;Channel estimation;Antennas;Fading channels;Closed-form solutions;Time-frequency analysis;Channel state information;massive MIMO;multi-way relay networks},\n  doi = {10.23919/EUSIPCO.2017.8081500},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341207.pdf},\n}\n\n
\n
\n\n\n
\n This paper considers a multi-way massive multiple-input multiple-output amplify-and-forward relaying system, where single-antenna users exchange their information-bearing signals with the assistance of one relay station equipped with unconventionally many antennas. The relay first estimates the channels to all users through the pilot signals transmitted from them. Then, the relay uses maximum-ratio processing (i.e. maximum-ratio combining in the multiple-access phase and maximum-ratio transmission in the broadcast phase) to process the signals. A rigorous closed-form expression for the spectral efficiency is derived. We show that by deploying massive antenna arrays at the relay and simple maximum-ratio processing, we can serve many users in the same time-frequency resource, while maintaining a given quality-of-service for each user.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Scheduling and precoding in hybrid analog-digital multiantenna spectrum sharing systems.\n \n \n \n \n\n\n \n Vázquez, M. Á.; Pérez-Neira, A.; Corvaja, R.; Armada, A. G.; and Lagunas, M. Á.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1709-1713, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SchedulingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081501,\n  author = {M. Á. Vázquez and A. Pérez-Neira and R. Corvaja and A. G. Armada and M. Á. Lagunas},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Scheduling and precoding in hybrid analog-digital multiantenna spectrum sharing systems},\n  year = {2017},\n  pages = {1709-1713},\n  abstract = {This paper investigates scheduling and precoding techniques via hybrid analog-digital transmitters in mm-wave frequency bands. As in spectrum sharing sub-6GHz scenarios, the presence of non-intended receivers limits the overall achievable rates. In order to circumvent this problem, we propose a scheduling and precoding algorithm able to maximize the sum-rate while keeping the interference to the external users under a certain threshold. The method consists of a first scheduling algorithm followed by the optimization of the analog and digital beamforming parts. Numerical simulations validate the conceived technique and they show that data rates are increased compared to current designs.},\n  keywords = {array signal processing;cognitive radio;millimetre wave antenna arrays;optimisation;precoding;radio spectrum management;radio transmitters;radiofrequency interference;telecommunication scheduling;precoding techniques;hybrid analog-digital transmitters;mm-wave frequency bands;nonintended receivers;scheduling algorithm;data rates;digital beamforming;hybrid analog-digital multiantenna spectrum sharing system;sum-rate maximization;analog beamforming;numerical simulations;interference;Array signal processing;Precoding;Receivers;Job shop scheduling;Analog-digital conversion;Interference},\n  doi = {10.23919/EUSIPCO.2017.8081501},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341612.pdf},\n}\n\n
\n
\n\n\n
\n This paper investigates scheduling and precoding techniques via hybrid analog-digital transmitters in mm-wave frequency bands. As in spectrum sharing sub-6GHz scenarios, the presence of non-intended receivers limits the overall achievable rates. In order to circumvent this problem, we propose a scheduling and precoding algorithm able to maximize the sum-rate while keeping the interference to the external users under a certain threshold. The method consists of a first scheduling algorithm followed by the optimization of the analog and digital beamforming parts. Numerical simulations validate the conceived technique and they show that data rates are increased compared to current designs.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A novel hybrid analog-digital transmitter for multi-antenna base stations.\n \n \n \n \n\n\n \n Sedaghat, M. A.; Gade, B.; Müller, R. R.; and Fischer, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1714-1718, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081502,\n  author = {M. A. Sedaghat and B. Gade and R. R. Müller and G. Fischer},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A novel hybrid analog-digital transmitter for multi-antenna base stations},\n  year = {2017},\n  pages = {1714-1718},\n  abstract = {A new hybrid Analog-Digital (hybrid A-D) precoder is proposed for multi-antenna base stations in massive Multiple-Input Multiple-Output (MIMO) which allows a trade-off between the number of required RF-chains and the update rate of the analog part. It is shown that the number of RF-chains can be reduced even below the number of eigenmodes of the channel, thereby closing the gap between the standard hybrid A-D and the single-RF MIMO. This is achieved by dividing the input data streams into blocks and jointly optimizing the digital and the analog precoder parts for each block. The analog part of the precoder needs to be updated once per block and remains static over each block interval. Out of band radiation due to switching is resolved by inserting a short guard interval between blocks. It is shown the number of RF-chains can be any arbitrary positive integer to obtain zero distortion at the user terminals if the update rate is high enough. The proposed precoder offers a significant performance gain at the expenses of data dependent precoding and higher update rates of the analog part.},\n  keywords = {antenna arrays;MIMO communication;optimisation;precoding;radio transmitters;novel hybrid analog-digital transmitter;multiantenna base stations;block interval;data dependent precoding;higher update rates;analog precoder;massive Multiple-Input Multiple-Output system;MIMO system;MIMO;Base stations;Distortion;Precoding;Radio frequency;Antennas;Baseband},\n  doi = {10.23919/EUSIPCO.2017.8081502},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345863.pdf},\n}\n\n
\n
\n\n\n
\n A new hybrid Analog-Digital (hybrid A-D) precoder is proposed for multi-antenna base stations in massive Multiple-Input Multiple-Output (MIMO) which allows a trade-off between the number of required RF-chains and the update rate of the analog part. It is shown that the number of RF-chains can be reduced even below the number of eigenmodes of the channel, thereby closing the gap between the standard hybrid A-D and the single-RF MIMO. This is achieved by dividing the input data streams into blocks and jointly optimizing the digital and the analog precoder parts for each block. The analog part of the precoder needs to be updated once per block and remains static over each block interval. Out-of-band radiation due to switching is avoided by inserting a short guard interval between blocks. It is shown that the number of RF-chains can be any positive integer while obtaining zero distortion at the user terminals, provided the update rate is high enough. The proposed precoder offers a significant performance gain at the expense of data-dependent precoding and a higher update rate of the analog part.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-antenna transmission using ESPAR with peak power constraints.\n \n \n \n \n\n\n \n Zhou, L.; Khan, F. A.; Ratnarajah, T.; and Papadias, C. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1719-1723, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-antennaPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081503,\n  author = {L. Zhou and F. A. Khan and T. Ratnarajah and C. B. Papadias},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-antenna transmission using ESPAR with peak power constraints},\n  year = {2017},\n  pages = {1719-1723},\n  abstract = {Electronically steerable parasitic array radiator (ESPAR) technology provides multi-antenna transmission with a single radio frequency (RF) unit. In order to achieve stable transmission using an ESPAR antenna (EA), two approaches have been proposed in literature. One is to increase the self-resistance of an EA, the other is to transmit signals closely approximating the actual signals that keep the EA stable. In both approaches, no constraint on the transmission power of an EA was considered. This is not the case in actual systems, as the practical power amplifier normally has limited peak power. Taking into account the limited power availability, an optimization problem is formulated with the objective to minimize the MSE between the currents corresponding to the ideal and the approximate transmission signals. The non-convex problem is solved analytically by coordination transformation and a novel algorithm is proposed. It is shown that the system employing the proposed transmission scheme gives similar performance to that of a standard multiple antenna system, especially at low SNRs. In addition, it is shown that increasing the self-resistance of an EA to achieve stability is highly power inefficient.},\n  keywords = {antenna arrays;mean square error methods;optimisation;transmission scheme;standard multiple antenna system;multiantenna transmission;peak power constraints;single radio frequency unit;stable transmission;ESPAR antenna;transmission power;electronically steerable parasitic array radiator technology;Optimization;Transmitting antennas;Radio frequency;Impedance;Resistance;Reconfigurable antenna;ESPAR;MIMO transmission;single RF chain;optimization},\n  doi = {10.23919/EUSIPCO.2017.8081503},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347460.pdf},\n}\n\n
\n
\n\n\n
\n Electronically steerable parasitic array radiator (ESPAR) technology provides multi-antenna transmission with a single radio frequency (RF) unit. In order to achieve stable transmission using an ESPAR antenna (EA), two approaches have been proposed in the literature. One is to increase the self-resistance of the EA; the other is to transmit signals closely approximating the actual signals that keep the EA stable. In both approaches, no constraint on the transmission power of the EA was considered. This is not the case in actual systems, as a practical power amplifier normally has limited peak power. Taking into account the limited power availability, an optimization problem is formulated with the objective of minimizing the MSE between the currents corresponding to the ideal and the approximate transmission signals. The non-convex problem is solved analytically by a coordinate transformation, and a novel algorithm is proposed. It is shown that the system employing the proposed transmission scheme gives similar performance to that of a standard multiple antenna system, especially at low SNRs. In addition, it is shown that increasing the self-resistance of the EA to achieve stability is highly power inefficient.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Large load-controlled multiple-active multiple-passive antenna arrays: Transmit beamforming and multi-user precoding.\n \n \n \n \n\n\n \n Ntougias, K.; Ntaikos, D.; Gizas, B.; Papageorgiou, G. K.; and Papadias, C. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1724-1728, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LargePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081504,\n  author = {K. Ntougias and D. Ntaikos and B. Gizas and G. K. Papageorgiou and C. B. Papadias},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Large load-controlled multiple-active multiple-passive antenna arrays: Transmit beamforming and multi-user precoding},\n  year = {2017},\n  pages = {1724-1728},\n  abstract = {In this work, we present the design of a novel large load-controlled multiple-active multiple-passive (LC-MAMP) antenna array that operates at 19.25 GHz. In addition, we describe a method that enables us to perform robust, low-complexity, arbitrary channel-dependent precoding with such arrays as well as a communication protocol that limits the computational complexity associated with beam tracking and dynamic load computation in static or low-mobility scenarios, such as indoor wireless access or wireless terrestrial backhaul use cases. Finally, we study the application of various user- and symbol-level precoding schemes in coordinated multiple-input multiple-output setups equipped with LC-MAMP arrays and we evaluate their performance through numerical simulations using a realistic channel model. The simulation results show that LC-MAMPs outperform equivalent digital antenna arrays.},\n  keywords = {antenna arrays;array signal processing;channel coding;computational complexity;indoor radio;microwave antennas;MIMO communication;mobile antennas;mobility management (mobile radio);precoding;protocols;wireless channels;arbitrary channel-dependent precoding;computational complexity;dynamic load computation;low-mobility scenarios;indoor wireless access;wireless terrestrial backhaul use cases;symbol-level precoding schemes;LC-MAMP arrays;equivalent digital antenna arrays;load-controlled multiple-active multiple-passive antenna arrays;communication protocol;user-level precoding schemes;channel model;frequency 19.25 GHz;Antenna arrays;Precoding;Interference;Loaded antennas;Wireless communication;Signal to noise ratio;Transmitting antennas;Load-controlled multiple-active multiple-passive arrays (LC-MAMP);coordinated multi-cell multiple-input multiple-output (Co-MC-MIMO);constructive-interference zero-forcing beamforming (CI-ZFBF);centimetre-wave (cm-wave) access;terrestrial backhaul},\n  doi = {10.23919/EUSIPCO.2017.8081504},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347677.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we present the design of a novel large load-controlled multiple-active multiple-passive (LC-MAMP) antenna array that operates at 19.25 GHz. In addition, we describe a method that enables us to perform robust, low-complexity, arbitrary channel-dependent precoding with such arrays as well as a communication protocol that limits the computational complexity associated with beam tracking and dynamic load computation in static or low-mobility scenarios, such as indoor wireless access or wireless terrestrial backhaul use cases. Finally, we study the application of various user- and symbol-level precoding schemes in coordinated multiple-input multiple-output setups equipped with LC-MAMP arrays and we evaluate their performance through numerical simulations using a realistic channel model. The simulation results show that LC-MAMPs outperform equivalent digital antenna arrays.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stacked convolutional and recurrent neural networks for bird audio detection.\n \n \n \n \n\n\n \n Adavanne, S.; Drossos, K.; Çakir, E.; and Virtanen, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1729-1733, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"StackedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081505,\n  author = {S. Adavanne and K. Drossos and E. Çakir and T. Virtanen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Stacked convolutional and recurrent neural networks for bird audio detection},\n  year = {2017},\n  pages = {1729-1733},\n  abstract = {This paper studies the detection of bird calls in audio segments using stacked convolutional and recurrent neural networks. Data augmentation by blocks mixing and domain adaptation using a novel method of test mixing are proposed and evaluated in regard to making the method robust to unseen data. The contributions of two kinds of acoustic features (dominant frequency and log mel-band energy) and their combinations are studied in the context of bird audio detection. Our best achieved AUC measure on five cross-validations of the development data is 95.5% and 88.1% on the unseen evaluation data.},\n  keywords = {audio signal processing;biology computing;convolution;feature extraction;learning (artificial intelligence);recurrent neural nets;zoology;stacked convolutional networks;recurrent neural networks;bird audio detection;audio segments;data augmentation;test mixing;bird call detection;Birds;Feature extraction;Training;Recurrent neural networks;Harmonic analysis},\n  doi = {10.23919/EUSIPCO.2017.8081505},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341578.pdf},\n}\n\n
\n
\n\n\n
\n This paper studies the detection of bird calls in audio segments using stacked convolutional and recurrent neural networks. Data augmentation by blocks mixing and domain adaptation using a novel method of test mixing are proposed and evaluated in regard to making the method robust to unseen data. The contributions of two kinds of acoustic features (dominant frequency and log mel-band energy) and their combinations are studied in the context of bird audio detection. Our best achieved AUC measure on five cross-validations of the development data is 95.5% and 88.1% on the unseen evaluation data.\n
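A minimal PyTorch sketch of the general architecture this abstract describes (layer counts and sizes are invented): convolutional blocks pool over frequency only, a recurrent layer runs over time, and the clip-level score is the maximum of the frame-wise probabilities.

```python
import torch
import torch.nn as nn

class CRNN(nn.Module):
    """Conv layers pool over frequency only; a GRU runs over time; the clip
    score is the max over frame-wise probabilities. Sizes are illustrative."""
    def __init__(self, n_mels=40):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d((1, 5)),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d((1, 4)),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d((1, 2)),
        )  # 40 mel bands -> 40/5/4/2 = 1 frequency bin
        self.gru = nn.GRU(32, 32, batch_first=True, bidirectional=True)
        self.head = nn.Linear(64, 1)

    def forward(self, x):                        # x: (batch, time, mels)
        z = self.conv(x.unsqueeze(1))            # (B, 32, T, 1)
        z = z.squeeze(-1).transpose(1, 2)        # (B, T, 32)
        z, _ = self.gru(z)                       # (B, T, 64)
        frame_prob = torch.sigmoid(self.head(z)).squeeze(-1)   # (B, T)
        return frame_prob.max(dim=1).values      # clip-level probability

model = CRNN()
print(model(torch.randn(2, 500, 40)).shape)      # -> torch.Size([2])
```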
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Densely connected CNNs for bird audio detection.\n \n \n \n \n\n\n \n Pellegrini, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1734-1738, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DenselyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081506,\n  author = {T. Pellegrini},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Densely connected CNNs for bird audio detection},\n  year = {2017},\n  pages = {1734-1738},\n  abstract = {Detecting bird sounds in audio recordings automatically, if accurate enough, is expected to be of great help to the research community working in bio- and ecoacoustics, interested in monitoring biodiversity based on audio field recordings. To estimate how accurate the state-of-the-art machine learning approaches are, the Bird Audio Detection challenge involving large audio datasets was recently organized. In this paper, experiments using several types of convolutional neural networks (i.e. standard CNNs, residual nets and densely connected nets) are reported in the framework of this challenge. DenseNets were the preferred solution since they were the best performing and most compact models, leading to a 88.22% area under the receiver operator curve score on the test set of the challenge (ranked 3rd/30)1. Performance gains were obtained thank to data augmentation through time and frequency shifting, model parameter averaging during training and ensemble methods using the geometric mean. On the contrary, the attempts to enlarge the training dataset with samples of the test set with automatic predictions used as pseudo-groundtruth labels consistently degraded performance.},\n  keywords = {audio recording;audio signal processing;fuzzy neural nets;learning (artificial intelligence);bird sounds;audio recordings;audio datasets;convolutional neural networks;residual nets;receiver operator curve score;machine learning approaches;bird audio detection;densely-connected CNN;DenseNets;performance gains;data augmentation;time shifting;frequency shifting;pseudogroundtruth labels;Birds;Training;Convolution;Feature extraction;Standards;Time-frequency analysis},\n  doi = {10.23919/EUSIPCO.2017.8081506},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342502.pdf},\n}\n\n
\n
\n\n\n
\n Detecting bird sounds in audio recordings automatically, if accurate enough, is expected to be of great help to the research community working in bio- and ecoacoustics, interested in monitoring biodiversity based on audio field recordings. To estimate how accurate the state-of-the-art machine learning approaches are, the Bird Audio Detection challenge involving large audio datasets was recently organized. In this paper, experiments using several types of convolutional neural networks (i.e. standard CNNs, residual nets and densely connected nets) are reported in the framework of this challenge. DenseNets were the preferred solution since they were the best performing and most compact models, leading to an 88.22% area under the receiver operating characteristic curve score on the test set of the challenge (ranked 3rd/30). Performance gains were obtained thanks to data augmentation through time and frequency shifting, model parameter averaging during training and ensemble methods using the geometric mean. On the contrary, attempts to enlarge the training dataset with samples of the test set, with automatic predictions used as pseudo-groundtruth labels, consistently degraded performance.\n
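The time- and frequency-shifting augmentation mentioned above can be sketched as follows (the use of circular shifts and the shift ranges are assumptions of this sketch):

```python
import numpy as np

def shift_augment(spec, rng, max_t=20, max_f=4):
    """Randomly shift a (time, mel) log spectrogram along both axes.
    Circular shifts keep the array shape; ranges are assumed values."""
    dt = int(rng.integers(-max_t, max_t + 1))
    df = int(rng.integers(-max_f, max_f + 1))
    return np.roll(np.roll(spec, dt, axis=0), df, axis=1)

rng = np.random.default_rng(0)
spec = rng.normal(size=(500, 40))        # placeholder log mel spectrogram
aug = shift_augment(spec, rng)
print(aug.shape)                         # -> (500, 40)
```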
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Classification of bird song syllables using Wigner-Ville ambiguity function cross-terms.\n \n \n \n \n\n\n \n Sandsten, M.; and Brynolfsson, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1739-1743, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ClassificationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081507,\n  author = {M. Sandsten and J. Brynolfsson},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Classification of bird song syllables using Wigner-Ville ambiguity function cross-terms},\n  year = {2017},\n  pages = {1739-1743},\n  abstract = {A novel feature extraction method for low-dimensional signal representation is presented. The features are useful for classification of non-stationary multi-component signals with stochastic variation in amplitudes and time-frequency locations. Using a penalty function to suppress the Wigner-Ville ambiguity function auto-terms, the proposed feature set is based on the cross-term doppler- and lag profiles. The investigation considers classification where strong similar components appear in all signals and where the differences between classes are related to weaker components. The approach is evaluated and compared with established methods for simulated data and bird song syllables of the great reed warbler. The results show that the novel feature extraction method gives a better classification than established methods used in bird song analysis.},\n  keywords = {feature extraction;signal classification;signal representation;stochastic processes;penalty function;Wigner-Ville ambiguity function auto-terms;cross-term doppler;lag profiles;bird song syllables;bird song analysis;low-dimensional signal representation;feature extraction method;Wigner-Ville ambiguity function cross-terms;nonstationary multicomponent signal classification;great reed warbler;stochastic variation;Time-frequency analysis;Mel frequency cepstral coefficient;Spectrogram;Birds;Signal to noise ratio;Jitter;Kernel},\n  doi = {10.23919/EUSIPCO.2017.8081507},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342748.pdf},\n}\n\n
\n
\n\n\n
\n A novel feature extraction method for low-dimensional signal representation is presented. The features are useful for classification of non-stationary multi-component signals with stochastic variation in amplitudes and time-frequency locations. Using a penalty function to suppress the Wigner-Ville ambiguity function auto-terms, the proposed feature set is based on the cross-term Doppler and lag profiles. The investigation considers classification where strong similar components appear in all signals and where the differences between classes are related to weaker components. The approach is evaluated and compared with established methods for simulated data and bird song syllables of the great reed warbler. The results show that the novel feature extraction method gives better classification than established methods used in bird song analysis.\n
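A rough sketch of the feature construction on a toy two-component signal: the discrete ambiguity function is computed from the instantaneous autocorrelation, the auto-terms (which concentrate near zero Doppler for near-stationary components) are masked out with a crude box mask standing in for the paper's penalty function, and the remaining cross-term mass is summed into Doppler and lag profiles.

```python
import numpy as np

def ambiguity(z):
    """Discrete (one-sided lag) ambiguity function magnitude |A(doppler, lag)|."""
    n = len(z)
    A = np.zeros((n, n // 2), dtype=complex)
    for lag in range(n // 2):
        r = np.zeros(n, dtype=complex)
        r[:n - lag] = z[lag:] * np.conj(z[:n - lag])   # instantaneous correlation
        A[:, lag] = np.fft.fft(r)                      # FFT over time -> doppler
    return np.abs(np.fft.fftshift(A, axes=0))          # center the doppler axis

rng = np.random.default_rng(0)
n = 256
t = np.arange(n)
# toy "syllable" with two tonal components; their cross-terms carry the feature
z = np.exp(2j * np.pi * 0.10 * t) + np.exp(2j * np.pi * 0.25 * t) \
    + 0.1 * (rng.normal(size=n) + 1j * rng.normal(size=n))

A = ambiguity(z)
# suppress auto-terms near zero doppler (box mask as a stand-in penalty)
A[np.abs(np.arange(n) - n // 2) < 8, :] = 0.0
doppler_profile = A.sum(axis=1)          # cross-term doppler profile (feature)
lag_profile = A.sum(axis=0)              # cross-term lag profile (feature)
print(doppler_profile.shape, lag_profile.shape)
```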
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Convolutional recurrent neural networks for bird audio detection.\n \n \n \n \n\n\n \n Cakir, E.; Adavanne, S.; Parascandolo, G.; Drossos, K.; and Virtanen, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1744-1748, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ConvolutionalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081508,\n  author = {E. Cakir and S. Adavanne and G. Parascandolo and K. Drossos and T. Virtanen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Convolutional recurrent neural networks for bird audio detection},\n  year = {2017},\n  pages = {1744-1748},\n  abstract = {Bird sounds possess distinctive spectral structure which may exhibit small shifts in spectrum depending on the bird species and environmental conditions. In this paper, we propose using convolutional recurrent neural networks on the task of automated bird audio detection in real-life environments. In the proposed method, convolutional layers extract high dimensional, local frequency shift invariant features, while recurrent layers capture longer term dependencies between the features extracted from short time frames. This method achieves 88.5% Area Under ROC Curve (AUC) score on the unseen evaluation data and obtains the second place in the Bird Audio Detection challenge.},\n  keywords = {audio signal processing;feature extraction;recurrent neural nets;convolutional recurrent neural networks;bird species;environmental conditions;automated bird audio detection;convolutional layers;Bird Audio Detection challenge;bird sounds;spectral structure;bird audio detection;high-dimensional local-frequency shift invariant feature extraction;area under ROC curve score;AUC score;Birds;Convolution;Feature extraction;Acoustics;Recurrent neural networks;Time-frequency analysis;Electronic mail},\n  doi = {10.23919/EUSIPCO.2017.8081508},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342815.pdf},\n}\n\n
\n
\n\n\n
\n Bird sounds possess distinctive spectral structure which may exhibit small shifts in spectrum depending on the bird species and environmental conditions. In this paper, we propose using convolutional recurrent neural networks on the task of automated bird audio detection in real-life environments. In the proposed method, convolutional layers extract high dimensional, local frequency shift invariant features, while recurrent layers capture longer term dependencies between the features extracted from short time frames. This method achieves 88.5% Area Under ROC Curve (AUC) score on the unseen evaluation data and obtains the second place in the Bird Audio Detection challenge.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint detection and classification convolutional neural network on weakly labelled bird audio detection.\n \n \n \n \n\n\n \n Kong, Q.; Xu, Y.; and Plumbley, M. D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1749-1753, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081509,\n  author = {Q. Kong and Y. Xu and M. D. Plumbley},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint detection and classification convolutional neural network on weakly labelled bird audio detection},\n  year = {2017},\n  pages = {1749-1753},\n  abstract = {Bird audio detection (BAD) aims to detect whether there is a bird call in an audio recording or not. One difficulty of this task is that the bird sound datasets are weakly labelled, that is only the presence or absence of a bird in a recording is known, without knowing when the birds call. We propose to apply joint detection and classification (JDC) model on the weakly labelled data (WLD) to detect and classify an audio clip at the same time. First, we apply VGG like convolutional neural network (CNN) on mel spectrogram as baseline. Then we propose a JDC-CNN model with VGG as a classifier and CNN as a detector. We report the denoising method including optimally-modified log-spectral amplitude (OM-LSA), median filter and spectral spectrogram will worse the classification accuracy on the contrary to previous work. JDC-CNN can predict the time stamps of the events from weakly labelled data, so is able to do sound event detection from WLD. We obtained area under curve (AUC) of 95.70% on the development data and 81.36% on the unseen evaluation data, which is nearly comparable to the baseline CNN model.},\n  keywords = {acoustic signal detection;acoustic signal processing;audio recording;audio signal processing;feature extraction;image denoising;learning (artificial intelligence);median filters;neural nets;signal classification;speech enhancement;bird call;audio recording;bird sound datasets;weakly labelled data;audio clip;JDC-CNN model;optimally-modified log-spectral amplitude;sound event detection;baseline CNN model;classification convolutional neural network;weakly labelled bird audio detection;spectral spectrogram;median filter;denoising method;mel spectrogram;joint detection and classification},\n  doi = {10.23919/EUSIPCO.2017.8081509},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343626.pdf},\n}\n\n
\n
\n\n\n
\n Bird audio detection (BAD) aims to detect whether there is a bird call in an audio recording or not. One difficulty of this task is that the bird sound datasets are weakly labelled, that is, only the presence or absence of a bird in a recording is known, without knowing when the birds call. We propose to apply a joint detection and classification (JDC) model on the weakly labelled data (WLD) to detect and classify an audio clip at the same time. First, we apply a VGG-like convolutional neural network (CNN) on the mel spectrogram as a baseline. Then we propose a JDC-CNN model with VGG as a classifier and CNN as a detector. We report that denoising methods, including optimally-modified log-spectral amplitude (OM-LSA), median filtering and spectral spectrogram denoising, worsen the classification accuracy, contrary to previous work. JDC-CNN can predict the time stamps of the events from weakly labelled data, so it is able to perform sound event detection from WLD. We obtained an area under the curve (AUC) of 95.70% on the development data and 81.36% on the unseen evaluation data, which is nearly comparable to the baseline CNN model.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rapid bird activity detection using probabilistic sequence kernels.\n \n \n \n \n\n\n \n Thakur, A.; Jyothi, R.; Rajan, P.; and Dileep, A. D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1754-1758, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RapidPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081510,\n  author = {A. Thakur and R. Jyothi and P. Rajan and A. D. Dileep},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Rapid bird activity detection using probabilistic sequence kernels},\n  year = {2017},\n  pages = {1754-1758},\n  abstract = {Bird activity detection is the task of determining if a bird sound is present in a given audio recording. This paper describes a bird activity detector which utilises a support vector machine (SVM) with a dynamic kernel. Dynamic kernels are used to process sets of feature vectors having different cardinalities. Probabilistic sequence kernel (PSK) is one such dynamic kernel. The PSK converts a set of feature vectors from a recording into a fixed-length vector. We propose to use a variant of PSK in this work. Before computing the fixed-length vector, cepstral mean and variance normalisation and short-time Gaussianization is performed on the feature vectors. This reduces environment mismatch between different recordings. Additionally, we also demonstrate a simple procedure to speed up the proposed method by reducing the size of fixed-length vector. A speedup of almost 70% is observed, with a very small drop in accuracy. The proposed method is also compared with a random forest classifier and is shown to outperform it.},\n  keywords = {audio recording;audio signal processing;cepstral analysis;feature extraction;Gaussian processes;random processes;signal classification;support vector machines;vectors;rapid bird activity detection;probabilistic sequence kernel;bird sound;bird activity detector;support vector machine;dynamic kernel;feature vectors;PSK;fixed-length vector;cepstral mean;variance normalisation;audio recording;SVM;short-time Gaussianization;random forest classifier;Birds;Phase shift keying;Probabilistic logic;Kernel;Audio recording;Mel frequency cepstral coefficient;Training},\n  doi = {10.23919/EUSIPCO.2017.8081510},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345165.pdf},\n}\n\n
\n
\n\n\n
\n Bird activity detection is the task of determining if a bird sound is present in a given audio recording. This paper describes a bird activity detector which utilises a support vector machine (SVM) with a dynamic kernel. Dynamic kernels are used to process sets of feature vectors having different cardinalities. The probabilistic sequence kernel (PSK) is one such dynamic kernel. The PSK converts a set of feature vectors from a recording into a fixed-length vector. We propose to use a variant of PSK in this work. Before computing the fixed-length vector, cepstral mean and variance normalisation and short-time Gaussianization are performed on the feature vectors. This reduces environment mismatch between different recordings. Additionally, we also demonstrate a simple procedure to speed up the proposed method by reducing the size of the fixed-length vector. A speedup of almost 70% is observed, with a very small drop in accuracy. The proposed method is also compared with a random forest classifier and is shown to outperform it.\n
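A minimal sketch of cepstral mean and variance normalisation (CMVN), one of the two per-recording normalisations applied before the PSK fixed-length vector is computed (short-time Gaussianization is omitted here):

    import numpy as np

    # CMVN: normalise each feature dimension to zero mean and unit
    # variance within one recording, reducing environment mismatch.
    def cmvn(feats, eps=1e-8):
        mu = feats.mean(axis=0)
        sigma = feats.std(axis=0)
        return (feats - mu) / (sigma + eps)

    feats = np.random.randn(500, 13) * 3.0 + 1.5   # e.g. 13 MFCCs/frame
    norm = cmvn(feats)
    print(norm.mean(axis=0).round(6), norm.std(axis=0).round(6))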
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic frequency feature extraction for bird species delimitation.\n \n \n \n \n\n\n \n O'Reilly, C.; Köküer, M.; Jančović, P.; Drennan, R.; and Harte, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1759-1763, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081511,\n  author = {C. O'Reilly and M. Köküer and P. Jančović and R. Drennan and N. Harte},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic frequency feature extraction for bird species delimitation},\n  year = {2017},\n  pages = {1759-1763},\n  abstract = {Zoologists have long studied species distinctions, but until recently a quantitative system which could be applied to all birds which satisfies rigor and repeatability was absent from the zoology literature. A system which uses morphology, acoustic and plumage evidence to review species status of bird populations was presented by Tobias et al. The acoustic evidence in that work was extracted using manual inspection of spectrograms. The current work seeks to automate this process. Signal processing techniques are employed in this paper to automate the extraction of the acoustic features: maximum, minimum and peak frequency, and bandwidth. YIN-bird, a pitch detection algorithm optimized for birds, and sine-track method, successfully applied to bird species recognition previously, are the automatic methods employed. The performance of automatic methods is compared to the manual method currently used by zoologists. Both methods are well suited to this task, and demonstrate the strong potential to begin to automate the task of acoustic comparison of bird species.},\n  keywords = {acoustic signal processing;feature extraction;zoology;zoology;bird species recognition;sine-track method;pitch detection algorithm;signal processing techniques;bird species delimitation;automatic frequency feature extraction;Birds;Feature extraction;Acoustics;Sociology;Statistics;Frequency measurement;Pitch;bird song;sinusoidal tracking},\n  doi = {10.23919/EUSIPCO.2017.8081511},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346420.pdf},\n}\n\n
\n
\n\n\n
\n Zoologists have long studied species distinctions, but until recently a quantitative system that could be applied to all birds while satisfying rigor and repeatability was absent from the zoology literature. A system which uses morphology, acoustic and plumage evidence to review the species status of bird populations was presented by Tobias et al. The acoustic evidence in that work was extracted using manual inspection of spectrograms. The current work seeks to automate this process. Signal processing techniques are employed in this paper to automate the extraction of the acoustic features: maximum, minimum and peak frequency, and bandwidth. The automatic methods employed are YIN-bird, a pitch detection algorithm optimized for birds, and the sine-track method, previously applied with success to bird species recognition. The performance of the automatic methods is compared to the manual method currently used by zoologists. Both methods are well suited to this task, and demonstrate the strong potential to begin to automate the acoustic comparison of bird species.\n
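A hedged sketch of automating the listed acoustic features from a spectrogram: peak frequency, plus maximum/minimum frequency and bandwidth taken from the band whose time-averaged energy stays within a threshold of the peak. The -20 dB threshold is an illustrative choice, not from the paper.

    import numpy as np
    from scipy.signal import spectrogram

    fs = 22050
    t = np.arange(fs) / fs
    x = np.sin(2 * np.pi * 3000 * t) + 0.01 * np.random.randn(fs)  # test tone

    f, _, S = spectrogram(x, fs=fs, nperseg=1024)
    power = S.mean(axis=1)                   # time-averaged spectrum
    peak_hz = f[np.argmax(power)]

    db = 10 * np.log10(power / power.max())
    band = f[db > -20.0]                     # bins within 20 dB of the peak
    f_min, f_max = band.min(), band.max()
    bandwidth = f_max - f_min
    print(peak_hz, f_min, f_max, bandwidth)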
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Two convolutional neural networks for bird detection in audio signals.\n \n \n \n \n\n\n \n Grill, T.; and Schlüter, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1764-1768, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"TwoPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081512,\n  author = {T. Grill and J. Schlüter},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Two convolutional neural networks for bird detection in audio signals},\n  year = {2017},\n  pages = {1764-1768},\n  abstract = {We present and compare two approaches to detect the presence of bird calls in audio recordings using convolutional neural networks on mel spectrograms. In a signal processing challenge using environmental recordings from three very different sources, only two of them available for supervised training, we obtained an Area Under Curve (AUC) measure of 89% on the hidden test set, higher than any other contestant. By comparing multiple variations of our systems, we find that despite very different architectures, both approaches can be tuned to perform equally well. Further improvements will likely require a radically different approach to dealing with the discrepancy between data sources.},\n  keywords = {audio recording;audio signal processing;feature extraction;learning (artificial intelligence);neural nets;convolutional neural networks;bird detection;audio signals;bird calls;audio recordings;mel spectrograms;environmental recordings;supervised training;area under curve measure;AUC measure;Birds;Training;Spectrogram;Convolution;Training data;Computer architecture},\n  doi = {10.23919/EUSIPCO.2017.8081512},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347092.pdf},\n}\n\n
\n
\n\n\n
\n We present and compare two approaches to detect the presence of bird calls in audio recordings using convolutional neural networks on mel spectrograms. In a signal processing challenge using environmental recordings from three very different sources, only two of them available for supervised training, we obtained an Area Under Curve (AUC) measure of 89% on the hidden test set, higher than any other contestant. By comparing multiple variations of our systems, we find that despite very different architectures, both approaches can be tuned to perform equally well. Further improvements will likely require a radically different approach to dealing with the discrepancy between data sources.\n
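The Area Under the ROC Curve (AUC) quoted above is straightforward to compute; a quick, self-contained illustration with made-up clip scores and labels (synthetic, not the challenge data):

    import numpy as np
    from sklearn.metrics import roc_auc_score

    # Synthetic clip-level labels (bird / no bird) and detector scores.
    labels = np.array([0, 0, 1, 1, 1, 0, 1, 0])
    scores = np.array([0.1, 0.4, 0.8, 0.35, 0.9, 0.2, 0.7, 0.6])
    print(roc_auc_score(labels, scores))   # 1.0 would be a perfect ranking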
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Masked Non-negative Matrix Factorization for Bird Detection Using Weakly Labeled Data.\n \n \n \n \n\n\n \n Sobieraj, I.; Kong, Q.; and Plumbley, M. D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1769-1773, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MaskedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081513,\n  author = {I. Sobieraj and Q. Kong and M. D. Plumbley},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Masked Non-negative Matrix Factorization for Bird Detection Using Weakly Labeled Data},\n  year = {2017},\n  pages = {1769-1773},\n  abstract = {Acoustic monitoring of bird species is an increasingly important field in signal processing. Many available bird sound datasets do not contain exact timestamp of the bird call but have a coarse weak label instead. Traditional Non-negative Matrix Factorization (NMF) models are not well designed to deal with weakly labeled data. In this paper we propose a novel Masked Non-negative Matrix Factorization (Masked NMF) approach for bird detection using weakly labeled data. During dictionary extraction we introduce a binary mask on the activation matrix. In that way we are able to control which parts of dictionary are used to reconstruct the training data. We compare our method with conventional NMF approaches and current state of the art methods. The proposed method outperforms the NMF baseline and offers a parsimonious model for bird detection on weakly labeled data. Moreover, to our knowledge, the proposed Masked NMF achieved the best result among non-deep learning methods on a test dataset used for the recent Bird Audio Detection Challenge.},\n  keywords = {acoustic signal processing;audio signal processing;learning (artificial intelligence);matrix decomposition;traditional nonnegative matrix factorization models;bird audio detection challenge;novel Masked Nonnegative Matrix Factorization approach;coarse weak label;available bird sound datasets;bird species;Masked NMF;bird detection;weakly labeled data;Birds;Dictionaries;Training;Matrix decomposition;Signal processing;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081513},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347329.pdf},\n}\n\n
\n
\n\n\n
\n Acoustic monitoring of bird species is an increasingly important field in signal processing. Many available bird sound datasets do not contain exact timestamps of the bird calls but have a coarse weak label instead. Traditional Non-negative Matrix Factorization (NMF) models are not well designed to deal with weakly labeled data. In this paper we propose a novel Masked Non-negative Matrix Factorization (Masked NMF) approach for bird detection using weakly labeled data. During dictionary extraction we introduce a binary mask on the activation matrix. In that way we are able to control which parts of the dictionary are used to reconstruct the training data. We compare our method with conventional NMF approaches and current state-of-the-art methods. The proposed method outperforms the NMF baseline and offers a parsimonious model for bird detection on weakly labeled data. Moreover, to our knowledge, the proposed Masked NMF achieved the best result among non-deep-learning methods on a test dataset used for the recent Bird Audio Detection Challenge.\n
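A compact numpy sketch of our reading of the masking idea: standard multiplicative-update NMF (Euclidean cost) with a binary mask M re-imposed on the activation matrix H after each update, so chosen atoms can only activate where the weak labels allow. The update rules are the usual Lee-Seung ones; the exact mask placement is an assumption, not the paper's reference code.

    import numpy as np

    def masked_nmf(V, M, k, iters=200, eps=1e-9):
        F, T = V.shape
        rng = np.random.default_rng(0)
        W = rng.uniform(size=(F, k)) + eps
        H = (rng.uniform(size=(k, T)) + eps) * M
        for _ in range(iters):
            H *= (W.T @ V) / (W.T @ W @ H + eps)
            H *= M                                 # re-impose the mask
            W *= (V @ H.T) / (W @ (H @ H.T) + eps)
        return W, H

    V = np.abs(np.random.randn(64, 100))           # magnitude spectrogram
    M = np.ones((4, 100))
    M[2:, 50:] = 0                # atoms 2-3 may only fire in frames 0-49
    W, H = masked_nmf(V, M, k=4)
    print(H[2:, 50:].max())       # stays 0: the mask is respected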
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Archetypal analysis based sparse convex sequence kernel for bird activity detection.\n \n \n \n \n\n\n \n Abrol, V.; Sharma, P.; Thakur, A.; Rajan, P.; Dileep, A. D.; and Sao, A. K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1774-1778, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ArchetypalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081514,\n  author = {V. Abrol and P. Sharma and A. Thakur and P. Rajan and A. D. Dileep and A. K. Sao},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Archetypal analysis based sparse convex sequence kernel for bird activity detection},\n  year = {2017},\n  pages = {1774-1778},\n  abstract = {This paper proposes a novel method based on the archetypal analysis (AA) for bird activity detection (BAD) task. The proposed method extracts a convex representation (frame-wise) by projecting a given audio signal on to a learned dictionary. The AA based dictionary is trained only on bird class signals, which makes the method robust to background noise. Further, it is shown that due to the inherent sparsity property of convex representations, non-bird class signals will have a denser representation as compared to the bird counterpart, which helps in effective discrimination. In order to detect presence/absence of bird vocalization, a fixed length representation is obtained by averaging the obtained frame wise representations of an audio signal. Classification of these fixed length representations is performed using support vector machines (SVM) with a dynamic kernel. In this work, we propose a variant of probabilistic sequence kernel called sparse convex sequence kernel (SCSK) for the BAD task. Experimental results show that the proposed method can efficiently discriminate bird from non-bird class signals.},\n  keywords = {audio signal processing;feature extraction;learning (artificial intelligence);signal classification;signal representation;support vector machines;archetypal analysis;sparse convex sequence kernel;convex representation;learned dictionary;AA based dictionary;bird class signals;bird vocalization;fixed length representation;frame wise representations;probabilistic sequence kernel;bird activity detection;audio signal;support vector machines;Birds;Kernel;Dictionaries;Training;Training data;Probabilistic logic;Archetypal analysis;dictionary learning;kernel methods;bird activity detection},\n  doi = {10.23919/EUSIPCO.2017.8081514},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347411.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a novel method based on archetypal analysis (AA) for the bird activity detection (BAD) task. The proposed method extracts a convex representation (frame-wise) by projecting a given audio signal onto a learned dictionary. The AA-based dictionary is trained only on bird class signals, which makes the method robust to background noise. Further, it is shown that due to the inherent sparsity property of convex representations, non-bird class signals will have a denser representation than their bird counterparts, which helps in effective discrimination. In order to detect the presence or absence of bird vocalization, a fixed-length representation is obtained by averaging the frame-wise representations of an audio signal. Classification of these fixed-length representations is performed using support vector machines (SVM) with a dynamic kernel. In this work, we propose a variant of the probabilistic sequence kernel called the sparse convex sequence kernel (SCSK) for the BAD task. Experimental results show that the proposed method can efficiently discriminate bird from non-bird class signals.\n
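A small sketch of the convex (simplex-constrained) coding step: each frame x is encoded by weights a >= 0 with sum(a) = 1 over a dictionary D. Here the sum-to-one constraint is enforced softly by augmenting a non-negative least-squares problem with a heavily weighted row of ones, a common trick that is not necessarily the paper's solver.

    import numpy as np
    from scipy.optimize import nnls

    # Simplex-constrained coding: min ||x - D a|| s.t. a >= 0, sum(a) = 1.
    def convex_code(D, x, rho=1e3):
        k = D.shape[1]
        A = np.vstack([D, rho * np.ones((1, k))])   # weighted ones row
        b = np.concatenate([x, [rho]])
        a, _ = nnls(A, b)
        return a

    rng = np.random.default_rng(1)
    D = np.abs(rng.standard_normal((20, 5)))      # learned dictionary (e.g. AA)
    x = D @ np.array([0.6, 0.4, 0.0, 0.0, 0.0])   # frame inside the hull
    a = convex_code(D, x)
    print(a.round(3), a.sum().round(3))           # sparse, sums to ~1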
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic detection of bird species from audio field recordings using HMM-based modelling of frequency tracks.\n \n \n \n \n\n\n \n Jančovič, P.; and Köküer, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1779-1783, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081515,\n  author = {P. Jančovič and M. Köküer},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic detection of bird species from audio field recordings using HMM-based modelling of frequency tracks},\n  year = {2017},\n  pages = {1779-1783},\n  abstract = {This paper presents an automatic system for detection of bird species in field recordings. A sinusoidal detection algorithm is employed to segment the acoustic scene into isolated spectro-temporal segments. Each segment is represented as a temporal sequence of frequencies of the detected sinusoid, referred to as frequency track. Each bird species is represented by a set of hidden Markov models (HMMs), each HMM modelling an individual type of bird vocalisation element. These HMMs are obtained in an unsupervised manner. The detection is based on a likelihood ratio of the test utterance against the target bird species and non-target background model. We explore on selection of cohort for modelling the background model, z-norm and t-norm score normalisation techniques and score compensation to deal with outlier data. Experiments are performed using over 40 hours of audio field recordings from 48 bird species plus an additional 16 hours of field recordings as impostor trials. Evaluations are performed using detection error trade-off plots. The equal error rate of 5% is achieved when impostor trials are non-target bird species vocalisations and 1.2% when using field recordings which do not contain bird vocalisations.},\n  keywords = {acoustic signal detection;acoustic signal processing;audio recording;bioacoustics;biocommunications;hidden Markov models;zoology;audio field recordings;nontarget bird species vocalisations;bird vocalisations;sinusoidal detection algorithm;isolated spectro-temporal segments;hidden Markov models;HMM modelling;bird vocalisation element;nontarget background model;bird species detection;frequency track modelling;acoustic scene segment;z-norm score normalisation technique;t-norm score normalisation technique;score compensation;time 40.0 hour;time 16.0 hour;Hidden Markov models;Birds;Feature extraction;Training;Acoustics;Data models;bird species detection;field recording;hidden Markov model;HMM;score normalisation;cohort;outlier;vocalisation;element;unsupervised training;sinusoid detection;sinusoidal modelling;frequency track},\n  doi = {10.23919/EUSIPCO.2017.8081515},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347643.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents an automatic system for the detection of bird species in field recordings. A sinusoidal detection algorithm is employed to segment the acoustic scene into isolated spectro-temporal segments. Each segment is represented as a temporal sequence of frequencies of the detected sinusoid, referred to as a frequency track. Each bird species is represented by a set of hidden Markov models (HMMs), each HMM modelling an individual type of bird vocalisation element. These HMMs are obtained in an unsupervised manner. The detection is based on a likelihood ratio of the test utterance against the target bird species and a non-target background model. We explore the selection of a cohort for modelling the background, z-norm and t-norm score normalisation techniques, and score compensation to deal with outlier data. Experiments are performed using over 40 hours of audio field recordings from 48 bird species plus an additional 16 hours of field recordings as impostor trials. Evaluations are performed using detection error trade-off plots. An equal error rate of 5% is achieved when impostor trials are non-target bird species vocalisations and 1.2% when using field recordings which do not contain bird vocalisations.\n
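A minimal sketch of the z-norm score normalisation step, assuming a set of impostor (cohort) scores is available for the target model; t-norm works analogously but normalises with cohort-model scores for the same test utterance:

    import numpy as np

    # z-norm: normalise a raw log-likelihood-ratio score by the mean and
    # standard deviation of the target model's scores on impostor trials.
    def z_norm(score, impostor_scores):
        mu = np.mean(impostor_scores)
        sigma = np.std(impostor_scores)
        return (score - mu) / sigma

    impostor_scores = np.random.randn(1000) * 2.0 - 5.0   # cohort scores
    print(z_norm(-1.0, impostor_scores))                  # ~ +2 sigma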
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A variational EM method for pole-zero modeling of speech with mixed block sparse and Gaussian excitation.\n \n \n \n \n\n\n \n Shi, L.; Nielsen, J. K.; Jensen, J. R.; and Christensen, M. G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1784-1788, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081516,\n  author = {L. Shi and J. K. Nielsen and J. R. Jensen and M. G. Christensen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A variational EM method for pole-zero modeling of speech with mixed block sparse and Gaussian excitation},\n  year = {2017},\n  pages = {1784-1788},\n  abstract = {The modeling of speech can be used for speech synthesis and speech recognition. We present a speech analysis method based on pole-zero modeling of speech with mixed block sparse and Gaussian excitation. By using a pole-zero model, instead of the all-pole model, a better spectral fitting can be expected. Moreover, motivated by the block sparse glottal flow excitation during voiced speech and the white noise excitation for unvoiced speech, we model the excitation sequence as a combination of block sparse signals and white noise. A variational EM (VEM) method is proposed for estimating the posterior PDFs of the block sparse residuals and point estimates of modelling parameters within a sparse Bayesian learning framework. Compared to conventional pole-zero and all-pole based methods, experimental results show that the proposed method has lower spectral distortion and good performance in reconstructing of the block sparse excitation.},\n  keywords = {Bayes methods;expectation-maximisation algorithm;poles and zeros;speech processing;speech recognition;speech synthesis;white noise;expectation-maximization algorithm;sparse Bayesian learning framework;block sparse residuals;block sparse signals;excitation sequence;unvoiced speech;white noise excitation;voiced speech;glottal flow excitation;all-pole model;speech analysis method;speech synthesis;Gaussian excitation;mixed block sparse;pole-zero modeling;variational EM method;block sparse excitation;all-pole based methods;Speech;Analytical models;Probability density function;White noise;Estimation;Speech analysis;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081516},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343400.pdf},\n}\n\n
\n
\n\n\n
\n The modeling of speech can be used for speech synthesis and speech recognition. We present a speech analysis method based on pole-zero modeling of speech with mixed block sparse and Gaussian excitation. By using a pole-zero model instead of the all-pole model, a better spectral fit can be expected. Moreover, motivated by the block sparse glottal flow excitation during voiced speech and the white noise excitation for unvoiced speech, we model the excitation sequence as a combination of block sparse signals and white noise. A variational EM (VEM) method is proposed for estimating the posterior PDFs of the block sparse residuals and point estimates of the modelling parameters within a sparse Bayesian learning framework. Compared to conventional pole-zero and all-pole based methods, experimental results show that the proposed method has lower spectral distortion and good performance in reconstructing the block sparse excitation.\n
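For contrast with the paper's pole-zero model, the following is a minimal numpy sketch of the conventional all-pole (LPC) baseline named above, fitted by the autocorrelation (Yule-Walker) method; it is the baseline, not the proposed VEM estimator.

    import numpy as np

    # All-pole (LPC) fit by the autocorrelation / Yule-Walker method.
    def lpc(x, order):
        x = x - x.mean()
        r = np.correlate(x, x, mode="full")[len(x) - 1:len(x) + order]
        R = np.array([[r[abs(i - j)] for j in range(order)]
                      for i in range(order)])
        a = np.linalg.solve(R, r[1:order + 1])
        return np.concatenate([[1.0], -a])      # denominator A(z)

    fs = 8000
    t = np.arange(2048) / fs
    x = np.sin(2 * np.pi * 500 * t) + 0.1 * np.random.randn(t.size)
    print(lpc(x, order=8))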
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Order adaptive Golomb rice coding for high variability sources.\n \n \n \n \n\n\n \n Vasilache, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1789-1793, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OrderPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081517,\n  author = {A. Vasilache},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Order adaptive Golomb rice coding for high variability sources},\n  year = {2017},\n  pages = {1789-1793},\n  abstract = {This paper presents a new perspective on the adaptive Golomb Rice codes that is especially suitable for sources having a highly variable distribution in time. Instead of adapting the Golomb Rice parameter, the encoder adapts the order of the symbols based on a count of occurrences measure. The proposed order adaptive Golomb Rice method is compared against different versions of adaptive arithmetic encoder at the encoding of real audio data stereo parameters. The proposed method shows very fast adaptability in the presence of rapidly changing data with respect to the initial data statistics.},\n  keywords = {adaptive codes;arithmetic codes;data compression;Golomb Rice parameter;adaptive arithmetic encoder;encoding;audio data stereo parameters;rice coding;adaptive Golomb Rice codes;Encoding;Image coding;Indexes;Speech;Histograms;Additives;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081517},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347528.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a new perspective on adaptive Golomb Rice codes that is especially suitable for sources having a highly time-variable distribution. Instead of adapting the Golomb Rice parameter, the encoder adapts the order of the symbols based on a count-of-occurrences measure. The proposed order adaptive Golomb Rice method is compared against different versions of an adaptive arithmetic encoder on the encoding of stereo parameters of real audio data. The proposed method shows very fast adaptability in the presence of rapidly changing data with respect to the initial data statistics.\n
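A small Python sketch of the two ingredients as we read them: plain Golomb-Rice coding of a non-negative integer with parameter k, plus re-ordering of symbols by occurrence counts so frequent symbols receive the small, cheap indices. The count-update details are illustrative assumptions, not the paper's exact rules.

    # Golomb-Rice code of a non-negative integer n with parameter k:
    # unary-coded quotient, then k binary remainder bits.
    def rice_encode(n, k):
        q, r = n >> k, n & ((1 << k) - 1)
        return "1" * q + "0" + (format(r, f"0{k}b") if k else "")

    counts = {}                       # symbol -> occurrences so far

    def encode_symbol(sym, k=2):
        # Code the symbol's rank in a count-ordered list, so frequent
        # symbols migrate to small indices and short codewords.
        ranking = sorted(counts, key=counts.get, reverse=True)
        index = ranking.index(sym) if sym in ranking else len(ranking)
        counts[sym] = counts.get(sym, 0) + 1
        return rice_encode(index, k)

    for s in [7, 7, 3, 7, 3, 7]:
        print(s, encode_symbol(s))    # 7 stays at the cheapest index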
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Time-scale wavelet scattering using hyperbolic tangent function for vessel sound classification.\n \n \n \n \n\n\n \n Can, G.; Akbaş, C. E.; and Çetin, A. E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1794-1798, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Time-scalePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081518,\n  author = {G. Can and C. E. Akbaş and A. E. Çetin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Time-scale wavelet scattering using hyperbolic tangent function for vessel sound classification},\n  year = {2017},\n  pages = {1794-1798},\n  abstract = {We introduce a time-frequency scattering method using hyperbolic tangent function for vessel sound classification. The sound data is wavelet transformed using a two channel filter-bank and filter-bank outputs are scattered using tanh function. A feature vector similar to mel-scale cepstrum is obtained after a wavelet packed transform-like structure approximating the mel-frequency scale. Feature vectors of vessel sounds are classified using a support vector machine (SVM). Experimental results are presented and the new feature extraction method produces better classification results than the ordinary Mel-Frequency Cepstral Coefficients (MFCC) vectors.},\n  keywords = {cepstral analysis;feature extraction;signal classification;speech recognition;support vector machines;wavelet transforms;feature vector;support vector machine;feature extraction method;time-scale wavelet scattering;hyperbolic tangent function;vessel sound classification;time-frequency scattering method;channel filter-bank;filter-bank outputs;tanh function;ordinary mel-frequency cepstral coefficient vectors;Mel frequency cepstral coefficient;Scattering;Wavelet transforms;Signal processing;Feature extraction;Vessel Sound Classification;Time-frequency Representation;Scattering Filter-bank;Hyperbolic Tangent Function},\n  doi = {10.23919/EUSIPCO.2017.8081518},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342261.pdf},\n}\n\n
\n
\n\n\n
\n We introduce a time-frequency scattering method using the hyperbolic tangent function for vessel sound classification. The sound data is wavelet transformed using a two-channel filter-bank and the filter-bank outputs are scattered using the tanh function. A feature vector similar to the mel-scale cepstrum is obtained after a wavelet packet transform-like structure approximating the mel-frequency scale. Feature vectors of vessel sounds are classified using a support vector machine (SVM). Experimental results are presented, and the new feature extraction method produces better classification results than ordinary Mel-Frequency Cepstral Coefficients (MFCC) vectors.\n
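A toy numpy sketch of one interpretation of the scheme: a two-channel (Haar-like) filter bank splits the signal into low and high bands, tanh replaces the modulus nonlinearity of classical scattering, and the low band is recursed to approximate a mel-like scale. The actual filters in the paper may differ.

    import numpy as np

    def tanh_scatter(x, levels=4):
        feats = []
        for _ in range(levels):
            lo = (x[0::2] + x[1::2]) / 2.0     # lowpass branch
            hi = (x[0::2] - x[1::2]) / 2.0     # highpass branch
            feats.append(np.mean(np.tanh(np.abs(hi))))
            x = np.tanh(lo)                    # scatter, then recurse
        return np.array(feats)

    x = np.random.randn(1024)
    print(tanh_scatter(x))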
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-intrusive bit-rate detection of coded speech.\n \n \n \n \n\n\n \n Sharma, D.; Jost, U.; and Naylor, P. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1799-1803, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Non-intrusivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081519,\n  author = {D. Sharma and U. Jost and P. A. Naylor},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Non-intrusive bit-rate detection of coded speech},\n  year = {2017},\n  pages = {1799-1803},\n  abstract = {We present a non-intrusive codec type and bit-rate detection algorithm that extracts a number of features from a decoded speech signal and models their statistics using a Deep Neural Network (DNN) classifier. We also present a method for reducing the computational complexity and improving the robustness of the algorithm by pruning features that have a low importance and high computational cost using a CART binary tree. The proposed method is tested on a database that includes additive noise and transcoding as well as a real voicemail database. We show that the proposed method has 25% lower complexity than the baseline, 19% higher accuracy in the bitrate detection task and 10% higher accuracy in the CODEC classification experiment.},\n  keywords = {computational complexity;feature extraction;neural nets;speech coding;speech recognition;bitrate detection task;nonintrusive bit-rate detection;coded speech;bit-rate detection algorithm;decoded speech signal;computational complexity;deep neural network classifier;CART binary tree;additive noise;transcoding;voicemail database;CODEC classification experiment;Codecs;Speech;Signal processing algorithms;Feature extraction;Databases;Speech coding;Training;CODEC-Identification;Deep-Neural-Network;Bit-Rate;Voicemail-Classifcation;Speech-Quality},\n  doi = {10.23919/EUSIPCO.2017.8081519},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341301.pdf},\n}\n\n
\n
\n\n\n
\n We present a non-intrusive codec type and bit-rate detection algorithm that extracts a number of features from a decoded speech signal and models their statistics using a Deep Neural Network (DNN) classifier. We also present a method for reducing the computational complexity and improving the robustness of the algorithm by pruning features that have a low importance and high computational cost using a CART binary tree. The proposed method is tested on a database that includes additive noise and transcoding as well as a real voicemail database. We show that the proposed method has 25% lower complexity than the baseline, 19% higher accuracy in the bitrate detection task and 10% higher accuracy in the CODEC classification experiment.\n
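A hedged sketch of the feature-pruning step: rank features with a CART tree's impurity-based importances and keep only the informative ones before training the (omitted here) DNN classifier. Data and the cutoff are synthetic stand-ins.

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.default_rng(0)
    X = rng.standard_normal((400, 20))              # 20 candidate features
    y = (X[:, 3] + 0.5 * X[:, 7] > 0).astype(int)   # only 2 are useful

    # Rank features by CART impurity-based importance, keep the top few,
    # and feed only those to the downstream classifier.
    tree = DecisionTreeClassifier(max_depth=5, random_state=0).fit(X, y)
    keep = np.argsort(tree.feature_importances_)[::-1][:5]
    print("kept feature indices:", keep)            # 3 and 7 should rank highly
    X_pruned = X[:, keep]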
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Harmonic and percussive source separation using a convolutional auto encoder.\n \n \n \n \n\n\n \n Lim, W.; and Lee, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1804-1808, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HarmonicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081520,\n  author = {W. Lim and T. Lee},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Harmonic and percussive source separation using a convolutional auto encoder},\n  year = {2017},\n  pages = {1804-1808},\n  abstract = {Real world audio signals are generally a mixture of harmonic and percussive sounds. In this paper, we present a novel method for separating the harmonic and percussive audio signals from an audio mixture. Proposed method involves the use of a convolutional auto-encoder on a magnitude of the spectrogram to separate the harmonic and percussive signals. This network structure enables automatic high-level feature learning and spectral domain audio decomposition. An evaluation was performed using professionally produced music recording. Consequently, we confirm that the proposed method provides superior separation performance compared to conventional methods.},\n  keywords = {audio signal processing;convolutional codes;learning (artificial intelligence);music;source separation;harmonic signals;percussive signals;high-level feature learning;spectral domain audio decomposition;percussive source separation;convolutional auto encoder;world audio signals;percussive sounds;percussive audio signals;convolutional auto-encoder;Harmonic analysis;Power harmonic filters;Convolution;Databases;Spectrogram;Filtering algorithms;Source Separation;Deep Learning;Auto-Encoder;Convolutional Neural Networks},\n  doi = {10.23919/EUSIPCO.2017.8081520},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346835.pdf},\n}\n\n
\n
\n\n\n
\n Real-world audio signals are generally a mixture of harmonic and percussive sounds. In this paper, we present a novel method for separating the harmonic and percussive audio signals from an audio mixture. The proposed method involves the use of a convolutional auto-encoder on the magnitude of the spectrogram to separate the harmonic and percussive signals. This network structure enables automatic high-level feature learning and spectral domain audio decomposition. An evaluation was performed using professionally produced music recordings. Consequently, we confirm that the proposed method provides superior separation performance compared to conventional methods.\n
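As a reference point, a minimal sketch of the conventional median-filtering approach to harmonic/percussive separation (Fitzgerald-style) that auto-encoder methods are typically compared against; kernel sizes are illustrative choices.

    import numpy as np
    from scipy.signal import stft, medfilt2d

    fs = 22050
    x = np.random.randn(fs)                  # stand-in for a music signal
    f, t, X = stft(x, fs=fs, nperseg=1024)
    S = np.abs(X)

    # Harmonic content is smooth along time, percussive along frequency:
    # median-filter the magnitude spectrogram in each direction and build
    # a soft mask from the two estimates.
    H = medfilt2d(S, kernel_size=[1, 17])    # smooth across time
    P = medfilt2d(S, kernel_size=[17, 1])    # smooth across frequency
    mask_h = H / (H + P + 1e-9)              # soft harmonic mask
    harmonic = S * mask_h
    percussive = S * (1.0 - mask_h)
    print(harmonic.shape, percussive.shape)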
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Novel TEO-based Gammatone features for environmental sound classification.\n \n \n \n \n\n\n \n Agrawal, D. M.; Sailor, H. B.; Soni, M. H.; and Patil, H. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1809-1813, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NovelPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081521,\n  author = {D. M. Agrawal and H. B. Sailor and M. H. Soni and H. A. Patil},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Novel TEO-based Gammatone features for environmental sound classification},\n  year = {2017},\n  pages = {1809-1813},\n  abstract = {In this paper, we propose to use modified Gammatone filterbank with Teager Energy Operator (TEO) for environmental sound classification (ESC) task. TEO can track energy as a function of both amplitude and frequency of an audio signal. TEO is better for capturing energy variations in the signal that is produced by a real physical system, such as, environmental sounds that contain amplitude and frequency modulations. In proposed feature set, we have used Gammatone filterbank since it represents characteristics of human auditory processing. Here, we have used two classifiers, namely, Gaussian Mixture Model (GMM) using cepstral features, and Convolutional Neural Network (CNN) using spectral features. We performed experiments on two datasets, namely, ESC-50, and UrbanSound8K. We compared TEO-based coefficients with Mel filter cepstral coefficients (MFCC) and Gammatone cepstral coefficients (GTCC), in which GTCC used mean square energy. Using GMM, the proposed TEO-based Gammatone Cepstral Coefficients (TEO-GTCC), and its score-level fusion with MFCC gave absolute improvement of 0.45 %, and 3.85 % in classification accuracy over MFCC on ESC-50 dataset. Similarly, on UrbanSound8K dataset the proposed TEO-GTCC, and its score-level fusion with GTCC gave absolute improvement of 1.40 %, and 2.44 % in classification accuracy over MFCC. Using CNN, the score-level fusion of Gammatone spectral coefficient (GTSC) and the proposed TEO-based Gammatone spectral coefficients (TEO-GTSC) gave absolute improvement of 14.10 %, and 14.52 % in classification accuracy over Mel filterbank energies (FBE) on ESC-50 and UrbanSond8K datasets, respectively. This shows that proposed TEO-based Gammatone features contain complementary information which is helpful in ESC task.},\n  keywords = {audio signal processing;cepstral analysis;channel bank filters;feature extraction;feedforward neural nets;Gaussian processes;signal classification;speaker recognition;speech recognition;environmental sound classification task;energy variations;frequency modulations;cepstral features;spectral features;score-level fusion;ESC-50 dataset;UrbanSound8K dataset;Gammatone spectral coefficient;TEO-GTSC;Mel filterbank energies;CNN classifier;convolutional neural network;Gaussian mixture model classifier;human auditory processing;audio signal amplitude;audio signal frequency;ESC task;TEO-based Gammatone features;modified Gammatone filterbank;Teager energy operator;Frequency modulation;Mel frequency cepstral coefficient;Feature extraction;Databases;Convolution},\n  doi = {10.23919/EUSIPCO.2017.8081521},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347591.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose to use a modified Gammatone filterbank with the Teager Energy Operator (TEO) for the environmental sound classification (ESC) task. TEO can track energy as a function of both the amplitude and frequency of an audio signal. TEO is better at capturing energy variations in a signal produced by a real physical system, such as environmental sounds that contain amplitude and frequency modulations. In the proposed feature set, we have used the Gammatone filterbank since it represents characteristics of human auditory processing. Here, we have used two classifiers, namely, a Gaussian Mixture Model (GMM) using cepstral features, and a Convolutional Neural Network (CNN) using spectral features. We performed experiments on two datasets, namely, ESC-50 and UrbanSound8K. We compared TEO-based coefficients with Mel-frequency cepstral coefficients (MFCC) and Gammatone cepstral coefficients (GTCC), in which GTCC used mean square energy. Using GMM, the proposed TEO-based Gammatone Cepstral Coefficients (TEO-GTCC), and its score-level fusion with MFCC, gave absolute improvements of 0.45 % and 3.85 % in classification accuracy over MFCC on the ESC-50 dataset. Similarly, on the UrbanSound8K dataset the proposed TEO-GTCC, and its score-level fusion with GTCC, gave absolute improvements of 1.40 % and 2.44 % in classification accuracy over MFCC. Using CNN, the score-level fusion of Gammatone spectral coefficients (GTSC) and the proposed TEO-based Gammatone spectral coefficients (TEO-GTSC) gave absolute improvements of 14.10 % and 14.52 % in classification accuracy over Mel filterbank energies (FBE) on the ESC-50 and UrbanSound8K datasets, respectively. This shows that the proposed TEO-based Gammatone features contain complementary information which is helpful in the ESC task.\n
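The discrete Teager Energy Operator at the core of the proposed features is simple to state and compute; in the paper it is applied per Gammatone subband, while this sketch applies it to a raw test tone.

    import numpy as np

    # Discrete TEO: Psi[x](n) = x(n)^2 - x(n-1) * x(n+1).
    def teager(x):
        return x[1:-1] ** 2 - x[:-2] * x[2:]

    fs = 16000
    n = np.arange(1024)
    x = np.sin(2 * np.pi * 440 * n / fs)
    psi = teager(x)
    # For a pure tone A*sin(omega*n), TEO is ~ A^2 * sin^2(omega): the
    # output is nearly constant, tracking amplitude and frequency jointly.
    print(psi.mean(), psi.std())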
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic organisation and quality analysis of user-generated content with audio fingerprinting.\n \n \n \n \n\n\n \n Mordido, G.; Magalhães, J.; and Cavaco, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1814-1818, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081522,\n  author = {G. Mordido and J. Magalhães and S. Cavaco},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Automatic organisation and quality analysis of user-generated content with audio fingerprinting},\n  year = {2017},\n  pages = {1814-1818},\n  abstract = {The increase of the quantity of user-generated content experienced in social media has boosted the importance of analysing and organising the content by its quality. Here, we propose a method that uses audio fingerprinting to organise and infer the quality of user-generated audio content. The proposed method detects the overlapping segments between different audio clips to organise and cluster the data according to events, and to infer the audio quality of the samples. A test setup with concert recordings manually crawled from YouTube is used to validate the presented method. The results show that the proposed method achieves better results than previous methods.},\n  keywords = {audio signal processing;pattern clustering;social networking (online);automatic organisation analysis;quality analysis;audio fingerprinting;user-generated audio content;audio clips;data clustering;data organization;YouTube;Clustering algorithms;Databases;Signal processing algorithms;Synchronization;Algorithm design and analysis;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081522},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347778.pdf},\n}\n\n
\n
\n\n\n
\n The increase in the quantity of user-generated content in social media has boosted the importance of analysing and organising content by its quality. Here, we propose a method that uses audio fingerprinting to organise and infer the quality of user-generated audio content. The proposed method detects the overlapping segments between different audio clips to organise and cluster the data according to events, and to infer the audio quality of the samples. A test setup with concert recordings manually crawled from YouTube is used to validate the presented method. The results show that the proposed method achieves better results than previous methods.\n
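A hedged sketch of the matching primitive behind fingerprint-based organisation: estimating the time offset between two clips of the same event. Real fingerprinting matches hashed spectral peaks rather than raw samples, but the offset logic is similar.

    import numpy as np
    from scipy.signal import correlate

    # Two synthetic "recordings" of the same event, offset by 1 second.
    fs = 8000
    src = np.random.randn(5 * fs)                       # the event
    clip_a = src[0:3 * fs] + 0.05 * np.random.randn(3 * fs)
    clip_b = src[fs:4 * fs] + 0.05 * np.random.randn(3 * fs)

    corr = correlate(clip_a, clip_b, mode="full", method="fft")
    lag = np.argmax(corr) - (len(clip_b) - 1)
    print("clip_b starts this many seconds after clip_a:", lag / fs)  # ~1.0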
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A subjective evaluation on mixtures of crowdsourced audio recordings.\n \n \n \n \n\n\n \n Stefanakis, N.; Viskadouros, M.; and Mouchtaris, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1819-1823, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081523,\n  author = {N. Stefanakis and M. Viskadouros and A. Mouchtaris},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A subjective evaluation on mixtures of crowdsourced audio recordings},\n  year = {2017},\n  pages = {1819-1823},\n  abstract = {Exploiting correlations in the audio, several works in the past have demonstrated the ability to automatically match and synchronize User Generated Recordings (UGRs) of the same event. Considering a small number of synchronized UGRs, we formulate in this paper simple linear audio mixing approaches to combine the available audio content. We use data from two different public events to perform a comparative listening test with the goal to assess the potential of such mixtures in improving the listening experience of the captured event, as opposed to when each UGR is consumed individually. The results of the listening tests indicate that, even with just a small number of overlapping UGRs, the outcome of the mixing process gains higher preference in comparison to original UGRs played back individually.},\n  keywords = {audio recording;audio signal processing;crowdsourcing;available audio content;crowdsourced audio recordings;User Generated Recordings;User Generated Recording;linear audio mixing;public events;Synchronization;Acoustics;Correlation;Europe;Audio recording;Visualization},\n  doi = {10.23919/EUSIPCO.2017.8081523},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346177.pdf},\n}\n\n
\n
\n\n\n
\n Exploiting correlations in the audio, several works in the past have demonstrated the ability to automatically match and synchronize User Generated Recordings (UGRs) of the same event. Considering a small number of synchronized UGRs, we formulate in this paper simple linear audio mixing approaches to combine the available audio content. We use data from two different public events to perform a comparative listening test with the goal to assess the potential of such mixtures in improving the listening experience of the captured event, as opposed to when each UGR is consumed individually. The results of the listening tests indicate that, even with just a small number of overlapping UGRs, the outcome of the mixing process gains higher preference in comparison to original UGRs played back individually.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Discriminant kernel learning for acoustic scene classification with multiple observations.\n \n \n \n \n\n\n \n Ye, J.; Kobayashi, T.; Tsuda, H.; and Murakawa, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1824-1828, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DiscriminantPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081524,\n  author = {J. Ye and T. Kobayashi and H. Tsuda and M. Murakawa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Discriminant kernel learning for acoustic scene classification with multiple observations},\n  year = {2017},\n  pages = {1824-1828},\n  abstract = {In this paper, we propose a novel kernel learning scheme for acoustic scene classification using multiple short-term observations. The method takes inspiration from the recent result of psychological research - {"}Humans use summary statistics to perceive auditory sequences{"} we endeavor to devise computational framework imitating such important auditory mechanism for acoustic scene parsing. Conventional schemes usually encode spectro-temporal patterns with a compact feature vector by time-averaging, e.g. in Gaussian Mixture models (GMM). However, such integration may not be the ideal, since the arithmetic mean is vulnerable to extreme outliers which can be generated by sounds irrelevant to scene category. In this work, an effective scheme has been developed to exploit rich discriminant information from multiple short-term observations of an acoustic scene. Concretely, we first segment audio recording into short slices, e.g. 2 seconds; one vector can be extracted from each slice consisting of descriptive features. Then, we employ the resultant feature matrix to represent an acoustic scene. Since discriminant information of an acoustic scene can be characterized by either global structure or local patterns, we perform heterogeneous kernel analysis in hybrid feature spaces. Moreover, we conditionally fuse the two-way discriminant information to achieve better classification. The proposed method is validated using DCASE2016 challenge dataset. Experimental results demonstrated the effectiveness of our approach.},\n  keywords = {acoustic signal processing;audio signal processing;feature extraction;learning (artificial intelligence);sensor fusion;signal classification;signal representation;acoustic scene classification;short-term observations;auditory sequences;acoustic scene parsing;compact feature vector;scene category;rich discriminant information;short slices;resultant feature matrix;heterogeneous kernel analysis;auditory mechanism;discriminant kernel learning;computational framework;pectro-temporal pattern encoding;audio recording segmentation;descriptive features;acoustic scene representation;global structure;local patterns;hybrid feature spaces;two-way discriminant information fusion;DCASE2016 challenge dataset;time 2.0 s;Acoustics;Kernel;Feature extraction;Support vector machines;Manifolds;Signal processing;Matrix converters},\n  doi = {10.23919/EUSIPCO.2017.8081524},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347384.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a novel kernel learning scheme for acoustic scene classification using multiple short-term observations. The method takes inspiration from a recent result of psychological research, namely that \"humans use summary statistics to perceive auditory sequences\", and endeavors to devise a computational framework imitating this important auditory mechanism for acoustic scene parsing. Conventional schemes usually encode spectro-temporal patterns with a compact feature vector by time-averaging, e.g. in Gaussian Mixture Models (GMM). However, such integration may not be ideal, since the arithmetic mean is vulnerable to extreme outliers, which can be generated by sounds irrelevant to the scene category. In this work, an effective scheme has been developed to exploit rich discriminant information from multiple short-term observations of an acoustic scene. Concretely, we first segment the audio recording into short slices, e.g. 2 seconds; one vector consisting of descriptive features can be extracted from each slice. Then, we employ the resultant feature matrix to represent an acoustic scene. Since the discriminant information of an acoustic scene can be characterized by either global structure or local patterns, we perform heterogeneous kernel analysis in hybrid feature spaces. Moreover, we conditionally fuse the two-way discriminant information to achieve better classification. The proposed method is validated using the DCASE2016 challenge dataset. Experimental results demonstrated the effectiveness of our approach.\n
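A rough numpy sketch of the representation step described above: slice the recording, extract one vector per slice, and keep the full matrix instead of a single time-averaged vector. The per-slice feature here (a truncated log power spectrum) is only a stand-in for the paper's descriptors.

    import numpy as np

    fs = 16000
    x = np.random.randn(30 * fs)             # a 30-second "scene"
    slice_len = 2 * fs                       # 2-second slices

    # One descriptive vector per slice; the matrix (not its time average)
    # represents the scene.
    n = len(x) // slice_len * slice_len
    slices = x[:n].reshape(-1, slice_len)
    feats = np.log(np.abs(np.fft.rfft(slices, axis=1)) ** 2 + 1e-12)
    feat_matrix = feats[:, :64]
    print(feat_matrix.shape)                 # (15, 64)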
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dual-band PWM audio coding for ultrasound artefact reduction.\n \n \n \n \n\n\n \n Kaleris, K.; and Mourjopoulos, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1829-1833, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Dual-bandPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081525,\n  author = {K. Kaleris and J. Mourjopoulos},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Dual-band PWM audio coding for ultrasound artefact reduction},\n  year = {2017},\n  pages = {1829-1833},\n  abstract = {A novel approach to PWM coding is introduced based on generating two complementary PWM streams out of the in-band signal's spectrum. The 2 streams are then recombined in a suitable way, so that out-of-phase cancellation of the carrier frequency harmonics is achieved. The approach suppresses the strong out-of-band frequencies of the carrier signal without introducing distortion of the in-band coded signal. Such method can achieve superior reduction compared to the out-of-band artefact suppression induced by traditional analog low-pass filters employed in typical Class-D audio amplifiers or other switching power delivery systems, hence allowing designs with reduced filter requirements or even filterless implementations.},\n  keywords = {audio coding;audio-frequency amplifiers;low-pass filters;PWM power convertors;out-of-phase cancellation;carrier frequency harmonics;carrier signal;out-of-band artefact suppression;ultrasound artefact reduction;PWM coding;complementary PWM streams;dual-band PWM audio coding;in-band coded signal;analog low-pass filters;typical class-D audio amplifiers;Pulse width modulation;Power harmonic filters;Harmonic analysis;Low pass filters;Phase change materials;Ultrasonic imaging;Acoustics;Pulse Width Modulation;Class-D audio amplifier;audio coding;PWM ultrasound},\n  doi = {10.23919/EUSIPCO.2017.8081525},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343787.pdf},\n}\n\n
\n
\n\n\n
\n A novel approach to PWM coding is introduced, based on generating two complementary PWM streams out of the in-band signal's spectrum. The two streams are then recombined in a suitable way, so that out-of-phase cancellation of the carrier frequency harmonics is achieved. The approach suppresses the strong out-of-band frequencies of the carrier signal without introducing distortion of the in-band coded signal. Such a method can achieve superior reduction compared to the out-of-band artefact suppression induced by the traditional analog low-pass filters employed in typical Class-D audio amplifiers or other switching power delivery systems, hence allowing designs with reduced filter requirements or even filterless implementations.\n
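For orientation, a minimal sketch of single-band PWM generation by comparing the audio against a triangular carrier; this is the standard starting point that the proposed dual-band scheme splits into two complementary streams (the splitting itself is not reproduced here). Carrier and sample rates are illustrative.

    import numpy as np

    fs = 192000                              # switching-rate sample grid
    fc = 24000                               # PWM carrier frequency
    t = np.arange(fs // 10) / fs             # 100 ms
    audio = 0.7 * np.sin(2 * np.pi * 1000 * t)   # in-band test tone

    # Natural-sampling PWM: compare the audio against a triangular
    # carrier; the output switches between the two supply levels.
    carrier = 2.0 * np.abs(2.0 * ((fc * t) % 1.0) - 1.0) - 1.0
    pwm = np.where(audio >= carrier, 1.0, -1.0)
    print(pwm[:10])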
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Feasibility of vocal emotion conversion on modulation spectrogram for simulated cochlear implants.\n \n \n \n \n\n\n \n Zhu, Z.; Miyauchi, R.; Araki, Y.; and Unoki, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1834-1838, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"FeasibilityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081526,\n  author = {Z. Zhu and R. Miyauchi and Y. Araki and M. Unoki},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Feasibility of vocal emotion conversion on modulation spectrogram for simulated cochlear implants},\n  year = {2017},\n  pages = {1834-1838},\n  abstract = {Cochlear implant (CI) listeners were found to have great difficulty with vocal emotion recognition because of the limited spectral cues provided by CI devices. Previous studies have shown that the modulation spectral features of temporal envelopes may be important cues for vocal emotion recognition of noise-vocoded speech (NVS) as simulated CIs. In this paper, the feasibility of vocal emotion conversion on a modulation spectrogram for simulated CIs for correctly recognizing vocal emotion is confirmed. A method based on a linear prediction scheme is proposed to modify the modulation spectrogram and its features of neutral speech to match that of emotional speech. The logic of this approach is that if vocal emotion perception of NVS is based on the modulation spectral features, NVS with similar modulation spectral features of emotional speech will be recognized as the same emotion. As a result, it was found that the modulation spectrogram of neutral speech can be successfully converted to that of emotional speech. The results of the evaluation experiment showed the feasibility of vocal emotion conversion on the modulation spectrogram for simulated CIs. The vocal emotion enhancement on the modulation spectrogram was also further discussed.},\n  keywords = {cochlear implants;emotion recognition;hearing;speech enhancement;speech intelligibility;vocoders;vocal emotion conversion;modulation spectrogram;simulated cochlear implants;cochlear implant listeners;vocal emotion recognition;spectral cues;NVS;neutral speech;emotional speech;vocal emotion perception;similar modulation spectral features;vocal emotion enhancement;Speech;Spectrogram;Emotion recognition;Acoustics;Frequency conversion;Frequency modulation},\n  doi = {10.23919/EUSIPCO.2017.8081526},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342458.pdf},\n}\n\n
\n
\n\n\n
\n Cochlear implant (CI) listeners were found to have great difficulty with vocal emotion recognition because of the limited spectral cues provided by CI devices. Previous studies have shown that the modulation spectral features of temporal envelopes may be important cues for vocal emotion recognition of noise-vocoded speech (NVS) as simulated CIs. In this paper, the feasibility of vocal emotion conversion on the modulation spectrogram for simulated CIs is confirmed. A method based on a linear prediction scheme is proposed to modify the modulation spectrogram and its features so that those of neutral speech match those of emotional speech. The logic of this approach is that if vocal emotion perception of NVS is based on modulation spectral features, NVS with modulation spectral features similar to those of emotional speech will be recognized as the same emotion. As a result, it was found that the modulation spectrogram of neutral speech can be successfully converted to that of emotional speech. The results of the evaluation experiment showed the feasibility of vocal emotion conversion on the modulation spectrogram for simulated CIs. Vocal emotion enhancement on the modulation spectrogram is also discussed.\n
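A hedged sketch of computing a modulation spectrogram: band-pass the signal into acoustic-frequency channels, take each channel's temporal envelope via the Hilbert transform, then Fourier-analyse the envelopes. The channel edges and filter order are illustrative, not the paper's vocoder settings.

    import numpy as np
    from scipy.signal import butter, sosfilt, hilbert

    fs = 16000
    t = np.arange(fs) / fs                   # 1 s of signal
    # A 1 kHz carrier amplitude-modulated at 4 Hz.
    x = (1 + 0.8 * np.sin(2 * np.pi * 4 * t)) * np.sin(2 * np.pi * 1000 * t)

    edges = [(100, 500), (500, 1500), (1500, 4000)]   # acoustic channels
    mod_spec = []
    for lo, hi in edges:
        sos = butter(4, [lo, hi], btype="bandpass", fs=fs, output="sos")
        env = np.abs(hilbert(sosfilt(sos, x)))        # temporal envelope
        mod_spec.append(np.abs(np.fft.rfft(env - env.mean())))

    freqs = np.fft.rfftfreq(fs, 1 / fs)
    print("dominant modulation (Hz):", freqs[np.argmax(mod_spec[1])])  # ~4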
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detection of alarm sounds in noisy environments.\n \n \n \n \n\n\n \n Carmel, D.; Yeshurun, A.; and Moshe, Y.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1839-1843, Aug 2017. \n \n\n\n\n
@InProceedings{8081527,
  author = {D. Carmel and A. Yeshurun and Y. Moshe},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Detection of alarm sounds in noisy environments},
  year = {2017},
  pages = {1839-1843},
  abstract = {Sirens and alarms play an important role in everyday life since they warn people of hazardous situations, even when these are out of sight. Automatic detection of this class of sounds can help hearing-impaired or distracted people, e.g., on the road, and contribute to their independence and safety. In this paper, we present a technique for the detection of alarm sounds in noisy environments. The technique is not limited to particular alarms and can detect most electronically generated alerting sounds within 200 ms. We consider a set of acoustic features and use the ReliefF algorithm to select only the ones that best differentiate between alarms and other sounds. We use an SVM classifier as the detector. On the tested dataset, consisting of several dozen alarm sounds and several dozen background noises, the proposed technique shows an accuracy of 98% per audio frame. With a larger training dataset, this result is expected to substantially improve.},
  keywords = {alarm systems;hearing;learning (artificial intelligence);signal classification;signal detection;support vector machines;noisy environments;automatic detection;electronically generated alerting sounds;alarm sound detection;ReliefF algorithm;alarm sounds;SVM classifier;time 200.0 ms;Feature extraction;Band-pass filters;Auditory system;Noise measurement;Support vector machines;Training;Acoustics;alarm detection;siren detection;acoustic event detection;sound recognition;assistance for hearing impaired},
  doi = {10.23919/EUSIPCO.2017.8081527},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341315.pdf},
}

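The feature-selection-plus-SVM pipeline described above can be sketched as follows. The ReliefF implementation is a minimal textbook version written for this example, and the feature matrix, labels and number of retained features are assumptions, not the authors' code.

import numpy as np
from sklearn.svm import SVC

def relieff_weights(X, y, k=10):
    # Minimal ReliefF: reward features that stay close among the nearest
    # same-class "hits" and differ from the nearest other-class "misses".
    n, d = X.shape
    span = X.max(0) - X.min(0) + 1e-12
    w = np.zeros(d)
    for i in range(n):
        diff = np.abs(X - X[i]) / span            # per-feature normalised distance
        dist = diff.sum(1)
        dist[i] = np.inf                          # exclude the sample itself
        hits = np.argsort(np.where(y == y[i], dist, np.inf))[:k]
        miss = np.argsort(np.where(y != y[i], dist, np.inf))[:k]
        w += diff[miss].mean(0) - diff[hits].mean(0)
    return w / n

# Keep the top-scoring features, then train the SVM detector
# (X_feat: frame-level acoustic features, y: alarm / background labels):
# sel = np.argsort(relieff_weights(X_feat, y))[::-1][:20]
# clf = SVC(kernel="rbf").fit(X_feat[:, sel], y)
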
Voice pathology distinction using autoassociative neural networks.
Hemmerling, D.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1844-1847, Aug 2017.

@InProceedings{8081528,
  author = {D. Hemmerling},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Voice pathology distinction using autoassociative neural networks},
  year = {2017},
  pages = {1844-1847},
  abstract = {Acoustic analysis is a non-invasive technique that supports voice disease screening, especially the detection of, and distinction between, chosen voice pathologies and a healthy control group. This work puts effort into the creation of an efficient and accurate system for the automatic detection and differentiation of normal voices and three different voice pathologies. The system provides non-invasive, fully automated analysis of voice characteristics, with a decision system based on neural networks. The feature vector describing the vocal tract is built from 35 parameters. Recordings of patients suffering from hyperfunctional dysphonia, recurrent laryngeal nerve paralysis and laryngitis, as well as a healthy control group, are considered in our experiments. From the experimental results it is observed that the effectiveness of auto-associative neural networks is promising for pathological voice distinction.},
  keywords = {acoustic signal processing;biological organs;diseases;feature extraction;medical disorders;medical signal processing;neural nets;neurophysiology;patient diagnosis;signal classification;speech;speech processing;voice pathology distinction;autoassociative neural networks;acoustic analysis;noninvasive technique;voice disease screening;voice characteristics;decision system;feature vector;recurrent laryngeal nerve paralysis;auto-associative neural networks;pathological voice distinction;voice pathologies;Pathology;Training;Hafnium;Signal processing algorithms;Frequency measurement;Biological neural networks},
  doi = {10.23919/EUSIPCO.2017.8081528},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347034.pdf},
}

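A minimal sketch of the autoassociative idea in this entry: one identity-mapping network per class, with classification by smallest reconstruction error. scikit-learn's MLPRegressor stands in for the paper's networks, and the bottleneck size and iteration budget are assumptions.

import numpy as np
from sklearn.neural_network import MLPRegressor

def fit_autoassociators(X, y, bottleneck=8):
    # One autoassociative net (input -> bottleneck -> input) per voice
    # class, trained to reconstruct the 35-dimensional feature vectors.
    return {c: MLPRegressor(hidden_layer_sizes=(bottleneck,),
                            max_iter=5000).fit(X[y == c], X[y == c])
            for c in np.unique(y)}

def classify(nets, x):
    # Assign a recording to the class whose net reconstructs it best.
    err = {c: np.mean((net.predict(x[None]) - x) ** 2)
           for c, net in nets.items()}
    return min(err, key=err.get)
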
Automatic music transcription using low rank non-negative matrix decomposition.
O'Brien, C.; and Plumbley, M. D.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1848-1852, Aug 2017.

@InProceedings{8081529,
  author = {C. O'Brien and M. D. Plumbley},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Automatic music transcription using low rank non-negative matrix decomposition},
  year = {2017},
  pages = {1848-1852},
  abstract = {Automatic Music Transcription (AMT) is concerned with the problem of producing the pitch content of a piece of music given a recorded signal. Many methods rely on sparse or low rank models, where the observed magnitude spectra are represented as a linear combination of dictionary atoms corresponding to individual pitches. Some of the most successful approaches use Non-negative Matrix Decomposition (NMD) or Factorization (NMF), which can be used to learn a dictionary and pitch activation matrix from a given signal. Here we introduce a further refinement of NMD in which we assume the transcription itself is approximately low rank. The intuition behind this approach is that the total number of distinct activation patterns should be relatively small since the pitch content between adjacent frames should be similar. A rank penalty is introduced into the NMD objective function and solved using an iterative algorithm based on Singular Value thresholding. We find that the low rank assumption leads to a significant increase in performance compared to NMD using β-divergence on a standard AMT dataset.},
  keywords = {iterative methods;matrix decomposition;music;signal processing;automatic music transcription;pitch content;dictionary atoms;pitch activation matrix;distinct activation patterns;rank penalty;NMD objective function;low rank nonnegative matrix decomposition;dictionary learning;iterative algorithm;singular value thresholding;β-divergence;Matrix decomposition;Dictionaries;Signal processing;Standards;Multiple signal classification;Indexes;Europe},
  doi = {10.23919/EUSIPCO.2017.8081529},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347001.pdf},
}

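The rank penalty in this entry is handled with singular value thresholding. The sketch below shows that operator, which the paper alternates with β-divergence NMD updates; the threshold value would be tied to the penalty weight and is an assumption of the example.

import numpy as np

def svt(H, tau):
    # Singular value thresholding: the proximal operator of
    # tau * (nuclear norm), used to push the pitch activation
    # matrix H towards low rank between NMD updates.
    U, s, Vt = np.linalg.svd(H, full_matrices=False)
    return U @ (np.maximum(s - tau, 0.0)[:, None] * Vt)
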
Feature-based characterization of violin timbre.
Setragno, F.; Zanoni, M.; Sarti, A.; and Antonacci, F.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1853-1857, Aug 2017.

@InProceedings{8081530,
  author = {F. Setragno and M. Zanoni and A. Sarti and F. Antonacci},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Feature-based characterization of violin timbre},
  year = {2017},
  pages = {1853-1857},
  abstract = {Timbral quality of historical violins has been discussed for years. In this paper, we show that it is possible to characterize it from an objective, low-level feature perspective. Feature selection algorithms are used to select the features that best characterize historical and contemporary violins. The feature representation of the violins is then reduced by means of the t-SNE method. In the low-dimensional space thus obtained, historical violins tend to group together.},
  keywords = {feature selection;music;musical instruments;feature selection algorithms;contemporary violins;historical violins;violin timbre;timbral quality;T-SNE method;Feature extraction;Instruments;Timbre;Mel frequency cepstral coefficient;Aerospace electronics;Signal processing algorithms},
  doi = {10.23919/EUSIPCO.2017.8081530},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347617.pdf},
}

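A minimal sketch of the dimensionality-reduction step named in the abstract, using scikit-learn's t-SNE; the feature matrix here is a random placeholder, and the perplexity and seed are illustrative, not the paper's settings.

import numpy as np
from sklearn.manifold import TSNE

X_selected = np.random.rand(40, 12)   # stand-in for the selected timbre features
emb = TSNE(n_components=2, perplexity=10, random_state=0).fit_transform(X_selected)
# emb rows can now be scattered to check whether historical violins cluster.
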
Empirical study of drone sound detection in real-life environment with deep neural networks.
Jeon, S.; Shin, J.; Lee, Y.; Kim, W.; Kwon, Y.; and Yang, H.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1858-1862, Aug 2017.

@InProceedings{8081531,
  author = {S. Jeon and J. Shin and Y. Lee and W. Kim and Y. Kwon and H. Yang},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Empirical study of drone sound detection in real-life environment with deep neural networks},
  year = {2017},
  pages = {1858-1862},
  abstract = {This work investigates the use of deep neural networks to detect commercial hobby drones in real-life environments by analyzing their sound data. The purpose of this work is to contribute to a system for detecting drones used for malicious purposes, such as terrorism. Specifically, we present a method capable of detecting the presence of commercial hobby drones as a binary classification problem based on sound event detection. We recorded the sound produced by a few popular commercial hobby drones, and then augmented this data with diverse environmental sound data to remedy the scarcity of drone sound data in diverse environments. We investigated the effectiveness of state-of-the-art event sound classification methods, i.e., a Gaussian Mixture Model (GMM), Convolutional Neural Network (CNN), and Recurrent Neural Network (RNN), for drone sound detection. Our empirical results, obtained with a testing dataset collected on an urban street, confirmed the effectiveness of these models for operation in a real environment. In summary, our RNN models showed the best detection performance, with an F-score of 0.8009 on 240 ms of input audio and a short processing time, indicating their applicability to real-time detection systems.},
  keywords = {acoustic signal detection;autonomous aerial vehicles;convolution;Gaussian processes;mixture models;recurrent neural nets;signal classification;drone sound detection;deep neural network;sound event detection;hobby drones;binary classification problem;event sound classification methods;Gaussian Mixture Model;Recurrent Neural Network;Convolutional Neural Network;Drones;Training;Testing;Recurrent neural networks;Mel frequency cepstral coefficient;Data models},
  doi = {10.23919/EUSIPCO.2017.8081531},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570337347.pdf},
}

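Of the three model families evaluated above, the GMM baseline is the simplest to sketch: one mixture per class on frame-level MFCCs, with a likelihood comparison as the detector. The feature extraction step (e.g., librosa.feature.mfcc) and the component count are assumptions of this example.

from sklearn.mixture import GaussianMixture

def fit_detector(mfcc_drone, mfcc_noise, n_comp=8):
    # mfcc_*: (frames x coefficients) matrices computed beforehand.
    # A frame is flagged as drone when the drone model explains it
    # better than the background model.
    g_d = GaussianMixture(n_comp).fit(mfcc_drone)
    g_n = GaussianMixture(n_comp).fit(mfcc_noise)
    return lambda F: g_d.score_samples(F) > g_n.score_samples(F)
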
An approach for self-training audio event detectors using web data.
Elizalde, B.; Shah, A.; Dalmia, S.; Lee, M. H.; Badlani, R.; Kumar, A.; Raj, B.; and Lane, I.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1863-1867, Aug 2017.

@InProceedings{8081532,
  author = {B. Elizalde and A. Shah and S. Dalmia and M. H. Lee and R. Badlani and A. Kumar and B. Raj and I. Lane},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {An approach for self-training audio event detectors using web data},
  year = {2017},
  pages = {1863-1867},
  abstract = {Audio Event Detection (AED) aims to recognize sounds within audio and video recordings. AED employs machine learning algorithms commonly trained and tested on annotated datasets. However, available datasets are limited in the number of samples, and hence it is difficult to model acoustic diversity. Therefore, we propose combining labeled audio from a dataset with unlabeled audio from the web to improve the sound models. The audio event detectors are trained on the labeled audio and run on the unlabeled audio downloaded from YouTube. Whenever the detectors recognized any of the known sounds with high confidence, the unlabeled audio was used to re-train the detectors. The performance of the re-trained detectors is compared to that of the original detectors using the annotated test set. Results showed an improvement of the AED, and uncovered challenges of using web audio from videos.},
  keywords = {audio signal processing;learning (artificial intelligence);self-training audio event detectors;AED;audio recordings;video recordings;annotated datasets;unlabeled audio;sound models;labeled audio;re-trained detectors;Web data;Web audio;Detectors;Training;Mel frequency cepstral coefficient;Feature extraction;YouTube;Artificial neural networks;Europe},
  doi = {10.23919/EUSIPCO.2017.8081532},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570338274.pdf},
}

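The retraining loop described in this abstract is ordinary self-training. Below is a hedged sketch of that loop; the logistic-regression classifier, the 0.95 confidence threshold and the round count are stand-ins, not the paper's choices.

import numpy as np
from sklearn.linear_model import LogisticRegression

def self_train(X_lab, y_lab, X_web, thresh=0.95, rounds=3):
    # Train on labeled audio, score unlabeled web audio, absorb frames
    # classified with high confidence, retrain, and repeat.
    clf = LogisticRegression(max_iter=1000).fit(X_lab, y_lab)
    for _ in range(rounds):
        proba = clf.predict_proba(X_web)
        confident = proba.max(axis=1) >= thresh
        if not confident.any():
            break
        X_lab = np.vstack([X_lab, X_web[confident]])
        y_lab = np.concatenate([y_lab, clf.classes_[proba[confident].argmax(axis=1)]])
        X_web = X_web[~confident]
        clf = LogisticRegression(max_iter=1000).fit(X_lab, y_lab)
    return clf
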
Automated detection of geometric defects on connecting rod via acoustic resonance testing.
Zheng, Y.; Heinrich, M.; Osman, A.; and Valeske, B.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1868-1872, Aug 2017.

@InProceedings{8081533,
  author = {Y. Zheng and M. Heinrich and A. Osman and B. Valeske},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Automated detection of geometric defects on connecting rod via acoustic resonance testing},
  year = {2017},
  pages = {1868-1872},
  abstract = {Fully automated defect detection and classification of automobile components are crucial for solving quality and efficiency problems for automotive manufacturers, due to rising wages, production costs and warranty claims. However, metrological deviations in form still represent unsolved problems for state-of-the-art techniques, especially for forged or cast components with complex geometry. In this paper, we attempt to overcome these challenges by using an acoustic resonance testing model that combines feature extraction with defect classification from acoustic natural vibration signals. The study does not focus on typical defects like cracks but on defective components in the sense of geometric configurations that are out of the tolerance range. With an optimal feature extraction algorithm and a classifier training step, the proposed approach significantly accelerates the detection of unacceptable deviations in dimensions and enhances the accuracy in parallel. The main contribution of this paper is that an optimal feature from acoustic signals is found which represents the geometric parameters most appropriately; meanwhile, the most appropriate classifier is obtained, which significantly improves the efficiency and accuracy of defect classification.},
  keywords = {acoustic signal processing;automotive components;condition monitoring;feature extraction;vibrations;geometric parameters;detection speed;classifier training step;optimal feature extraction algorithm;geometric configurations;defective components;acoustic natural vibration signals;defect classification;acoustic resonance testing model;complex geometry;forged casted components;metrological deviations;warranty claims;production costs;rising wage;automotive manufacturers;efficiency problems;quality;automobile components;defect detection;geometric defects;automated detection;Geometry;Feature extraction;Training;Decision trees;Principal component analysis;Acoustics;Testing},
  doi = {10.23919/EUSIPCO.2017.8081533},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347241.pdf},
}

Frequency estimation for monophonical music by using a modified VMD method.
Simsek, B. O.; and Akan, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1873-1876, Aug 2017.

@InProceedings{8081534,
  author = {B. O. Simsek and A. Akan},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Frequency estimation for monophonical music by using a modified VMD method},
  year = {2017},
  pages = {1873-1876},
  abstract = {In this paper, a modified Variational Mode Decomposition (VMD) is introduced and applied to the fundamental frequency estimation of monophonic Turkish maqam music. VMD is an entirely non-recursive method that decomposes an input signal into an ensemble of sub-signals (modes). It determines the relevant bands adaptively and estimates the corresponding modes concurrently. In order to optimally decompose a given signal, VMD seeks an ensemble of modes with narrow-band properties corresponding to the Intrinsic Mode Function (IMF) definition used in Empirical Mode Decomposition (EMD). In our proposed modified VMD approach, in order to obtain the bandwidth of a mode, each mode is shifted to baseband by mixing with an exponential adjusted to the respective center frequency. The bandwidth is estimated through the elastic net method, which linearly combines the penalties of the Lasso and Ridge regression methods. Simulation results on fundamental frequency estimation of real music and synthetic test data show better performance compared to the classical VMD-based approach and other common methods used for music signals, such as YIN- and MELODIA-based methods.},
  keywords = {frequency estimation;music;regression analysis;signal processing;classical VMD based approach;music signals;modified VMD method;Variational Mode Decomposition;monophonical Turkish maqam music;narrow-band properties;Intrinsic Mode Function definition;Empirical Mode Decomposition;elastic net method;frequency estimation;Lasso and Ridge Regression methods;input signal decomposition;Frequency estimation;Bandwidth;Multiple signal classification;Estimation;Wavelet transforms;Europe},
  doi = {10.23919/EUSIPCO.2017.8081534},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570338081.pdf},
}

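For reference, the per-mode bandwidth term that classical VMD minimizes (and whose estimation this paper replaces with an elastic-net fit) can be computed as below: shift the analytic signal of a mode to baseband and take the energy of its time derivative. The finite-difference discretization is a simple assumption of this sketch.

import numpy as np
from scipy.signal import hilbert

def vmd_bandwidth(u, fc, fs):
    # Bandwidth measure of one mode u with centre frequency fc:
    # squared L2 norm of the derivative of the baseband analytic signal.
    t = np.arange(len(u)) / fs
    base = hilbert(u) * np.exp(-2j * np.pi * fc * t)   # mix mode down to baseband
    return np.sum(np.abs(np.diff(base) * fs) ** 2) / fs
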
Learning directed-acyclic-graphs from multiple genomic data sources.
Nikolay, F.; and Pesavento, M.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1877-1881, Aug 2017.

@InProceedings{8081535,
  author = {F. Nikolay and M. Pesavento},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Learning directed-acyclic-graphs from multiple genomic data sources},
  year = {2017},
  pages = {1877-1881},
  abstract = {In this paper we consider the problem of learning the topology of a directed acyclic graph that describes the interactions among a set of genes, based on noisy double-knockout data and genetic-interactions-profile data. We propose a novel linear integer optimization approach to identify the complex biological dependencies among genes and to compute the topology of the directed acyclic graph that matches the data best. Finally, we apply a sequential scalability technique for large sets of genes along with our proposed algorithm, in order to provide statistically significant results for experimental data.},
  keywords = {directed graphs;genetics;genomics;integer programming;learning (artificial intelligence);mathematics computing;multiple genomic data sources;genes;noisy double knockout data;genetic-interactions-profile data;directed-acyclic-graph learning;topology;linear integer optimization approach;complex biological dependencies;sequential scalability;Topology;Microorganisms;Europe;Signal processing;Genomics;Bioinformatics;Gene networks;discrete optimization;big data;graph learning},
  doi = {10.23919/EUSIPCO.2017.8081535},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345193.pdf},
}

Sparse-low rank matrix decomposition framework for identifying potential biomarkers for inflammatory bowel disease.
Alshawaqfeh, M.; Kawam, A. A.; and Serpedin, E.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1882-1886, Aug 2017.

@InProceedings{8081536,
  author = {M. Alshawaqfeh and A. A. Kawam and E. Serpedin},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Sparse-low rank matrix decomposition framework for identifying potential biomarkers for inflammatory bowel disease},
  year = {2017},
  pages = {1882-1886},
  abstract = {Inflammatory bowel disease (IBD) is a class of incurable chronic diseases which cause severe discomfort and, in some cases, can lead to life-threatening complications. Recent studies suggest a relationship between IBD and the gut microbiota. These findings reveal the potential of identifying bacterial biomarkers for IBD to enable detection and further investigation into unknown aspects of the disease. This work presents a novel method for identifying microbial biomarkers using robust principal component analysis (RPCA). Our method uses matrix decomposition to separate bacteria exhibiting a difference in abundance between healthy and diseased samples from bacteria whose abundance has not undergone substantial change. Our method then ranks and identifies the top bacteria to be used as biomarkers. We contrast the proposed method with three widely used state-of-the-art bacterial biomarker detection approaches on two IBD-related datasets. Our method outperforms the competing methods in the different evaluation cases.},
  keywords = {biology computing;diseases;matrix decomposition;microorganisms;principal component analysis;sparse matrices;bacterial biomarker detection approaches;IBD;sparse-low rank matrix decomposition framework;inflammatory bowel disease;microbial biomarkers;robust principal component analysis;gut microbiota;RPCA;Microorganisms;Biomarkers;Diseases;Matrix decomposition;Sparse matrices;Principal component analysis;Intestines},
  doi = {10.23919/EUSIPCO.2017.8081536},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342228.pdf},
}

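A compact sketch of the sparse-low rank decomposition underlying this entry, written as standard principal component pursuit solved with a small inexact-ALM loop. The parameter choices follow common defaults, and reading the sparse component as the disease-affected bacteria is this example's gloss on the paper.

import numpy as np

def shrink(X, tau):                      # soft threshold (prox of the L1 norm)
    return np.sign(X) * np.maximum(np.abs(X) - tau, 0.0)

def svthresh(X, tau):                    # singular value thresholding
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    return U @ (np.maximum(s - tau, 0.0)[:, None] * Vt)

def rpca(M, n_iter=200):
    # M ~ L + S with L low rank (shared abundance structure) and S sparse
    # (taxa whose abundance shifts between healthy and diseased samples).
    M = M.astype(float)
    lam = 1.0 / np.sqrt(max(M.shape))
    mu = 0.25 * M.size / (np.abs(M).sum() + 1e-12)
    S = np.zeros_like(M); Y = np.zeros_like(M)
    for _ in range(n_iter):
        L = svthresh(M - S + Y / mu, 1.0 / mu)
        S = shrink(M - L + Y / mu, lam / mu)
        Y += mu * (M - L - S)
    return L, S
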
Reconstructing the forest of lineage trees of diverse bacterial communities using bio-inspired image analysis.
Balomenos, A. D.; and Manolakos, E. S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1887-1891, Aug 2017.

@InProceedings{8081537,
  author = {A. D. Balomenos and E. S. Manolakos},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Reconstructing the forest of lineage trees of diverse bacterial communities using bio-inspired image analysis},
  year = {2017},
  pages = {1887-1891},
  abstract = {Cell segmentation and tracking allow us to extract a plethora of cell attributes from bacterial time-lapse cell movies, thus promoting computational modeling and simulation of biological processes down to the single-cell level. However, to successfully analyze complex cell movies, imaging multiple interacting bacterial clones as they grow and merge to generate overcrowded bacterial communities with thousands of cells in the field of view, segmentation results should be near perfect to warrant good tracking results. We introduce here a fully automated closed-loop bio-inspired computational strategy that exploits prior knowledge about the expected structure of a colony's lineage tree to locate and correct segmentation errors in analyzed movie frames. We show that this correction strategy is effective, resulting in improved cell tracking and consequently trustworthy deep colony lineage trees. Our image analysis approach has the unique capability to keep tracking cells even after clonal subpopulations merge in the movie. This enables the reconstruction of the complete Forest of Lineage Trees (FLT) representation of evolving multi-clonal bacterial communities. Moreover, the percentage of valid cell trajectories extracted from the image analysis almost doubles after segmentation correction. This plethora of trustworthy data extracted from a complex cell movie analysis enables single-cell analytics as a tool for addressing compelling questions for human health, such as understanding the role of single-cell stochasticity in antibiotics resistance without losing sight of the inter-cellular interactions and microenvironment effects that may shape it.},
  keywords = {biomedical optical imaging;cellular biophysics;image reconstruction;image segmentation;medical image processing;microorganisms;diverse bacterial communities;cell attributes;bacterial time-lapse cell movies;computational modeling simulation;biological processes;single-cell level;multiple interacting bacterial clones;overcrowded bacterial communities;segmentation errors;correction strategy;improved cell tracking;image analysis approach;segmentation correction;complex cell movie analysis;single-cell analytics;single-cell stochasticity;cell trajectories;cell segmentation;bio-inspired image analysis;complex cell movies;field of view;fully automated closed-loop bio-inspired computational strategy;deep colony lineage trees;Forest of Lineage Trees representation;multiclonal bacterial communities;antibiotics resistance;intercellular interactions;microenvironment effects;cell segmentation and tracking;time-lapse microscopy;image analysis;forest of lineage trees;systems biology},
  doi = {10.23919/EUSIPCO.2017.8081537},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347532.pdf},
}

Your gaze betrays your age.
Le Meur, O.; Coutrot, A.; Liu, Z.; Rämä, P.; Le Roch, A.; and Helo, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1892-1896, Aug 2017.

@InProceedings{8081538,
  author = {O. {Le Meur} and A. Coutrot and Z. Liu and P. Rämä and A. {Le Roch} and A. Helo},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Your gaze betrays your age},
  year = {2017},
  pages = {1892-1896},
  abstract = {Visual attention networks are so pervasive in the human brain that eye movements carry a wealth of information that can be exploited for many purposes. In this paper, we present evidence that information derived from observers' gaze can be used to infer their age. This is the first study showing that simple features extracted from the ordered sequence of fixations and saccades allow us to predict the age of an observer. Eye movements of 101 participants split into 4 age groups (adults, 6-10 year-old, 4-6 year-old and 2 year-old) were recorded while exploring static images. The analysis of observers' gaze provides evidence of age-related differences in viewing patterns. Therefore, we extract from the scanpaths several features, including fixation durations and saccade amplitudes, and learn a direct mapping from those features to age using Gentle AdaBoost classifiers. Experimental results show that the proposed image-blind method succeeds in predicting the age of the observer up to 92% of the time. The use of predicted salience does not further improve the classification's accuracy.},
  keywords = {cognition;eye;feature extraction;human computer interaction;image classification;visual perception;fixation durations;image-blind method;visual attention networks;human brain;eye movements;static images;Gentle AdaBoost classifiers;viewing patterns;Feature extraction;Observers;Visualization;Europe;Signal processing;Visual systems;Aging},
  doi = {10.23919/EUSIPCO.2017.8081538},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347630.pdf},
}

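A minimal stand-in for the classification step: scikit-learn ships discrete/real AdaBoost rather than the Gentle AdaBoost used in the paper, and the scanpath features below are random placeholders for quantities such as fixation duration, saccade amplitude and fixation count.

import numpy as np
from sklearn.ensemble import AdaBoostClassifier

# Each row summarises one observer's scanpath (placeholder values);
# labels index the four age groups from the abstract.
X = np.random.rand(101, 3)
y = np.random.randint(0, 4, size=101)
clf = AdaBoostClassifier(n_estimators=200).fit(X, y)
age_group = clf.predict(X[:1])   # predicted age group for a new scanpath
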
Automatic characterization of ambulatory patterns of utilitarian and leisure trips.
Härmä, A.; and de Groot, K.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1897-1901, Aug 2017.

@InProceedings{8081539,
  author = {A. Härmä and K. {de Groot}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Automatic characterization of ambulatory patterns of utilitarian and leisure trips},
  year = {2017},
  pages = {1897-1901},
  abstract = {In health self-management services it is beneficial to identify and address the already existing healthy activity patterns of the user. Some of these healthy activity patterns might be of a utilitarian nature, e.g. commuting to work by bike or on foot, while others might be for leisure, like taking a walk in a park. In this paper we discuss one possibility to detect the utilitarian or leisure nature of a particular ambulatory path based on the geometry of the trajectory. In essence, a leisure trip is more commonly a round trip, while a utilitarian A-to-B trip follows the single shortest path between A and B. We define a generic measure for the characterization of utilitarian and leisure paths based on GPS location data and develop an algorithm that approximates the same measure based only on magnetometer data from a wearable device.},
  keywords = {gait analysis;Global Positioning System;leisure industry;medical computing;object tracking;utilitarian ambulatory pattern automatic characterization;leisure trip ambulatory pattern automatic characterization;GPS location data;single-shortest path;healthy activity patterns;health self-management services;utilitarian A-to-B trips;Signal processing algorithms;Magnetometers;Legged locomotion;Global Positioning System;Signal processing;Time series analysis;Europe},
  doi = {10.23919/EUSIPCO.2017.8081539},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347583.pdf},
}

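One plausible formalization of the round-trip-versus-A-to-B geometry discussed above compares net displacement with total path length. The measure below is this example's illustration, not necessarily the generic measure defined in the paper.

import numpy as np

def roundtripness(xy):
    # xy: (N, 2) positions in metres (e.g. projected GPS fixes).
    # Returns ~0 for a shortest-path A-to-B trip and ~1 for a loop
    # that returns to its starting point.
    steps = np.linalg.norm(np.diff(xy, axis=0), axis=1)
    path_len = steps.sum()
    net = np.linalg.norm(xy[-1] - xy[0])
    return 1.0 - net / max(path_len, 1e-9)
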
Image coding based on patch-driven inpainting.
Couto, N.; Naccari, M.; and Pereira, F.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1902-1906, Aug 2017.

@InProceedings{8081540,
  author = {N. Couto and M. Naccari and F. Pereira},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Image coding based on patch-driven inpainting},
  year = {2017},
  pages = {1902-1906},
  abstract = {Recent advances in capturing and display technologies, as well as the proliferation of platforms to share images on the Internet, will further increase the bandwidth and storage space required by image coding based applications. To reduce the image coding rate, some techniques taking into account the properties of the human visual system can be used. In this context, this paper proposes an inpainting-based image codec which is able to improve image compression efficiency. The proposed codec builds on top of the JPEG image coding standard, and its rate-distortion performance is assessed using a novel methodology particularly suitable for perceptual image codecs such as the one proposed in this paper. According to this methodology, the proposed image codec allows a bitrate reduction of up to about 20% relative to the JPEG standard, at the same perceptual quality.},
  keywords = {codecs;data compression;image coding;image coding rate;human visual system;inpainting based image codec;image compression efficiency;JPEG image coding standard;rate-distortion performance;perceptual image codecs;display technologies;storage space;image coding based applications;patch-driven inpainting;Image coding;Codecs;Transform coding;Decoding;Standards;Tools;Image edge detection;Image inpainting;patch-driven;JPEG coding;metric resolving power;objective quality assessment},
  doi = {10.23919/EUSIPCO.2017.8081540},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341762.pdf},
}

Microlens image sparse modelling for lossless compression of plenoptic camera sensor images.
Tabus, I.; and Helin, P.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1907-1911, Aug 2017.

@InProceedings{8081541,
  author = {I. Tabus and P. Helin},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Microlens image sparse modelling for lossless compression of plenoptic camera sensor images},
  year = {2017},
  pages = {1907-1911},
  abstract = {This paper studies the lossless compressibility of raw sensor images acquired by plenoptic cameras, when optimally interpolating the microlens images in terms of already encoded microlens images. The geometrical information necessary for splitting the sensor image into projections of microlenses, together with a relatively small bitstream for encoding the raw image at the microlens centers, are encoded as a first stage. The scanning order for sampling the data from the sensor follows row-by-row the approximate hexagonal lattice pattern of the microlenses, and the pixels inside each microlens are scanned in an ascending spiral order. The predictive encoding of a pixel from a microlens block uses the similarly located pixels (possibly slightly shifted) in the blocks from the nine closest causal microlenses (those already encoded) and the pixels from its own microlens located in the encoded part of the spiral. A minimum description length optimal sparse predictor is designed for each microlens. The sparsity masks and prediction coefficients are encoded in a second stage, and the prediction errors at every pixel are finally encoded in a third stage, in a view-by-view order (a view index being determined by the pixel's index in its block), using contexts accounting for the magnitude of errors at views already encoded. The experimental results show better performance than the JPEG 2000 image standard applied on the raw image.},
  keywords = {cameras;data compression;image coding;image resolution;image sensors;interpolation;microlenses;microlens image sparse modelling;JPEG 2000 image standard;ascending spiral order;approximate hexagonal lattice pattern;scanning order;microlens centers;geometrical information;plenoptic camera sensor images;lossless compression;prediction errors;prediction coefficients;sparsity masks;minimum description length optimal sparse predictor design;predictive pixel encoding;data sampling;raw sensor image encoding;microlens image interpolation;Lenses;Microoptics;Spirals;Image coding;Cameras;Lattices;Indexes},
  doi = {10.23919/EUSIPCO.2017.8081541},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346087.pdf},
}

A preprocessing technique for improving the compression performance of JPEG 2000 for images with sparse or locally sparse histograms.
Jallouli, S.; Zouari, S.; Masmoudi, A.; Puech, W.; and Masmoudi, N.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1912-1916, Aug 2017.

@InProceedings{8081542,
  author = {S. Jallouli and S. Zouari and A. Masmoudi and W. Puech and N. Masmoudi},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A preprocessing technique for improving the compression performance of JPEG 2000 for images with sparse or locally sparse histograms},
  year = {2017},
  pages = {1912-1916},
  abstract = {JPEG 2000 is one of the most efficient and best performing standards for continuous-tone natural image compression. However, a compression performance loss may occur when encoding images with sparse or locally sparse histograms. Images of the latter type include only a subset of the available intensity values implied by the nominal alphabet. This article proposes a new adaptive block-based histogram packing which improves the lossless compression performance of JPEG 2000 on sparse-histogram images. In this work, we take advantage of the strong resemblance between the symbol sets of neighboring image blocks and of the efficiency of offline histogram packing on images with sparse or locally sparse histograms. Results demonstrating its effectiveness with JPEG 2000 are presented.},
  keywords = {data compression;image coding;JPEG 2000;locally sparse histograms;continuous-tone natural images compression;compression performance loss;lossless compression performance;neighboring image blocks;offline histogram packing;locally sparse histogram images;image encoding;Histograms;Image coding;Transform coding;Bit rate;Europe;Signal processing;Standards;lossless image compression;offline histogram packing;JPEG 2000;alphabet reduction scheme;sparse and locally sparse histograms},
  doi = {10.23919/EUSIPCO.2017.8081542},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340752.pdf},
}

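The core histogram-packing idea is easy to sketch: remap the sparse set of used intensities onto a contiguous range before encoding and keep the table so the mapping stays lossless. The paper does this adaptively per block; the whole-image version below is a simplification.

import numpy as np

def pack(img):
    # Map the sparse set of used grey levels onto 0..K-1; the value
    # table is returned so the mapping can be inverted exactly.
    values = np.unique(img)
    lut = np.zeros(int(values.max()) + 1, dtype=np.int64)
    lut[values] = np.arange(values.size)
    return lut[img], values

def unpack(packed, values):
    return values[packed]   # exact inverse of pack()
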
PharmaPack: Mobile fine-grained recognition of pharma packages.
Taran, O.; Rezaeifar, S.; Dabrowski, O.; Schlechten, J.; Holotyak, T.; and Voloshynovskiy, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1917-1921, Aug 2017.

@InProceedings{8081543,\n  author = {O. Taran and S. Rezaeifar and O. Dabrowski and J. Schlechten and T. Holotyak and S. Voloshynovskiy},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {PharmaPack: Mobile fine-grained recognition of pharma packages},\n  year = {2017},\n  pages = {1917-1921},\n  abstract = {We consider the problem of fine-grained physical object recognition and introduce a dataset PharmaPack containing 1000 unique pharma packages enrolled in a controlled environment using consumer mobile phones as well as several recognition sets representing various scenarios. For performance evaluation, we extract two types of recently proposed local feature descriptors and aggregate them using popular tools. All enrolled raw and pre-processed images, extracted and aggregated descriptors are made public to promote reproducible research. To evaluate the baseline performance, we compare the methods based on aggregation of local descriptors with methods based on geometrical matching.},\n  keywords = {feature extraction;image matching;image representation;mobile computing;mobile handsets;object recognition;packaging;pharmaceuticals;fine-grained physical object recognition;dataset PharmaPack;consumer mobile phones;local feature descriptors;preprocessed images;pharma packages;geometrical matching;Feature extraction;Mobile communication;Mobile handsets;Visualization;Image recognition;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081543},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347067.pdf},\n}\n\n
\n
\n\n\n
\n We consider the problem of fine-grained physical object recognition and introduce PharmaPack, a dataset containing 1000 unique pharma packages enrolled in a controlled environment using consumer mobile phones, as well as several recognition sets representing various scenarios. For performance evaluation, we extract two types of recently proposed local feature descriptors and aggregate them using popular tools. All enrolled raw and pre-processed images, as well as the extracted and aggregated descriptors, are made public to promote reproducible research. To evaluate the baseline performance, we compare methods based on aggregation of local descriptors with methods based on geometrical matching.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Two-layer tracking for occlusion handling and inter-sensor identification in multiple depth sensors-based object detection and tracking.\n \n \n \n\n\n \n Sabirin, H.; and Naito, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1922-1926, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081544,\n  author = {H. Sabirin and S. Naito},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Two-layer tracking for occlusion handling and inter-sensor identification in multiple depth sensorsbased object detection and tracking},\n  year = {2017},\n  pages = {1922-1926},\n  abstract = {The main challenge in depth-based object detection and tracking process is to provide correct identification of the detected objects during occlusion. This is because the information necessary to distinguish and consequently identify the objects throughout the occlusion events are limited, compared to conventional, color-based object tracking. In this paper we propose a two-layer tracking method that enables automatic occlusion handling and inter-sensor identification for object detection and tracking that utilizes more than one depth sensor. On the first layer, the tracking is first performed independently for each sensor to extract objects' feature and perform initial tracking with separation of the occluded objects. On the second layer, the tracking is performed in the perspective projection of the objects tracked on the first layer that are combined in a single processing plane to provide correct identification of the objects that are detected in one sensor to another. Experiment results show that the proposed method can correctly identified occluded objects and objects that are moving between sensors coverage area.},\n  keywords = {feature extraction;image colour analysis;image sensors;object detection;object tracking;sensor fusion;occlusion events;two-layer tracking method;automatic occlusion handling;inter-sensor identification;object tracking process;multiple depth sensors-based object detection;color-based object tracking;object feature extraction;object identification;surveillance;object detection and tracking;depth data;depth-based separation;range sensor},\n  doi = {10.23919/EUSIPCO.2017.8081544},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n The main challenge in depth-based object detection and tracking is to provide correct identification of the detected objects during occlusion. This is because the information needed to distinguish, and consequently identify, the objects throughout occlusion events is limited compared to conventional, color-based object tracking. In this paper we propose a two-layer tracking method that enables automatic occlusion handling and inter-sensor identification for object detection and tracking with more than one depth sensor. On the first layer, tracking is performed independently for each sensor to extract objects' features and perform initial tracking with separation of the occluded objects. On the second layer, tracking is performed in the perspective projection of the objects tracked on the first layer, combined in a single processing plane, to provide correct identification of objects detected in one sensor relative to another. Experimental results show that the proposed method can correctly identify occluded objects and objects moving between the sensors' coverage areas.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An approach to power allocation in MIMO radar with sparse modeling for coherence minimization.\n \n \n \n \n\n\n \n Ajorloo, A.; Amini, A.; and Bastani, M. H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1927-1931, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081545,\n  author = {A. Ajorloo and A. Amini and M. H. Bastani},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An approach to power allocation in MIMO radar with sparse modeling for coherence minimization},\n  year = {2017},\n  pages = {1927-1931},\n  abstract = {In this paper, we aim at improving the estimation performance of the direction of arrival (DOA) in a colocated MIMO radar through power allocation under the sparsity constraints. Specifically, by considering the sparse recovery techniques, we try to minimize the coherence of associated sensing matrix by optimally distributing the power among transmit antennas. To determine the optimal power distribution, we reformulate the coherence minimization problem and derive a convex optimization constrained by the total power budget. This helps us to efficiently evaluate and simulate the optimal power distribution policy. Simulation results confirm superiority of the proposed method compared to the existing techniques.},\n  keywords = {convex programming;direction-of-arrival estimation;matrix algebra;MIMO radar;minimisation;radar antennas;radar signal processing;transmitting antennas;power allocation;sparse modeling;estimation performance;colocated MIMO radar;sparsity constraints;sparse recovery techniques;associated sensing matrix;coherence minimization problem;convex optimization;total power budget;optimal power distribution policy;direction of arrival;Sensors;Resource management;Coherence;Sparse matrices;MIMO radar;Direction-of-arrival estimation;Azimuth},\n  doi = {10.23919/EUSIPCO.2017.8081545},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347781.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we aim at improving the estimation performance of the direction of arrival (DOA) in a colocated MIMO radar through power allocation under sparsity constraints. Specifically, in a sparse recovery setting, we minimize the coherence of the associated sensing matrix by optimally distributing the power among transmit antennas. To determine the optimal power distribution, we reformulate the coherence minimization problem and derive a convex optimization constrained by the total power budget. This allows us to efficiently evaluate and simulate the optimal power distribution policy. Simulation results confirm the superiority of the proposed method over existing techniques.\n
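The objective being minimized here is the mutual coherence of the sensing matrix. The sketch below only illustrates that objective and how per-transmitter power weights enter it as a row scaling; it does not reproduce the paper's convex reformulation or its solver. The matrix, grouping of rows by transmitter, and power values are all hypothetical.

```python
import numpy as np

def mutual_coherence(A):
    """Largest absolute inner product between distinct normalized columns."""
    An = A / np.linalg.norm(A, axis=0, keepdims=True)
    G = np.abs(An.conj().T @ An)
    np.fill_diagonal(G, 0.0)
    return G.max()

rng = np.random.default_rng(0)
M, N, T = 16, 64, 8                      # measurements, grid size, transmitters
A = rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N))

# power allocation enters as a row weighting of the sensing matrix:
# rows are grouped by transmitter, each group scaled by sqrt(p_t)
p = rng.random(T); p *= T / p.sum()      # total power budget: sum(p) = T
W = np.repeat(np.sqrt(p), M // T)
print(mutual_coherence(np.diag(W) @ A))
```

Searching over `p` to minimize this quantity, subject to the budget, is the optimization the paper recasts in convex form.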
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A new height-estimation method using FMCW radar Doppler beam sharpening.\n \n \n \n \n\n\n \n Laribi, A.; Hahn, M.; Dickmann, J.; and Waldschmidt, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1932-1936, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081546,\n  author = {A. Laribi and M. Hahn and J. Dickmann and C. Waldschmidt},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A new height-estimation method using FMCW radar Doppler beam sharpening},\n  year = {2017},\n  pages = {1932-1936},\n  abstract = {This paper presents a new method for estimating the height of extended objects using a Frequency Modulation Continuous Wave (FMCW) automotive radar. The proposed algorithm exploits the frequency shift caused by the Doppler effect while approaching stationary objects, to estimate target heights. Thus, the algorithm does not require multiple vertical antennas for height finding. First, the measured radial velocity is derived using sensor target geometry, then, a target height is formulated as a function of target range, vehicle velocity and elevation angle component of the measured radial velocity. Next, the processing pipeline of the proposed Doppler Beam Sharpening (DBS) algorithm is described, and the three dimensional (3D) high resolution RELAX is applied to collected radar data to provide accurate range, azimuth angle and Doppler estimations of the detected targets. Finally height measurement results of an entrance gate 4.5 m high are presented and discussed. The results show that the proposed height finding algorithm can achieve a root mean squared error of 0.26 m.},\n  keywords = {CW radar;Doppler effect;Doppler radar;FM radar;height measurement;mean square error methods;radar detection;radar receivers;radar signal processing;road vehicle radar;height-estimation method;root mean squared error;3D high resolution RELAX;Doppler beam sharpening algorithm;radial velocity;frequency modulation continuous-wave automotive radar;vertical antennas;FMCW radar;height finding algorithm;height measurement results;azimuth angle;elevation angle component;vehicle velocity;sensor target geometry;Doppler effect;frequency shift;size 4.5 m;Doppler effect;Three-dimensional displays;Geometry;Doppler radar;Signal processing algorithms;Scattering},\n  doi = {10.23919/EUSIPCO.2017.8081546},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347635.pdf},\n}\n\n
\n
\n\n\n
\n This paper presents a new method for estimating the height of extended objects using a frequency-modulated continuous-wave (FMCW) automotive radar. The proposed algorithm exploits the frequency shift caused by the Doppler effect while approaching stationary objects to estimate target heights. Thus, the algorithm does not require multiple vertical antennas for height finding. First, the measured radial velocity is derived from the sensor-target geometry; then, the target height is formulated as a function of target range, vehicle velocity, and the elevation-angle component of the measured radial velocity. Next, the processing pipeline of the proposed Doppler Beam Sharpening (DBS) algorithm is described, and the three-dimensional (3D) high-resolution RELAX is applied to collected radar data to provide accurate range, azimuth angle, and Doppler estimates of the detected targets. Finally, height measurement results for an entrance gate 4.5 m high are presented and discussed. The results show that the proposed height-finding algorithm can achieve a root mean squared error of 0.26 m.\n
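The geometric inversion the abstract describes can be sketched with a common radial-velocity model (the paper's exact formulation may differ): for an ego vehicle moving at speed v toward a stationary target, the measured Doppler corresponds to v·cos(azimuth)·cos(elevation), so the elevation angle, and hence the height, follows from range, Doppler, and azimuth. All numbers and the sensor height below are hypothetical.

```python
import numpy as np

def target_height(r, v_radial, v_ego, azimuth, sensor_height=0.5):
    """
    Recover target height from FMCW Doppler, assuming a stationary target and
    v_radial = v_ego * cos(azimuth) * cos(elevation)  (hypothetical model).
    Note: the sign of the elevation angle is ambiguous from Doppler alone.
    """
    c = v_radial / (v_ego * np.cos(azimuth))
    elevation = np.arccos(np.clip(c, -1.0, 1.0))
    return sensor_height + r * np.sin(elevation)

# forward-simulate one detection and invert it
v_ego, az, el, r = 13.9, np.deg2rad(5.0), np.deg2rad(12.0), 20.0
v_rad = v_ego * np.cos(az) * np.cos(el)
print(target_height(r, v_rad, v_ego, az))   # ~ 0.5 + 20*sin(12 deg) = 4.66 m
```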
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-Doppler resolution automotive radar.\n \n \n \n \n\n\n \n Bialer, O.; and Kolpinizki, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1937-1941, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-DopplerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081547,\n  author = {O. Bialer and S. Kolpinizki},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-Doppler resolution automotive radar},\n  year = {2017},\n  pages = {1937-1941},\n  abstract = {Automotive radars operate in challenging environments that include objects with similar position and speed as well as objects with diverse positions and velocities. Automated driving requires the radar to discriminate close objects and also to accurately estimate the position of the objects in the field-of-view. Doppler filtering is essential to fulfill this goal. In conventional automotive radars the Doppler processing has a fixed and predetermined filtering integration time and hence a fixed Doppler resolution. However, in this case, setting the Doppler resolution is a tradeoff between high resolution that enables discrimination of close objects and accurate estimation of their position. In this paper we develop a multi-resolution Doppler processing method that resolves this tradeoff. The performance advantages of multi-resolution Doppler processing compared to the conventional fixed Doppler resolution are evaluated in an automotive scenario. It is shown that the multi-resolution Doppler processing attains better discrimination of close objects as well as more accurate position estimation of the objects.},\n  keywords = {Doppler radar;object detection;radar detection;radar resolution;road vehicle radar;multiresolution Doppler processing method;multiDoppler resolution automotive radar;automated driving;Doppler filtering;Doppler resolution;position estimation;Doppler effect;Doppler radar;Automotive engineering;Time-frequency analysis;Indexes;Motorcycles},\n  doi = {10.23919/EUSIPCO.2017.8081547},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347552.pdf},\n}\n\n
\n
\n\n\n
\n Automotive radars operate in challenging environments that include objects with similar position and speed as well as objects with diverse positions and velocities. Automated driving requires the radar to discriminate close objects and to accurately estimate the position of objects in the field of view. Doppler filtering is essential to fulfill this goal. In conventional automotive radars, Doppler processing has a fixed, predetermined filtering integration time and hence a fixed Doppler resolution. In this case, setting the Doppler resolution is a tradeoff between high resolution, which enables discrimination of close objects, and accurate estimation of their position. In this paper we develop a multi-resolution Doppler processing method that resolves this tradeoff. The performance advantages of multi-resolution Doppler processing over conventional fixed-resolution Doppler processing are evaluated in an automotive scenario. It is shown that multi-resolution Doppler processing attains better discrimination of close objects as well as more accurate position estimation.\n
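The underlying tradeoff is easy to reproduce: Doppler resolution equals the slow-time sampling rate divided by the number of coherently integrated pulses. The sketch below, with hypothetical parameters, processes the same slow-time signal with a short and a long integration window; only the long one resolves two close Doppler frequencies.

```python
import numpy as np

fs = 1000.0                       # slow-time (chirp) repetition rate, Hz
t = np.arange(512) / fs
# two targets at the same range with close Doppler frequencies
x = np.exp(2j*np.pi*100*t) + np.exp(2j*np.pi*108*t)

for n in (64, 512):               # short and long integration times
    spec = np.abs(np.fft.fft(x[:n] * np.hanning(n)))
    df = fs / n                   # Doppler bin width: resolution limit
    peaks = np.sort(np.argsort(spec)[-2:]) * df
    print(f"N={n:4d}: bin width {df:6.2f} Hz, top-2 bins at {peaks} Hz")
```

Running both window lengths in parallel, and fusing the outputs, is the essence of a multi-resolution scheme; the paper's specific fusion rule is not reproduced here.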
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multivariate change detection on high resolution monovariate SAR image using linear time-frequency analysis.\n \n \n \n \n\n\n \n Mian, A.; Ovarlez, J. P.; Ginolhac, G.; and Atto, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1942-1946, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MultivariatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081548,\n  author = {A. Mian and J. P. Ovarlez and G. Ginolhac and A. Atto},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multivariate change detection on high resolution monovariate SAR image using linear time-frequency analysis},\n  year = {2017},\n  pages = {1942-1946},\n  abstract = {In this paper, we propose a novel methodology for Change Detection between two monovariate complex SAR images. Linear Time-Frequency tools are used in order to recover a spectral and angular diversity of the scatterers present in the scene. This diversity is used in bi-date change detection framework to develop a detector, whose performances are better than the classic detector on monovariate SAR images.},\n  keywords = {image resolution;radar imaging;synthetic aperture radar;time-frequency analysis;multivariate change detection;monovariate complex SAR images;angular diversity;bi-date change detection framework;monovariate SAR images;spectral diversity;high resolution monovariate SAR image;linear time-frequency analysis;Synthetic aperture radar;Signal to noise ratio;Signal processing algorithms;Detectors;Europe;Change Detection;High-resolution SAR;Mono-variate Image;Linear Time-Frequency Analysis},\n  doi = {10.23919/EUSIPCO.2017.8081548},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347454.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a novel methodology for change detection between two monovariate complex SAR images. Linear time-frequency tools are used to recover the spectral and angular diversity of the scatterers present in the scene. This diversity is exploited in a bi-date change detection framework to develop a detector whose performance is better than that of the classic detector on monovariate SAR images.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data-driven method of reverse modelling for multi-function radar.\n \n \n \n \n\n\n \n Ou, J.; Zhao, F.; Ai, X.; Yang, J.; and Chen, Y.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1947-1951, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Data-drivenPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081549,\n  author = {J. Ou and F. Zhao and X. Ai and J. Yang and Y. Chen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Data-driven method of reverse modelling for multi-function radar},\n  year = {2017},\n  pages = {1947-1951},\n  abstract = {The common researches for radar system modelling are mostly based on deductive methods, which forward simulate the radar working process according to the information of radar parameters and expert system. However, the radar prior information obtained is limited under battlefield background. So it is difficult to model the radar operating modes and schedule schemes accurately, especially for the multi-function radars (MFRs) which are able to employ multiple modes flexibly. A novel method of reverse modelling for MFR is proposed. The information of the waveform is translated into grammar according to the theory of formal language, and the corresponding Finite-state Automaton (FSA) is composed as initialization. Then, according to the thought of data-driven, the transition relations and probabilities between MFR modes are yielded by analysing the intercepted signals. Finally, the stochastic finite automaton (SFA) is composed, achieving the MFR model by reverse modelling. Simulation with hypothetical MFR signal data is presented, showing that the proposed method is able to compose its SFA effectively, which can be used in MFR state recognition to support the adaptive radar countermeasures.},\n  keywords = {adaptive radar;data handling;finite automata;finite state machines;formal languages;grammars;probability;radar computing;radar signal processing;reverse engineering;stochastic processes;MFR model;reverse modelling;hypothetical MFR signal data;adaptive radar countermeasures;data-driven method;multifunction radar;radar system modelling;radar parameters;radar prior information;radar operating modes;multiple modes;MFR modes;finite-state automaton;stochastic finite automaton;Radar;Grammar;Automata;Formal languages;Radar signal processing;Syntactics;Signal processing algorithms;Multi-function radar;reverse modelling;syntactic pattern recognition;data-driven},\n  doi = {10.23919/EUSIPCO.2017.8081549},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347370.pdf},\n}\n\n
\n
\n\n\n
\n Most research on radar system modelling is based on deductive methods, which forward-simulate the radar working process from radar parameter information and expert systems. However, the prior information available about a radar is limited in a battlefield setting, so it is difficult to model radar operating modes and scheduling schemes accurately, especially for multi-function radars (MFRs), which can employ multiple modes flexibly. A novel method of reverse modelling for MFRs is proposed. The waveform information is translated into a grammar according to formal language theory, and the corresponding finite-state automaton (FSA) is composed as an initialization. Then, following a data-driven approach, the transition relations and probabilities between MFR modes are obtained by analysing the intercepted signals. Finally, the stochastic finite automaton (SFA) is composed, yielding the MFR model by reverse modelling. A simulation with hypothetical MFR signal data is presented, showing that the proposed method can compose the SFA effectively, which can be used in MFR state recognition to support adaptive radar countermeasures.\n
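The data-driven core of such a reverse-modelling step, estimating the transition probabilities of a stochastic automaton from an intercepted mode sequence, reduces to transition counting. The sketch below uses a hypothetical three-mode radar; the grammar/FSA construction from waveforms is not reproduced.

```python
import numpy as np

def estimate_transitions(sequence, n_modes):
    """ML estimate of Markov transition probabilities from an observed mode sequence."""
    counts = np.zeros((n_modes, n_modes))
    for a, b in zip(sequence[:-1], sequence[1:]):
        counts[a, b] += 1
    row = counts.sum(axis=1, keepdims=True)
    return np.divide(counts, row, out=np.zeros_like(counts), where=row > 0)

# hypothetical intercepted mode sequence: 0=search, 1=track, 2=guidance
rng = np.random.default_rng(1)
P_true = np.array([[0.8, 0.2, 0.0],
                   [0.1, 0.7, 0.2],
                   [0.0, 0.3, 0.7]])
seq = [0]
for _ in range(5000):
    seq.append(rng.choice(3, p=P_true[seq[-1]]))
print(np.round(estimate_transitions(seq, 3), 2))   # recovers P_true approximately
```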
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal design of sparse MIMO arrays for near-field ultrawideband imaging.\n \n \n \n \n\n\n \n Kocamis, M. B.; and Oktem, F. S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1952-1956, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081550,\n  author = {M. B. Kocamis and F. S. Oktem},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal design of sparse MIMO arrays for near-field ultrawideband imaging},\n  year = {2017},\n  pages = {1952-1956},\n  abstract = {Near-field ultrawideband imaging is a promising remote sensing technique in various applications such as airport security, surveillance, medical diagnosis, and through-wall imaging. Recently, there has been increasing interest in using sparse multiple-input-multiple-output (MIMO) arrays to reduce hardware complexity and cost. In this paper, based on a Bayesian estimation framework, an optimal design method is presented for two-dimensional MIMO arrays in ultrawideband imaging. The optimality criterion is defined based on the image reconstruction quality obtained with the design, and the optimization is performed over all possible locations of antenna elements using an algorithm called clustered sequential backward selection algorithm. The designs obtained with this approach are compared with that of some commonly used sparse array configurations in terms of image reconstruction quality for various noise levels.},\n  keywords = {antenna arrays;Bayes methods;estimation theory;image reconstruction;MIMO communication;near-field communication;optimisation;ultra wideband antennas;near-field ultrawideband imaging;Bayesian estimation framework;optimal design method;two-dimensional MIMO arrays;optimality criterion;image reconstruction quality;sparse MIMO arrays;remote sensing technique;hardware complexity reduction;sparse multiple-input-multiple-output arrays;cost reduction;antenna elements;clustered sequential backward selection algorithm;Antenna arrays;MIMO;Image reconstruction;Imaging;Antenna measurements;Transmitting antennas},\n  doi = {10.23919/EUSIPCO.2017.8081550},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347264.pdf},\n}\n\n
\n
\n\n\n
\n Near-field ultrawideband imaging is a promising remote sensing technique in various applications such as airport security, surveillance, medical diagnosis, and through-wall imaging. Recently, there has been increasing interest in using sparse multiple-input multiple-output (MIMO) arrays to reduce hardware complexity and cost. In this paper, based on a Bayesian estimation framework, an optimal design method is presented for two-dimensional MIMO arrays in ultrawideband imaging. The optimality criterion is defined in terms of the image reconstruction quality obtained with the design, and the optimization is performed over all possible locations of antenna elements using a clustered sequential backward selection algorithm. The designs obtained with this approach are compared with those of some commonly used sparse array configurations in terms of image reconstruction quality at various noise levels.\n
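A plain sequential-backward-selection skeleton conveys the search strategy: start from the full candidate set and repeatedly drop the element whose removal degrades the design cost the least. The clustered variant and the paper's Bayesian image-quality cost are not reproduced; the stand-in cost below (spreading elements over the aperture) is purely illustrative.

```python
import numpy as np

def backward_selection(candidates, cost, keep):
    """Greedy SBS: drop one element at a time, keeping the cheapest design."""
    active = list(range(len(candidates)))
    while len(active) > keep:
        scores = [cost([candidates[j] for j in active if j != i]) for i in active]
        active.pop(int(np.argmin(scores)))          # removal hurting cost least
    return [candidates[i] for i in active]

# toy stand-in cost: prefer element positions spread over the aperture
def spread_cost(positions):
    d = np.diff(np.sort(positions))
    return -d.min()                                 # maximize the minimum gap

grid = np.linspace(0.0, 1.0, 20)                    # candidate antenna locations
print(backward_selection(grid, spread_cost, keep=5))
```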
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Design of multi-carrier MIMO radar array for DOA estimation.\n \n \n \n \n\n\n \n Ulrich, M.; Yang, Y.; and Yang, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1957-1961, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DesignPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081551,\n  author = {M. Ulrich and Y. Yang and B. Yang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Design of multi-carrier MIMO radar array for DOA estimation},\n  year = {2017},\n  pages = {1957-1961},\n  abstract = {Multi-carrier (MC) multiple-input multiple-output (MIMO) radar offers an additional degree of freedom in the array optimization through the carrier frequencies. In this paper, we study the MC-MIMO array optimization with respect to the direction of arrival (DOA) estimation based on the Cramer-Rao bound (CRB). In particular, we choose the transmit and receive antenna positions as well as the carrier frequencies to minimize the single-target CRB subject to a constraint of the peak sidelobe level. A genetic algorithm is used to solve the problem and numerical examples demonstrate the superiority of our approach over both single-carrier MIMO radar and existing design rules.},\n  keywords = {antenna arrays;array signal processing;direction-of-arrival estimation;genetic algorithms;MIMO radar;radar antennas;radar signal processing;receiving antennas;wireless channels;single-target CRB subject;single-carrier MIMO radar;multicarrier MIMO radar array;DOA estimation;carrier frequencies;MC-MIMO array optimization;Cramer-Rao bound;genetic algorithm;transmit antenna positions;receive antenna positions;multicarrier multiple-input multiple-output radar;direction of arrival estimation;Direction-of-arrival estimation;Estimation;Radar;OFDM;Antenna arrays;MIMO;Radar antennas},\n  doi = {10.23919/EUSIPCO.2017.8081551},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347054.pdf},\n}\n\n
\n
\n\n\n
\n Multi-carrier (MC) multiple-input multiple-output (MIMO) radar offers an additional degree of freedom in array optimization through the carrier frequencies. In this paper, we study MC-MIMO array optimization with respect to direction of arrival (DOA) estimation based on the Cramer-Rao bound (CRB). In particular, we choose the transmit and receive antenna positions as well as the carrier frequencies to minimize the single-target CRB subject to a constraint on the peak sidelobe level. A genetic algorithm is used to solve the problem, and numerical examples demonstrate the superiority of our approach over both single-carrier MIMO radar and existing design rules.\n
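Evaluating the peak-sidelobe-level constraint for a candidate design is the inner loop of such an optimization. The sketch below uses a simple hypothetical model in which each (transmitter, receiver, carrier) triple contributes a virtual phase centre at (x_t + x_r)/λ_c; the paper's exact signal model, CRB expression, and genetic algorithm are not reproduced, and all positions and wavelengths are made up.

```python
import numpy as np

def beampattern(pos_tx, pos_rx, wavelengths, theta):
    """|array response| of a MC-MIMO virtual array (hypothetical model)."""
    u = np.sin(theta)[:, None]
    virt = np.array([(xt + xr) / lam
                     for lam in wavelengths for xt in pos_tx for xr in pos_rx])
    return np.abs(np.exp(2j * np.pi * u * virt[None, :]).sum(axis=1)) / virt.size

theta = np.linspace(-np.pi/2, np.pi/2, 2048)
bp = beampattern([0.0, 0.008],                      # tx positions, m
                 [0.0, 0.002, 0.004, 0.006],        # rx positions, m
                 [0.0039, 0.0041],                  # two carriers near 77 GHz
                 theta)
sidelobes = np.abs(np.sin(theta)) > 0.3             # crude mainlobe exclusion
print(f"peak sidelobe level: {20*np.log10(bp[sidelobes].max()):.1f} dB")
```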
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spectrum reconstruction with nonuniform fast Fourier transform for MIMO SAR azimuth nonuniform sampling.\n \n \n \n \n\n\n \n Zhao, G.; Fu, Y.; Nie, L.; Zhang, W.; and Zhuang, Z.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1962-1965, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SpectrumPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081552,\n  author = {G. Zhao and Y. Fu and L. Nie and W. Zhang and Z. Zhuang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Spectrum reconstruction with nonuniform fast Fourier transform for MIMO SAR azimuth nonuniform sampling},\n  year = {2017},\n  pages = {1962-1965},\n  abstract = {Multiple input multiple output (MIMO) synthetic aperture radar (SAR) shows much potential compared with traditional SAR in many interesting applications. In MIMO SAR high resolution wide swath imaging, azimuth ambiguity is a problem when the system azimuth sampling is nonuniform. A spectrum reconstruction method based on periodic nonuniform sampling theory has been used for azimuth ambiguity suppression. However, the computation cost is very high for MIMO SAR with a lot of transmitters/receivers. In this paper, MIMO SAR spectrum reconstruction with nonuniform fast Fourier transform (NUFFT) is proposed. The simulation results show the effectivity of the proposed spectrum reconstruction method.},\n  keywords = {fast Fourier transforms;image reconstruction;image sampling;MIMO radar;radar imaging;synthetic aperture radar;nonuniform fast Fourier transform;spectrum reconstruction;MIMO SAR spectrum reconstruction;azimuth ambiguity suppression;periodic nonuniform sampling theory;system azimuth sampling;MIMO SAR high resolution wide swath imaging;multiple input multiple output synthetic aperture radar;MIMO SAR azimuth nonuniform sampling;Synthetic aperture radar;MIMO;Image reconstruction;Azimuth;Reconstruction algorithms;Manganese;Azimuth ambiguity;reconstruction algorithm;nonuniform fast Fourier transform(NUFFT);multiple input multiple output synthetic aperture radar (MIMO SAR)},\n  doi = {10.23919/EUSIPCO.2017.8081552},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347509.pdf},\n}\n\n
\n
\n\n\n
\n Multiple-input multiple-output (MIMO) synthetic aperture radar (SAR) shows much potential compared with traditional SAR in many applications. In MIMO SAR high-resolution wide-swath imaging, azimuth ambiguity is a problem when the system's azimuth sampling is nonuniform. A spectrum reconstruction method based on periodic nonuniform sampling theory has been used for azimuth ambiguity suppression. However, its computational cost is very high for MIMO SAR with many transmitters and receivers. In this paper, MIMO SAR spectrum reconstruction with the nonuniform fast Fourier transform (NUFFT) is proposed. Simulation results show the effectiveness of the proposed spectrum reconstruction method.\n
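The operation the NUFFT accelerates is the nonuniform discrete Fourier transform. A direct O(N²) reference implementation, which the fast transform approximates, looks like the sketch below; the jittered azimuth positions are hypothetical stand-ins for the nonuniform slow-time sampling of a MIMO SAR.

```python
import numpy as np

def ndft(x, positions, n_freqs):
    """Direct nonuniform DFT: X[k] = sum_m x[m] * exp(-2j*pi*k*positions[m]),
    with sample positions normalized to [0, 1)."""
    k = np.arange(n_freqs)
    return np.exp(-2j * np.pi * np.outer(k, positions)) @ x

N = 64
rng = np.random.default_rng(0)
x = rng.standard_normal(N)

# on a uniform grid the NDFT reduces to the ordinary FFT
uniform = np.arange(N) / N
assert np.allclose(ndft(x, uniform, N), np.fft.fft(x))

# azimuth-nonuniform sampling: jittered slow-time positions
jittered = (uniform + 0.2 * rng.random(N) / N) % 1.0
X = ndft(x, jittered, N)   # spectrum on the nonuniform grid (O(N^2) here)
```

An NUFFT library would compute `X` in O(N log N) with controlled approximation error, which is what makes the reconstruction tractable for many transmit/receive channels.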
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Projector's weighting for W-MUSIC: An alternative to RMT.\n \n \n \n \n\n\n \n Ferréol, A.; and Larzabal, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1966-1970, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Projector'sPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081553,\n  author = {A. Ferréol and P. Larzabal},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Projector's weighting for W-MUSIC: An alternative to RMT},\n  year = {2017},\n  pages = {1966-1970},\n  abstract = {In the last decade, modified subspace DoA estimation methods such as G-MUSIC have been proposed, in the context where the number of available snapshots N is of the same order of magnitude than the number of sensors M. In this context, the conventional MUSIC algorithm fails in presence of close sources because the empirical covariance matrix is a poor estimate of the true covariance matrix. The G-MUSIC algorithm is based on Marcenko-Pastur's works about the distribution of the eigenvalues of the empirical covariance matrix. A new modified MUSIC algorithm is proposed. It is based on the correction of the noise projector obtained by complex Wishart distribution of the empirical covariance matrix.},\n  keywords = {covariance matrices;direction-of-arrival estimation;eigenvalues and eigenfunctions;signal classification;G-MUSIC algorithm;covariance matrix;W-MUSIC;DoA estimation methods;Marcenko-Pasturs works;Wishart distribution;Multiple signal classification;Covariance matrices;Direction-of-arrival estimation;Signal processing algorithms;Sensors;Estimation;Perturbation methods;MUSIC;DoA estimation;Performances analysis;Wishart distribution;Random matrices},\n  doi = {10.23919/EUSIPCO.2017.8081553},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346531.pdf},\n}\n\n
\n
\n\n\n
\n In the last decade, modified subspace DoA estimation methods such as G-MUSIC have been proposed for the context where the number of available snapshots N is of the same order of magnitude as the number of sensors M. In this context, the conventional MUSIC algorithm fails in the presence of close sources because the empirical covariance matrix is a poor estimate of the true covariance matrix. The G-MUSIC algorithm is based on Marcenko-Pastur's results on the distribution of the eigenvalues of the empirical covariance matrix. A new modified MUSIC algorithm is proposed. It is based on a correction of the noise projector obtained from the complex Wishart distribution of the empirical covariance matrix.\n
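For context, the baseline pipeline that the proposed projector correction plugs into is standard MUSIC: eigendecompose the empirical covariance, take the noise subspace, and scan a steering grid. The sketch below shows only that baseline in the small-sample regime the abstract targets; the Wishart-based weighting itself is not reproduced, and the scenario parameters are hypothetical.

```python
import numpy as np

def music_spectrum(R, n_sources, spacing=0.5, grid=np.linspace(-90, 90, 361)):
    """Baseline MUSIC: noise projector from the empirical covariance matrix."""
    M = R.shape[0]
    w, V = np.linalg.eigh(R)              # eigenvalues in ascending order
    En = V[:, :M - n_sources]             # noise subspace
    th = np.deg2rad(grid)
    A = np.exp(2j*np.pi*spacing*np.outer(np.arange(M), np.sin(th)))
    return grid, 1.0 / np.linalg.norm(En.conj().T @ A, axis=0)**2

# small-sample regime: N snapshots comparable to M sensors
M, N = 10, 20
rng = np.random.default_rng(2)
doas = np.deg2rad([-5.0, 5.0])
A = np.exp(2j*np.pi*0.5*np.outer(np.arange(M), np.sin(doas)))
S = (rng.standard_normal((2, N)) + 1j*rng.standard_normal((2, N))) / np.sqrt(2)
X = A @ S + 0.3*(rng.standard_normal((M, N)) + 1j*rng.standard_normal((M, N)))
R_hat = X @ X.conj().T / N
grid, P = music_spectrum(R_hat, n_sources=2)
print(grid[np.argsort(P)[-2:]])   # two largest grid values (a peak finder in practice)
```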
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Refinement of time-difference-of-arrival measurements via rank properties in two-dimensional space.\n \n \n \n \n\n\n \n Le, T.; and Ono, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1971-1975, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RefinementPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081554,\n  author = {T. Le and N. Ono},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Refinement of time-difference-of-arrival measurements via rank properties in two-dimensional space},\n  year = {2017},\n  pages = {1971-1975},\n  abstract = {Two new rank properties for time difference of arrival (TDOA) measurements in two-dimensional space are reported in this paper. On the basis of these rank properties, we propose a class of algorithms to refine TDOAs from their observations. Since only the singular value decomposition (SVD) technique is used, these proposed algorithms are very simple. Simulative experiments show that the accuracy of TDOA estimations is significantly improved using the proposed refining algorithms. Moreover, their ability to improve TDOA-based joint source and sensor localization is also proven by simulative experiments.},\n  keywords = {direction-of-arrival estimation;singular value decomposition;time-of-arrival estimation;rank properties;two-dimensional space;singular value decomposition technique;TDOA estimations;refining algorithms;time-difference-of-arrival measurements;Indexes;Signal processing algorithms;Manganese;Signal processing;Time difference of arrival;Europe;Redundancy;Time Difference of Arrival;Time of Arrival;Expansion of Time Difference of Arrival},\n  doi = {10.23919/EUSIPCO.2017.8081554},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342695.pdf},\n}\n\n
\n
\n\n\n
\n Two new rank properties of time-difference-of-arrival (TDOA) measurements in two-dimensional space are reported in this paper. On the basis of these rank properties, we propose a class of algorithms to refine TDOAs from their observations. Since only the singular value decomposition (SVD) is used, the proposed algorithms are very simple. Simulation experiments show that the accuracy of TDOA estimates is significantly improved by the proposed refining algorithms. Moreover, their ability to improve TDOA-based joint source and sensor localization is also demonstrated by simulation.\n
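The flavor of an SVD-based TDOA refinement can be illustrated with one well-known rank property (not necessarily one of the two the paper derives): the matrix of pairwise TDOAs, T[i,j] = τ_i − τ_j, equals τ1ᵀ − 1τᵀ and so has rank at most 2. Projecting a noisy TDOA matrix back onto rank-2 matrices therefore denoises it.

```python
import numpy as np

rng = np.random.default_rng(3)
M = 8
tau = rng.random(M)                          # true arrival delays
T = np.subtract.outer(tau, tau)              # T[i, j] = tau_i - tau_j, rank <= 2
T_noisy = T + 0.05 * rng.standard_normal((M, M))
T_noisy = (T_noisy - T_noisy.T) / 2          # keep the antisymmetric structure

# project onto the set of rank-2 matrices via truncated SVD
U, s, Vt = np.linalg.svd(T_noisy)
T_refined = U[:, :2] @ np.diag(s[:2]) @ Vt[:2, :]

print(np.abs(T_noisy - T).mean(), np.abs(T_refined - T).mean())  # error shrinks
```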
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse linear nested array for active sensing.\n \n \n \n \n\n\n \n Rajamäki, R.; and Koivunen, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1976-1980, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SparsePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081555,\n  author = {R. Rajamäki and V. Koivunen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Sparse linear nested array for active sensing},\n  year = {2017},\n  pages = {1976-1980},\n  abstract = {Sparse sensor arrays can match the performance of fully populated arrays using substantially fewer elements. However, finding the array configuration with the smallest number of elements is generally a computationally difficult problem. Consequently, simple to generate array configurations that may be suboptimal are of high practical interest. This paper presents a novel closed-form sparse linear array configuration designed for active sensing, called the Concatenated Nested Array (CNA). The key parameters of the CNA are derived. The CNA is also compared to the optimal Minimum-Redundancy Array (MRA) in numerical simulations. The CNA is shown to require only about 10% more elements than the MRA in the limit of large apertures.},\n  keywords = {array signal processing;sensor arrays;closed-form sparse linear array configuration;active sensing;Concatenated Nested Array;CNA;optimal Minimum-Redundancy Array;sparse linear nested array;sparse sensor arrays;Redundancy;Apertures;Array signal processing;Sensor arrays;Geometry},\n  doi = {10.23919/EUSIPCO.2017.8081555},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341559.pdf},\n}\n\n
\n
\n\n\n
\n Sparse sensor arrays can match the performance of fully populated arrays using substantially fewer elements. However, finding the array configuration with the smallest number of elements is generally a computationally difficult problem. Consequently, easily generated array configurations that may be suboptimal are of high practical interest. This paper presents a novel closed-form sparse linear array configuration designed for active sensing, called the Concatenated Nested Array (CNA). The key parameters of the CNA are derived. The CNA is also compared to the optimal Minimum-Redundancy Array (MRA) in numerical simulations. The CNA is shown to require only about 10% more elements than the MRA in the limit of large apertures.\n
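The abstract does not give the CNA construction, so the sketch below instead generates the classical two-level nested array it builds on and computes its sum co-array, the virtual aperture that matters for active sensing. Parameters are illustrative only.

```python
import numpy as np

def nested_array(n1, n2):
    """Two-level nested array: dense ULA of n1 elements plus a sparse ULA
    with spacing (n1 + 1), in units of the base inter-element spacing."""
    dense = np.arange(1, n1 + 1)
    sparse = (n1 + 1) * np.arange(1, n2 + 1)
    return np.concatenate([dense, sparse])

pos = nested_array(3, 3)                        # 6 physical elements
sums = np.unique(np.add.outer(pos, pos))        # sum co-array (active sensing)
print(pos)          # [ 1  2  3  4  8 12]
print(sums)         # contiguous from 2 to 16, plus 20 and 24
```

A handful of physical elements thus yields a much longer (largely hole-free) virtual array, which is the effect closed-form designs like the CNA exploit.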
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Near-optimal greedy sensor selection for MVDR beamforming with modular budget constraint.\n \n \n \n \n\n\n \n Contino, M.; Chepuri, S.; and Leus, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1981-1985, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Near-optimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081556,\n  author = {M. Contino and S. Chepuri and G. Leus},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Near-optimal greedy sensor selection for MVDR beamforming with modular budget constraint},\n  year = {2017},\n  pages = {1981-1985},\n  abstract = {In this paper, we present a greedy sensor selection algorithm for minimum variance distortionless response (MVDR) beamforming under a modular budget constraint. In particular, we propose a submodular set-function that can be maximized using a linear-time greedy heuristic that is near optimal. Different from the convex formulation that is typically used to solve the sensor selection problem, the method in this paper neither involves computationally intensive semidefinite programs nor convex relaxation of the Boolean variables. While numerical experiments show a comparable performance between the convex and submodular relaxations, in terms of output signal-to-noise ratio, the latter finds a near-optimal solution with a significantly reduced computational complexity.},\n  keywords = {approximation theory;array signal processing;computational complexity;convex programming;greedy algorithms;optimisation;modular budget constraint;greedy sensor selection algorithm;minimum variance distortionless response;submodular set-function;convex formulation;sensor selection problem;computationally intensive semidefinite programs;convex relaxation;submodular relaxations;near-optimal solution;MVDR beamforming;near-optimal greedy sensor selection;Array signal processing;Signal processing algorithms;Signal to noise ratio;Covariance matrices;Antenna arrays;Arrays;submodularity;MVDR beamforming;greedy algorithm;budget constraint;sensor selection},\n  doi = {10.23919/EUSIPCO.2017.8081556},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346790.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we present a greedy sensor selection algorithm for minimum variance distortionless response (MVDR) beamforming under a modular budget constraint. In particular, we propose a submodular set function that can be maximized using a near-optimal, linear-time greedy heuristic. Unlike the convex formulation typically used to solve the sensor selection problem, the method in this paper involves neither computationally intensive semidefinite programs nor convex relaxation of the Boolean variables. While numerical experiments show comparable performance between the convex and submodular relaxations in terms of output signal-to-noise ratio, the latter finds a near-optimal solution with significantly reduced computational complexity.\n
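A generic greedy loop for maximizing a set function under a modular (additive) budget looks like the skeleton below. The paper's submodular surrogate for MVDR output SNR is not reproduced; a log-det objective, which is itself submodular, stands in as a placeholder, and all data are synthetic.

```python
import numpy as np

def greedy_select(n_sensors, gain, cost, budget):
    """Greedy maximization of a set function under a modular budget constraint."""
    S = []
    while True:
        cands = [i for i in range(n_sensors)
                 if i not in S and cost(S + [i]) <= budget]
        if not cands:
            return S
        S.append(max(cands, key=lambda i: gain(S + [i])))  # best affordable add

rng = np.random.default_rng(4)
M = 12
A = rng.standard_normal((M, 40))                    # per-sensor response rows
c = rng.uniform(1.0, 3.0, M)                        # modular (additive) sensor costs

gain = lambda S: np.linalg.slogdet(np.eye(40) + A[S].T @ A[S])[1]  # placeholder
cost = lambda S: c[list(S)].sum()
print(greedy_select(M, gain, cost, budget=10.0))
```

For monotone submodular objectives this plain greedy rule carries the familiar near-optimality guarantees the abstract alludes to.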
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gridless compressed sensing for fully augmentable arrays.\n \n \n \n \n\n\n \n Suleiman, W.; Steffens, C.; Sorg, A.; and Pesavento, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1986-1990, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"GridlessPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081557,\n  author = {W. Suleiman and C. Steffens and A. Sorg and M. Pesavento},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Gridless compressed sensing for fully augmentable arrays},\n  year = {2017},\n  pages = {1986-1990},\n  abstract = {Direction-of-arrival (DOA) estimation using non-uniform linear arrays is considered. We focus on the so called {"}fully augmentable arrays{"} (FAAs) with full set of covariance lags. In FAAs, the number of covariance lags is usually larger than the number of sensors in the array. Thus, with FAAs more sources than the number of sensors can be identified. Existing DOA estimation algorithms for FAAs are based on the assumption of uncorrelated sources. In this paper, based on compressed sensing, we present a DOA estimation algorithm for FAAs without assuming uncorrelated sources. The proposed algorithm is based on the newly introduced gridless SPARse ROW-norm reconstruction (SPARROW) formulation for the joint sparse reconstruction from multiple measurement vectors. By numerical experiments, we show that the proposed algorithm outperforms the existing algorithms in the presence of correlated signals or small number of snapshots. Moreover, using simulations, the behavior of the Cramer-Rao Bound (CRB) for the case of correlated source is demonstrated and it is shown that, when the number of sources is larger than the number of sensors, the CRB for FAAs approaches zero at infinitely large signal-to-noise-ratio (SNR) only if the sources are fully correlated.},\n  keywords = {array signal processing;compressed sensing;covariance analysis;direction-of-arrival estimation;signal reconstruction;gridless compressed sensing;fully augmentable arrays;direction-of-arrival estimation;nonuniform linear arrays;covariance lags;DOA estimation algorithm;sparse reconstruction;gridless SPARse ROW-norm reconstruction formulation;SPARROW formulation;Cramer-Rao bound;Direction-of-arrival estimation;Sensor arrays;Covariance matrices;Estimation;FAA;Signal processing algorithms;Fully augmentable arrays;Joint Sparsity;Grid-less Parameter Estimation;Direction-of-arrival estimation},\n  doi = {10.23919/EUSIPCO.2017.8081557},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346689.pdf},\n}\n\n
\n
\n\n\n
\n Direction-of-arrival (DOA) estimation using non-uniform linear arrays is considered. We focus on so-called "fully augmentable arrays" (FAAs), which have a full set of covariance lags. In FAAs, the number of covariance lags is usually larger than the number of sensors in the array; thus, with FAAs, more sources than sensors can be identified. Existing DOA estimation algorithms for FAAs are based on the assumption of uncorrelated sources. In this paper, based on compressed sensing, we present a DOA estimation algorithm for FAAs without assuming uncorrelated sources. The proposed algorithm is based on the newly introduced gridless SPARse ROW-norm reconstruction (SPARROW) formulation for joint sparse reconstruction from multiple measurement vectors. Numerical experiments show that the proposed algorithm outperforms existing algorithms in the presence of correlated signals or a small number of snapshots. Moreover, simulations of the behavior of the Cramer-Rao bound (CRB) for correlated sources show that, when the number of sources is larger than the number of sensors, the CRB for FAAs approaches zero at infinitely large signal-to-noise ratio (SNR) only if the sources are fully correlated.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Frequency domain multipath fading channel simulator integrated with OFDM transmitter for E-UTRAN baseband traffic generator.\n \n \n \n \n\n\n \n Cisek, G.; and Zieliński, T.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1991-1995, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"FrequencyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081558,\n  author = {G. Cisek and T. Zieliński},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Frequency domain multipath fading channel simulator integrated with OFDM transmitter for E-UTRAN baseband traffic generator},\n  year = {2017},\n  pages = {1991-1995},\n  abstract = {The purpose of the study is to develop an efficient 3GPP compliant method to simulate multiple independent fading radio channels in software defined Evolved Universal Terrestrial Radio Access Network (E-EUTRAN) traffic generator. In this paper, frequency domain representation of commonly accepted Tapped Delay Line (TDL) model is discussed and three transformation algorithms are evaluated. The effects of multipath fading channel are applied to the signal at the level of Orthogonal Frequency Division Multiplexing1 (OFDM) transmitter prior to IFFT stage. Models 0 and 1 are based on Digital Fourier Transform (DFT) of TDL with and without consideration of Intercarrier Interference (ICI) phenomenon. Model 2 is the novel method that extends quasi-stationary model with low-cost linear approximation of ICI applied directly in frequency domain in order to gain overall accuracy with small computational effort. When limiting the ICI term to 16 neighboring subcarriers, Model 2 exhibits 12 dB SNR improvement comparing to stationary model and offers execution time advantage comparing to TDL model when the number of terminals sharing radio resources is high.},\n  keywords = {3G mobile communication;fading channels;Fourier transforms;frequency-domain analysis;intercarrier interference;multipath channels;OFDM modulation;radio access networks;radio transmitters;software radio;OFDM transmitter;E-UTRAN baseband traffic generator;efficient 3GPP compliant method;multiple independent fading radio channels;frequency domain representation;transformation algorithms;Intercarrier Interference phenomenon;quasistationary model;TDL model;radio resources;Frequency domain multipath fading channel simulator;software defined Evolved Universal Terrestrial Radio Access Network traffic generator;Tapped Delay Line model;Orthogonal Frequency Division Multiplexing transmitter;OFDM;Fading channels;Frequency-domain analysis;Time-domain analysis;Noise measurement;Computational modeling;Mathematical model;OFDM;Multipath channels;Simulation},\n  doi = {10.23919/EUSIPCO.2017.8081558},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347276.pdf},\n}\n\n
\n
\n\n\n
\n The purpose of this study is to develop an efficient 3GPP-compliant method to simulate multiple independent fading radio channels in a software-defined Evolved Universal Terrestrial Radio Access Network (E-UTRAN) traffic generator. In this paper, the frequency domain representation of the commonly accepted Tapped Delay Line (TDL) model is discussed and three transformation algorithms are evaluated. The effects of the multipath fading channel are applied to the signal at the level of the Orthogonal Frequency Division Multiplexing (OFDM) transmitter, prior to the IFFT stage. Models 0 and 1 are based on the Discrete Fourier Transform (DFT) of the TDL, with and without consideration of the Intercarrier Interference (ICI) phenomenon. Model 2 is a novel method that extends the quasi-stationary model with a low-cost linear approximation of ICI applied directly in the frequency domain, in order to gain overall accuracy with small computational effort. When the ICI term is limited to 16 neighboring subcarriers, Model 2 exhibits a 12 dB SNR improvement compared to the stationary model and offers an execution time advantage over the TDL model when the number of terminals sharing radio resources is high.\n
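The stationary (Model 0-style) case reduces to one complex multiplication per subcarrier before the IFFT: H[k] = Σ_l g_l·exp(−2jπ f_k τ_l). The sketch below applies such a TDL in the frequency domain; the ICI correction of Model 2 is not reproduced, and the EPA-like tap profile, subcarrier count, and spacing are illustrative assumptions.

```python
import numpy as np

def tdl_frequency_response(delays_s, gains, n_sc, scs_hz):
    """Stationary TDL evaluated on the OFDM subcarrier grid:
    H[k] = sum_l g_l * exp(-2j*pi*f_k*tau_l)."""
    f = np.arange(n_sc) * scs_hz
    return np.exp(-2j * np.pi * np.outer(f, delays_s)) @ gains

# hypothetical EPA-like profile: tap delays (s) and average tap powers (dB)
delays = np.array([0, 30e-9, 70e-9, 90e-9, 110e-9, 190e-9, 410e-9])
p_lin = 10 ** (np.array([0.0, -1.0, -2.0, -3.0, -8.0, -17.2, -20.8]) / 10)
rng = np.random.default_rng(5)
g = np.sqrt(p_lin / 2) * (rng.standard_normal(7) + 1j * rng.standard_normal(7))

n_sc, scs = 1200, 15e3                      # LTE-like grid: 1200 SCs, 15 kHz
H = tdl_frequency_response(delays, g, n_sc, scs)
X = (rng.integers(0, 2, n_sc) * 2 - 1).astype(complex)    # BPSK symbols
Y = H * X                                   # channel applied per subcarrier
tx = np.fft.ifft(Y)                         # faded OFDM time-domain signal
```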
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal compression of vibration data with lifting wavelet transform and context-based arithmetic coding.\n \n \n \n \n\n\n \n Zhang, Y.; Hutchinson, P.; Lieven, N. A. J.; and Nunez-Yanez, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 1996-2000, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081559,\n  author = {Y. Zhang and P. Hutchinson and N. A. J. Lieven and J. Nunez-Yanez},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal compression of vibration data with lifting wavelet transform and context-based arithmetic coding},\n  year = {2017},\n  pages = {1996-2000},\n  abstract = {This paper proposes an adaptive vibration signal compression scheme composed of a lifting discrete wavelet transform (LDWT) with set-partitioning embedded blocks (SPECK) that efficiently sorts the wavelet coefficients by significance. The output of the SPECK module is input to an optimized context-based arithmetic coder that generates the compressed bitstream. The algorithm is deployed as part of a reliable and effective health monitoring technology for machines and civil constructions (e.g. power generation system). This application area relies on the collection of large quantities of high quality vibration sensor data that needs to be compressed before storing and transmission. Experimental results indicate that the proposed method outperforms state-of-the-art coders, while retaining the characteristics in the compressed vibration signals to ensure accurate event analysis. For the same quality level, up to 59.41% bitrate reduction is achieved by the proposed method compared to JPEG2000.},\n  keywords = {arithmetic codes;condition monitoring;data compression;discrete wavelet transforms;optimal compression;adaptive vibration signal compression scheme;set-partitioning embedded blocks;wavelet coefficients;health monitoring technology;lifting discrete wavelet transform;optimized context-based arithmetic coding;machines constructions;compressed vibration signals;high quality vibration sensor data;power generation system;civil constructions;compressed bitstream;SPECK module;Encoding;Vibrations;Discrete wavelet transforms;Context modeling;Signal processing algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081559},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346906.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes an adaptive vibration signal compression scheme composed of a lifting discrete wavelet transform (LDWT) with set-partitioning embedded blocks (SPECK), which efficiently sorts the wavelet coefficients by significance. The output of the SPECK module is input to an optimized context-based arithmetic coder that generates the compressed bitstream. The algorithm is deployed as part of a reliable and effective health monitoring technology for machines and civil structures (e.g., power generation systems). This application area relies on the collection of large quantities of high-quality vibration sensor data that need to be compressed before storage and transmission. Experimental results indicate that the proposed method outperforms state-of-the-art coders while retaining the characteristics of the compressed vibration signals needed to ensure accurate event analysis. For the same quality level, up to 59.41% bitrate reduction is achieved by the proposed method compared to JPEG2000.\n
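The lifting front end of such a scheme can be illustrated with the simplest case, one integer-to-integer Haar lifting step (predict, then update), which is exactly invertible and therefore suitable for lossless coding. The paper's actual filter choice, SPECK sorting, and arithmetic coder are not reproduced.

```python
import numpy as np

def haar_lift_forward(x):
    """One integer-to-integer Haar lifting step (predict, then update)."""
    even, odd = x[0::2].copy(), x[1::2].copy()
    d = odd - even                  # predict: detail coefficients
    a = even + d // 2               # update: approximation coefficients
    return a, d

def haar_lift_inverse(a, d):
    even = a - d // 2
    odd = d + even
    x = np.empty(2 * len(a), dtype=a.dtype)
    x[0::2], x[1::2] = even, odd
    return x

x = np.array([5, 7, 3, 1, 9, 8, 2, 2], dtype=np.int64)   # toy vibration samples
a, d = haar_lift_forward(x)
assert np.array_equal(haar_lift_inverse(a, d), x)          # perfect reconstruction
```

Cascading such steps on the approximation band yields the multi-level LDWT whose coefficients the SPECK stage then sorts by significance.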
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hybrid digital-analog joint source channel coding for broadcast multiresolution communications.\n \n \n \n \n\n\n \n Fresnedo, O.; Suárez-Casal, P.; Castedo, L.; and García-Frías, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2001-2005, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HybridPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081560,\n  author = {O. Fresnedo and P. Suárez-Casal and L. Castedo and J. García-Frías},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Hybrid digital-analog joint source channel coding for broadcast multiresolution communications},\n  year = {2017},\n  pages = {2001-2005},\n  abstract = {Multilayer coding represents an appealing solution for the broadcasting of common data when we want to ensure the receivers decode the signal with different distortion levels depending on the quality of the received signal. In addition, the combination of digital and analog techniques to encode the source data allows to solve the limitation of these approaches when they are applied separately. In this work, we consider a Hybrid Digital-Analog (HDA) multilayer system where the digital layers are employed to satisfy different Quality of Service (QoS) requirements and an analog layer refines the estimates of the source symbols. The resulting scheme provides good performance for different expansion factors and number of layers, and it also presents good scaling for all SNR values.},\n  keywords = {combined source-channel coding;quality of service;broadcast multiresolution communications;multilayer coding;digital analog techniques;Hybrid Digital-Analog multilayer system;digital layers;analog layer;source symbols;Quality of service;hybrid digital-analog joint source channel coding;distortion levels;Receivers;Distortion;Quantization (signal);Transmitters;Nonhomogeneous media;Modulation;Decoding},\n  doi = {10.23919/EUSIPCO.2017.8081560},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346809.pdf},\n}\n\n
\n
\n\n\n
\n Multilayer coding is an appealing solution for broadcasting common data when we want receivers to decode the signal at different distortion levels depending on the quality of the received signal. In addition, combining digital and analog techniques to encode the source data overcomes the limitations these approaches exhibit when applied separately. In this work, we consider a Hybrid Digital-Analog (HDA) multilayer system where the digital layers are employed to satisfy different Quality of Service (QoS) requirements and an analog layer refines the estimates of the source symbols. The resulting scheme provides good performance for different expansion factors and numbers of layers, and it also scales well over all SNR values.\n
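For readers unfamiliar with HDA layering, the following hedged Python sketch shows the basic mechanism the abstract describes: a coarse digital base layer plus an uncoded, power-normalized analog refinement of the quantization error. The quantizer, gains and SNR values are illustrative assumptions, not the paper's design.

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(10_000)            # source symbols

# Digital base layer: a coarse 2-bit uniform quantizer (hypothetical design).
levels = np.array([-1.5, -0.5, 0.5, 1.5])
base = levels[np.argmin(np.abs(x[:, None] - levels), axis=1)]

# Analog refinement layer: the quantization error, power-normalized.
err = x - base
gain = 1.0 / np.sqrt(np.mean(err**2))
analog_tx = gain * err

def receive(noise_std):
    """Low-quality receivers decode only the base layer; high-quality
    receivers also use the (noisy) analog layer to refine the estimate."""
    analog_rx = analog_tx + noise_std * rng.standard_normal(x.size)
    coarse = base                          # assume base layer decoded error-free
    refined = base + analog_rx / gain
    return np.mean((x - coarse)**2), np.mean((x - refined)**2)

for snr_db in (5, 15, 25):
    mse_base, mse_refined = receive(10 ** (-snr_db / 20))
    print(f"SNR {snr_db} dB: base-only MSE {mse_base:.4f}, refined MSE {mse_refined:.4f}")
```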
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simplified analysis of HARQ cooperative networks using finite-state Markov chains.\n \n \n \n \n\n\n \n Maliqi, F.; Bassi, F.; Duhamel, P.; and Limani, I.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2006-2010, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SimplifiedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081561,\n  author = {F. Maliqi and F. Bassi and P. Duhamel and I. Limani},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Simplified analysis of HARQ cooperative networks using finite-state Markov chains},\n  year = {2017},\n  pages = {2006-2010},\n  abstract = {This paper considers the analysis of communication protocols in wireless networks implementing both cooperation and Hybrid Automatic Repeat reQuest (HARQ) for Type I decoder and Type II decoder with Chase Combining. Using an example of a three-node network, we show that the communication protocol can be modeled using Finite State Markov Chains. This model efficiently predicts the performance of the system. However, the complexity depends on the number of states, which increases very fast as the protocol gets more sophisticated. We then derive a simplified model using state aggregation, and obtain a compact description which can be used to predict the performance with a reduced complexity. Moreover, we show that the simplified model describes a probabilistic communication protocol on the same network. Monte Carlo simulations show that the theoretical predictions match the simulated performance.},\n  keywords = {automatic repeat request;cooperative communication;decoding;Markov processes;Monte Carlo methods;probability;protocols;probabilistic communication protocol;finite-state Markov chains;wireless networks;Type II decoder;Chase Combining;three-node network;Finite State Markov Chains;simplified model;state aggregation;hybrid automatic repeat request;HARQ cooperative networks;Monte Carlo simulations;Protocols;Markov processes;Decoding;Steady-state;Relays;Cooperative systems;Probabilistic logic},\n  doi = {10.23919/EUSIPCO.2017.8081561},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347413.pdf},\n}\n\n
\n
\n\n\n
\n This paper considers the analysis of communication protocols in wireless networks implementing both cooperation and Hybrid Automatic Repeat reQuest (HARQ), for Type I decoders and for Type II decoders with Chase Combining. Using the example of a three-node network, we show that the communication protocol can be modeled using Finite State Markov Chains. This model efficiently predicts the performance of the system; however, its complexity depends on the number of states, which grows very quickly as the protocol becomes more sophisticated. We then derive a simplified model using state aggregation, and obtain a compact description which can be used to predict the performance with reduced complexity. Moreover, we show that the simplified model describes a probabilistic communication protocol on the same network. Monte Carlo simulations show that the theoretical predictions match the simulated performance.\n
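The finite-state Markov chain machinery the abstract relies on reduces to standard linear algebra; here is a small Python sketch computing a steady-state distribution and a (mechanical) state aggregation for a made-up three-state chain. The transition matrix and the state labels are hypothetical, not taken from the paper.

```python
import numpy as np

# Hypothetical 3-state chain for one HARQ round; toy labels could be
# "source transmits", "relay transmits", "packet delivered".
P = np.array([[0.2, 0.5, 0.3],
              [0.1, 0.3, 0.6],
              [0.3, 0.2, 0.5]])

# Steady-state distribution: left eigenvector of P for eigenvalue 1.
w, v = np.linalg.eig(P.T)
pi = np.real(v[:, np.argmin(np.abs(w - 1))])
pi /= pi.sum()
print("steady state:", pi)

# State aggregation in the spirit of the paper's simplified model:
# lump states 0 and 1. Exact only under lumpability; shown mechanically here.
pi_lumped = np.array([pi[0] + pi[1], pi[2]])
print("aggregated:", pi_lumped)
```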
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Spatial peak power minimization for relaxed phase M-PSK MIMO directional modulation transmitter.\n \n \n \n \n\n\n \n Kalantari, A.; Tsinos, C.; Soltanalian, M.; Chatzinotas, S.; Ma, W.; and Ottersten, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2011-2015, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SpatialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081562,\n  author = {A. Kalantari and C. Tsinos and M. Soltanalian and S. Chatzinotas and W. Ma and B. Ottersten},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Spatial peak power minimization for relaxed phase M-PSK MIMO directional modulation transmitter},\n  year = {2017},\n  pages = {2011-2015},\n  abstract = {The burst in media content and access to smart phones has created an increasing demand for data. At the same time, powering up mobile base stations contributes notably to CO2 footprint. To address these issues, we need to design energy efficient communication systems with higher data rates while considering practical limitations. As a solution, we design an optimal M-PSK directional modulation precoder with spatial peak power minimization where the communicated symbol on each receiving antenna is placed in the optimal location of a predefined region. Such an approach allows less stringent design and results in further energy efficiency. In this work, we characterize the relaxed region, formulate the optimal symbollevel precoder design problem, and transform it into a standard form. The simulation results show that the relaxed design reduces the consumed power while the symbol error rate increment at the receiver due to the relaxed phase design is negligible.},\n  keywords = {antenna arrays;energy conservation;MIMO communication;phase shift keying;radio transmitters;receiving antennas;telecommunication power management;mobile base stations;energy efficient communication systems;optimal M-PSK directional modulation precoder;spatial peak power minimization;energy efficiency;smart phones;M-PSK MIMO directional modulation transmitter;CO2;Modulation;Minimization;Receiving antennas;Signal to noise ratio;Radio frequency;Transmitters;Directional modulation;energy efficiency;M-PSK modulation;spatial peak power;symbol-level precoding},\n  doi = {10.23919/EUSIPCO.2017.8081562},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570348169.pdf},\n}\n\n
\n
\n\n\n
\n The surge in media content and in smartphone access has created an increasing demand for data. At the same time, powering up mobile base stations contributes notably to the CO2 footprint. To address these issues, we need to design energy-efficient communication systems with higher data rates while considering practical limitations. As a solution, we design an optimal M-PSK directional modulation precoder with spatial peak power minimization, where the communicated symbol on each receiving antenna is placed at the optimal location of a predefined region. Such an approach allows a less stringent design and results in further energy efficiency. In this work, we characterize the relaxed region, formulate the optimal symbol-level precoder design problem, and transform it into a standard form. The simulation results show that the relaxed design reduces the consumed power while the symbol error rate increment at the receiver due to the relaxed phase design is negligible.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ongoing tests and improvements of the MPS algorithm for the automatic crack detection within grey level pavement images.\n \n \n \n \n\n\n \n Baltazart, V.; Nicolle, P.; and Yang, L.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2016-2020, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OngoingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081563,\n  author = {V. Baltazart and P. Nicolle and L. Yang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Ongoing tests and improvements of the MPS algorithm for the automatic crack detection within grey level pavement images},\n  year = {2017},\n  pages = {2016-2020},\n  abstract = {The MPS approach (Minimal Path Selection) has shown in [1] to provide robust and accurate segmentation of cracks within pavement images compared to other algorithms. As a counterpart, MPS suffers from a large computing time. In this paper, we present three different ongoing improvements to reduce the computing time and to improve the overall segmentation performance. Most of the work focuses on the first three steps of the algorithm which achieve the segmentation of the crack skeleton. This is at first the improvement of the MPS methodology under Matlab coding, then, the C language MPS version and finally, the first attempt to parallelize MPS under the GPU platform. The results on pavement images illustrate the achieved improvements in terms of better segmentation and faster computational time.},\n  keywords = {crack detection;edge detection;geotechnical engineering;graphics processing units;image segmentation;roads;structural engineering computing;MPS algorithm;automatic crack detection;grey level pavement images;MPS approach;segmentation performance;crack skeleton;MPS methodology;C language;minimal path selection;Matlab coding;GPU platform;Graphics processing units;Image segmentation;Skeleton;MATLAB;Encoding;Surface cracks;Real-time systems;road surface monitoring;crack segmentation;performance assessment;optimization;parallelization;GPU},\n  doi = {10.23919/EUSIPCO.2017.8081563},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347065.pdf},\n}\n\n
\n
\n\n\n
\n The MPS (Minimal Path Selection) approach has been shown in [1] to provide robust and accurate segmentation of cracks within pavement images compared to other algorithms. On the downside, MPS suffers from long computing times. In this paper, we present three ongoing improvements that reduce the computing time and improve the overall segmentation performance. Most of the work focuses on the first three steps of the algorithm, which achieve the segmentation of the crack skeleton: first, an improved MPS methodology in the Matlab implementation; second, a C-language version of MPS; and finally, a first attempt to parallelize MPS on a GPU platform. The results on pavement images illustrate the achieved improvements in terms of better segmentation and shorter computation times.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pseudo-ground truth data collection on pavement images.\n \n \n \n \n\n\n \n Baltazart, V.; Yang, L.; Nicolle, P.; and Moliard, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2021-2025, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Pseudo-groundPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081564,\n  author = {V. Baltazart and L. Yang and P. Nicolle and J. Moliard},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Pseudo-ground truth data collection on pavement images},\n  year = {2017},\n  pages = {2021-2025},\n  abstract = {The performance assessment of automatic crack detection algorithms within pavement images requires beforehand to establish a reference image, namely, the pseudo-ground truth image (PGT). In this context, this paper presents some existing pseudo-ground truth (PGT) data collection techniques which rely on image processing techniques. The processing of five Single Pair Shortest Path (SPSP) algorithms which are devoted to this aim are illustrated in terms of running time and segmentation accuracy on a pavement image.},\n  keywords = {crack detection;data acquisition;image segmentation;roads;structural engineering computing;performance assessment;pseudoground truth data collection techniques;pavement image segmentation;Single Pair Shortest Path algorithms;image processing techniques;PGT;pseudoground truth image;reference image;automatic crack detection algorithms;Signal processing algorithms;Image segmentation;Data collection;Detection algorithms;Europe;Signal processing;Road surface monitoring;crack detection;image processing;Single Pair Shortest Path;DICE similarity coefficient;performance assessment;pseudo-ground truth},\n  doi = {10.23919/EUSIPCO.2017.8081564},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347183.pdf},\n}\n\n
\n
\n\n\n
\n The performance assessment of automatic crack detection algorithms within pavement images first requires establishing a reference image, namely the pseudo-ground truth (PGT) image. In this context, this paper presents some existing PGT data collection techniques which rely on image processing. The processing of five Single Pair Shortest Path (SPSP) algorithms devoted to this aim is illustrated in terms of running time and segmentation accuracy on a pavement image.\n
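A single-pair shortest path over a grey-level image is the computational core referred to here (and in the minimal-path crack papers above); the sketch below is a plain Dijkstra on a 4-connected pixel grid with intensity used as cost. It is a generic illustration; the five SPSP variants compared in the paper are not reproduced, and the toy image is invented.

```python
import heapq
import numpy as np

def spsp_on_image(cost, src, dst):
    """Single-pair shortest path over a grey-level image.
    `cost` holds per-pixel costs (dark crack pixels = cheap);
    4-connectivity; returns the path as a list of (row, col) pixels."""
    h, w = cost.shape
    dist = np.full((h, w), np.inf)
    prev = {}
    dist[src] = cost[src]
    heap = [(cost[src], src)]
    while heap:
        d, (r, c) = heapq.heappop(heap)
        if (r, c) == dst:
            break
        if d > dist[r, c]:
            continue                     # stale heap entry
        for nr, nc in ((r-1, c), (r+1, c), (r, c-1), (r, c+1)):
            if 0 <= nr < h and 0 <= nc < w and d + cost[nr, nc] < dist[nr, nc]:
                dist[nr, nc] = d + cost[nr, nc]
                prev[(nr, nc)] = (r, c)
                heapq.heappush(heap, (dist[nr, nc], (nr, nc)))
    path, node = [dst], dst
    while node != src:
        node = prev[node]
        path.append(node)
    return path[::-1]

img = np.random.rand(32, 32) + 1.0       # toy pavement texture
img[16, :] = 0.05                        # a dark horizontal "crack"
print(len(spsp_on_image(img, (16, 0), (16, 31))))
```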
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Road surface crack detection: Improved segmentation with pixel-based refinement.\n \n \n \n \n\n\n \n Oliveira, H.; and Correia, P. L.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2026-2030, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RoadPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081565,\n  author = {H. Oliveira and P. L. Correia},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Road surface crack detection: Improved segmentation with pixel-based refinement},\n  year = {2017},\n  pages = {2026-2030},\n  abstract = {Cracks are among the most commonly found road surface degradations, requiring periodical road surveys for monitoring pavement quality. Images of road pavement surface can be automatically processed, typically employing segmentation algorithms to identify cracks. However, a set of distinct connected components often result, leading to the detection of several independent crack segments, although they may belong to the same pavement surface defect. This is often observed for cracks that exhibit a longer linear development or present several branches. This paper presents a new strategy to identify cracks on images captured during road pavement surveys, even when those cracks appear with a complex shape. The proposed crack segmentation algorithm includes two stages: (i) selection of prominent {"}crack seeds{"}, adopting an efficient segmentation procedure, after appropriate image smoothing, minimizing the detection of false positives; (ii) iterative binary pixel classification, into the crack or non-crack classes, extending the {"}seeds{"} to identify the complete crack shape. The paper also tests the combination of the proposed two stage crack segmentation with three smoothing techniques, to evaluate their suitability for crack detection. As a final step the system classifies the identified cracks as longitudinal, transversal or miscellaneous types. Tests performed with images acquired from different types of sensors (active and non-active), show improved crack segmentation results.},\n  keywords = {crack detection;geotechnical engineering;geotechnical structures;image classification;image segmentation;road building;roads;structural engineering computing;surface cracks;road pavement surveys;crack segmentation algorithm;road surface crack detection;periodical road surveys;road pavement surface;segmentation algorithms;independent crack segments;pavement surface defect;crack shape;crack segmentation;pixel-based refinement;road surface degradations;pavement quality monitoring;Image segmentation;Roads;Signal processing algorithms;Surface cracks;Surface treatment;Surface morphology;Filtering;Crack detection;Road surface;Segmentation;Pattern Recognition;Image Processing},\n  doi = {10.23919/EUSIPCO.2017.8081565},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347333.pdf},\n}\n\n
\n
\n\n\n
\n Cracks are among the most commonly found road surface degradations, requiring periodical road surveys for monitoring pavement quality. Images of the road pavement surface can be automatically processed, typically employing segmentation algorithms to identify cracks. However, a set of distinct connected components often results, leading to the detection of several independent crack segments even though they may belong to the same pavement surface defect. This is often observed for cracks that exhibit a longer linear development or present several branches. This paper presents a new strategy to identify cracks on images captured during road pavement surveys, even when those cracks appear with a complex shape. The proposed crack segmentation algorithm includes two stages: (i) selection of prominent \"crack seeds\", adopting an efficient segmentation procedure, after appropriate image smoothing, minimizing the detection of false positives; (ii) iterative binary pixel classification, into the crack or non-crack classes, extending the \"seeds\" to identify the complete crack shape. The paper also tests the combination of the proposed two-stage crack segmentation with three smoothing techniques, to evaluate their suitability for crack detection. As a final step, the system classifies the identified cracks as longitudinal, transversal or miscellaneous types. Tests performed with images acquired from different types of sensors (active and non-active) show improved crack segmentation results.\n
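The two-stage idea (strict seeds, then pixel-based growth) can be sketched with a simple region-growing pass. The thresholds and toy image below are invented, and the paper's smoothing and iterative-classification stages are not modeled; this only shows the seed-then-extend mechanism.

```python
from collections import deque
import numpy as np

def grow_cracks(img, seed_thr, grow_thr):
    """Two-stage segmentation sketch: a strict threshold picks crack seeds,
    then a relaxed threshold lets seeds grow into connected crack pixels."""
    seeds = img < seed_thr                     # stage 1: high-confidence seeds
    mask = seeds.copy()
    q = deque(zip(*np.nonzero(seeds)))
    h, w = img.shape
    while q:                                   # stage 2: pixel-based refinement
        r, c = q.popleft()
        for nr, nc in ((r-1, c), (r+1, c), (r, c-1), (r, c+1)):
            if 0 <= nr < h and 0 <= nc < w and not mask[nr, nc] \
                    and img[nr, nc] < grow_thr:
                mask[nr, nc] = True
                q.append((nr, nc))
    return mask

img = np.random.rand(64, 64) * 0.5 + 0.5       # bright pavement background
img[20:22, 5:60] = 0.1                         # dark crack
print(grow_cracks(img, seed_thr=0.15, grow_thr=0.3).sum())
```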
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Challenges and lessons from the successful implementation of automated road condition surveys on a large highway system.\n \n \n \n \n\n\n \n Tsai, Y. J.; Chatterjee, A.; and Jiang, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2031-2035, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ChallengesPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081566,\n  author = {Y. J. Tsai and A. Chatterjee and C. Jiang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Challenges and lessons from the successful implementation of automated road condition surveys on a large highway system},\n  year = {2017},\n  pages = {2031-2035},\n  abstract = {Signal processing based automated road condition surveys (ARCS) system are the solution for the current unsafe, subjective and labor-intensive manual road condition surveys. Although extensive research has been conducted on methods for ARCS, application by transportation agencies is still minimal. In 2016, an ARCS system, developed by Georgia Tech, was successfully implemented on a 4,184km highway system in Georgia, USA. This paper presents the insights gained from the project and also discusses the remaining challenges with a focus on crack detection and classification. Crack fundamental elements were implemented to obtain a flexible multi-scale output. A combination of ARCS and QA/QC tools were used to obtain high accuracy results while minimizing human effort. Gaps in ARCS research, such as the lack of a crack detection algorithm performance measure were revealed. The solutions and new challenges revealed from this study will help ARCS researchers to create solutions which can be readily applied by transportation agencies.},\n  keywords = {civil engineering computing;condition monitoring;crack detection;cracks;object detection;quality control;roads;signal processing;transportation;highway system;signal processing;automated road condition surveys system;transportation agencies;ARCS system;Georgia Tech;crack detection;crack fundamental elements;ARCS research;manual road condition surveys;size 4.184 km;Roads;Detection algorithms;Protocols;Sensors;Data visualization;Cameras;Tools},\n  doi = {10.23919/EUSIPCO.2017.8081566},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347491.pdf},\n}\n\n
\n
\n\n\n
\n Signal-processing-based automated road condition survey (ARCS) systems are the solution to today's unsafe, subjective and labor-intensive manual road condition surveys. Although extensive research has been conducted on methods for ARCS, adoption by transportation agencies is still minimal. In 2016, an ARCS system developed by Georgia Tech was successfully implemented on a 4,184 km highway system in Georgia, USA. This paper presents the insights gained from the project and also discusses the remaining challenges, with a focus on crack detection and classification. Crack fundamental elements were implemented to obtain a flexible multi-scale output. A combination of ARCS and QA/QC tools was used to obtain high-accuracy results while minimizing human effort. Gaps in ARCS research, such as the lack of a performance measure for crack detection algorithms, were revealed. The solutions and new challenges revealed by this study will help ARCS researchers create solutions that can be readily applied by transportation agencies.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 3D laser imaging and sparse points grouping for pavement crack detection.\n \n \n \n \n\n\n \n Li, Q.; Zhang, D.; Zou, Q.; and Lin, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2036-2040, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"3DPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081567,\n  author = {Q. Li and D. Zhang and Q. Zou and H. Lin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {3D laser imaging and sparse points grouping for pavement crack detection},\n  year = {2017},\n  pages = {2036-2040},\n  abstract = {Traditional optical imaging has limitations in capturing and representing pavement cracks due to the impact of illumination variations and cast shadows. In this work, laser-imaging techniques are employed to model the pavement surface with dense 3D points, and a sparse points grouping method is proposed to detect cracks from the 3D point clouds. Firstly, an algorithm based on frequency analysis is presented to separate potential cracks from the control profile and material texture of the pavement. Secondly, range images generated from point clouds are partitioned into image patches, and a learning algorithm is used to identify image patches probably containing cracks. Thirdly, the extracted patches are further filtered by checking the consistency of potential crack directions. Finally, edge weights are assigned to crack seed pairs by referring to the Gestalt law, and minimum spanning tree based algorithms are developed to extract the final cracks. Extensive experiments demonstrate the effective of the proposed method.},\n  keywords = {crack detection;learning (artificial intelligence);road building;stereo image processing;structural engineering computing;trees (mathematics);pavement crack detection;laser-imaging techniques;frequency analysis;image patches;learning algorithm;extracted patches;optical imaging;spanning tree based algorithms;3D laser imaging techniques;sparse points grouping method;3D point clouds;Gestalt law;Three-dimensional displays;Roads;Surface cracks;Surface emitting lasers;Optical imaging},\n  doi = {10.23919/EUSIPCO.2017.8081567},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341755.pdf},\n}\n\n
\n
\n\n\n
\n Traditional optical imaging has limitations in capturing and representing pavement cracks due to the impact of illumination variations and cast shadows. In this work, laser-imaging techniques are employed to model the pavement surface with dense 3D points, and a sparse points grouping method is proposed to detect cracks from the 3D point clouds. Firstly, an algorithm based on frequency analysis is presented to separate potential cracks from the control profile and material texture of the pavement. Secondly, range images generated from the point clouds are partitioned into image patches, and a learning algorithm is used to identify the patches likely to contain cracks. Thirdly, the extracted patches are further filtered by checking the consistency of potential crack directions. Finally, edge weights are assigned to crack seed pairs by reference to the Gestalt law, and minimum-spanning-tree-based algorithms are developed to extract the final cracks. Extensive experiments demonstrate the effectiveness of the proposed method.\n
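The final grouping step described here, connecting crack seeds through a minimum spanning tree and cutting long edges, can be sketched with SciPy. The seed coordinates and the cut threshold below are hypothetical, and the Gestalt-law edge weighting is reduced to plain Euclidean proximity.

```python
import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.spatial.distance import pdist, squareform

# Hypothetical crack-seed locations extracted from range-image patches.
seeds = np.array([[0, 0], [1, 2], [2, 4], [3, 6], [10, 0], [11, 1]], float)

# Fully connected graph weighted by Euclidean distance between seeds.
dist = squareform(pdist(seeds))
mst = minimum_spanning_tree(dist).toarray()

# Cut MST edges longer than a threshold: what remains groups the seeds into
# separate cracks, mimicking proximity-based (Gestalt-style) grouping.
mst[mst > 3.0] = 0.0
print(np.nonzero(mst))    # surviving edges as (seed-index, seed-index) pairs
```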
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pairwise Markov models for stock index forecasting.\n \n \n \n \n\n\n \n Gorynin, I.; Monfrini, E.; and Pieczynski, W.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2041-2045, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PairwisePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081568,\n  author = {I. Gorynin and E. Monfrini and W. Pieczynski},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Pairwise Markov models for stock index forecasting},\n  year = {2017},\n  pages = {2041-2045},\n  abstract = {Common well-known properties of time series of financial asset values include volatility clustering and asymmetric volatility phenomenon. Hidden Markov models (HMMs) have been proposed for modeling these characteristics, however, due to their simplicity, HMMs may lack two important features. We identify these features and propose modeling financial time series by recent Pairwise Markov models (PMMs) with a finite discrete state space. PMMs are extended versions of HMMs and allow a more flexible modeling. A real-world application example demonstrates substantial gains of PMMs compared to the HMMs.},\n  keywords = {asset management;economic forecasting;financial management;hidden Markov models;stock markets;time series;Hidden Markov models;PMMs;finite discrete state space;flexible modeling;stock index forecasting;financial asset values;volatility clustering;asymmetric volatility phenomenon;financial time series;HMM;pairwise Markov models;Hidden Markov models;Markov processes;Forecasting;Time series analysis;Mathematical model;Probability distribution;Signal processing algorithms;Hidden Markov models;Forecasting;Financial time series;Pairwise Markov models;Technical analysis},\n  doi = {10.23919/EUSIPCO.2017.8081568},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347653.pdf},\n}\n\n
\n
\n\n\n
\n Well-known properties of time series of financial asset values include volatility clustering and the asymmetric volatility phenomenon. Hidden Markov models (HMMs) have been proposed for modeling these characteristics; however, due to their simplicity, HMMs may lack two important features. We identify these features and propose modeling financial time series by recent Pairwise Markov models (PMMs) with a finite discrete state space. PMMs are extended versions of HMMs and allow more flexible modeling. A real-world application example demonstrates substantial gains of PMMs compared to HMMs.\n
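The extra flexibility of a pairwise Markov model shows up directly in the forward filter: the transition kernel may depend on the previous observation as well as the previous state. A minimal discrete-state Python sketch follows; all probabilities are invented, and an HMM would be the special case where the kernel factors as p(x'|x)p(y'|x').

```python
import numpy as np

rng = np.random.default_rng(1)

# Pairwise Markov chain: hidden regime X and a discretized return Y
# (e.g. down/up) are *jointly* Markov, so K[x, y, x', y'] = p(x', y' | x, y)
# may depend on the previous observation, the flexibility PMMs add over HMMs.
K = rng.random((2, 2, 2, 2))
K /= K.sum(axis=(2, 3), keepdims=True)        # normalize p(x', y' | x, y)

def forward_filter(y):
    """p(x_t | y_1..y_t) for a discrete pairwise Markov model."""
    alpha = np.full(2, 0.5)                   # prior over the regime
    for t in range(len(y) - 1):
        alpha = alpha @ K[:, y[t], :, y[t + 1]]
        alpha /= alpha.sum()
    return alpha

y = rng.integers(0, 2, size=200)              # toy up/down sequence
print("filtered regime probabilities:", forward_filter(y))
```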
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed object tracking based on information weighted UAV selection with priory objects.\n \n \n \n \n\n\n \n Bhuvana, V. P.; and Tonello, A. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2046-2050, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081569,\n  author = {V. P. Bhuvana and A. M. Tonello},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Distributed object tracking based on information weighted UAV selection with priory objects},\n  year = {2017},\n  pages = {2046-2050},\n  abstract = {In this work, we propose a distributed cubature information filter based multi-object tracking method with an information weighted selection for unmanned aerial vehicle (UAV) networks. In an UAV network, multiple UAVs can observe multiple objects in the region of interest. Further, the UAVs can exchange the objects local information among themselves and fuse them together to obtain the global state of the objects. As the number of UAVs in the network increases, the information exchange among the UAVs suffers from scalability, bandwidth and energy limitations. Thus, it is usually desirable to allow only a desired number of UAVs with highly relevant information to participate in the information exchange. In our approach, the innovation vector within the information filtering framework is used to calculate the amount of information associated with each UAV. Further, a threshold based selection mechanism is proposed to facilitate the UAVs to take independent decisions on whether to participate in the information exchange or not. In the proposed method, the UAVs take the decision to participate in the information exchange based on the information associated with a dynamic subset of objects known as priory objects while keeping the total number of information exchanges in the network to a desired number (on average).},\n  keywords = {autonomous aerial vehicles;filtering theory;information filtering;object detection;object tracking;robot vision;target tracking;threshold based selection mechanism;region of interest;object local information;information filtering framework;information exchange;multiple objects;multiple UAVs;UAV network;unmanned aerial vehicle networks;information weighted selection;multiobject tracking method;distributed cubature information filter;priory objects;information weighted UAV selection;distributed object tracking;Technological innovation;Unmanned aerial vehicles;Covariance matrices;Object tracking;Information exchange;Silicon;Time measurement},\n  doi = {10.23919/EUSIPCO.2017.8081569},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347254.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we propose a distributed cubature information filter based multi-object tracking method with information weighted selection for unmanned aerial vehicle (UAV) networks. In a UAV network, multiple UAVs can observe multiple objects in the region of interest. Further, the UAVs can exchange the objects' local information among themselves and fuse it to obtain the global state of the objects. As the number of UAVs in the network increases, the information exchange among the UAVs suffers from scalability, bandwidth and energy limitations. Thus, it is usually desirable to allow only a desired number of UAVs with highly relevant information to participate in the information exchange. In our approach, the innovation vector within the information filtering framework is used to calculate the amount of information associated with each UAV. Further, a threshold-based selection mechanism is proposed to enable the UAVs to take independent decisions on whether or not to participate in the information exchange. In the proposed method, the UAVs take the decision to participate based on the information associated with a dynamic subset of objects known as priory objects, while keeping the total number of information exchanges in the network at a desired number (on average).\n
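One way to read the proposed selection rule: each UAV computes an innovation-based information measure from its local measurement and transmits only when it exceeds a threshold. The sketch below uses the standard normalized innovation squared as that measure; the state model, threshold and dimensions are assumptions, not the paper's exact statistic.

```python
import numpy as np

def information_weight(y, H, x_pred, P_pred, R):
    """Innovation-based information measure for one UAV:
    the normalized innovation squared (NIS) of its local measurement."""
    nu = y - H @ x_pred                       # innovation vector
    S = H @ P_pred @ H.T + R                  # innovation covariance
    return float(nu @ np.linalg.solve(S, nu))

rng = np.random.default_rng(2)
x_pred = np.zeros(4)                          # predicted object state
P_pred = np.eye(4)
H = np.eye(2, 4)                              # each UAV measures position only
R = 0.1 * np.eye(2)

threshold = 2.0                               # tuned so that, on average, the
for k in range(5):                            # desired number of UAVs transmit
    y = H @ x_pred + rng.normal(scale=0.5, size=2)
    w = information_weight(y, H, x_pred, P_pred, R)
    print(f"UAV {k}: weight {w:.2f} ->", "transmit" if w > threshold else "stay silent")
```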
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust distributed sequential detection via robust estimation.\n \n \n \n \n\n\n \n Hou, W.; Leonard, M. R.; and Zoubir, A. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2051-2055, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081570,\n  author = {W. Hou and M. R. Leonard and A. M. Zoubir},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust distributed sequential detection via robust estimation},\n  year = {2017},\n  pages = {2051-2055},\n  abstract = {We study the problem of sequential binary hypothesis testing in a distributed multi-sensor network in non-Gaussian noise. To this end, we develop three robust extensions of the Consensus+Innovations Sequential Probability Ratio Test (CISPRT), namely, the Median-CISPRT, the M-CISPRT, and the Myriad-CISPRT, and validate their performance in a shift-in-mean as well as a change-in-variance test. Simulations show the superiority of the proposed algorithms over the alternative R-CISPRT.},\n  keywords = {Gaussian noise;probability;sensor fusion;sequential estimation;signal detection;statistical testing;wireless sensor networks;change-in-variance test;robust estimation;sequential binary hypothesis testing;nonGaussian noise;robust extensions;M-CISPRT;median-CISPRT;myriad-CISPRT;consensus-innovations sequential probability ratio test;robust distributed sequential detection;distributed multisensor network;Robustness;Signal processing algorithms;Technological innovation;Error probability;Signal processing;Mathematical model;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081570},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346553.pdf},\n}\n\n
\n
\n\n\n
\n We study the problem of sequential binary hypothesis testing in a distributed multi-sensor network in non-Gaussian noise. To this end, we develop three robust extensions of the Consensus+Innovations Sequential Probability Ratio Test (CISPRT), namely, the Median-CISPRT, the M-CISPRT, and the Myriad-CISPRT, and validate their performance in a shift-in-mean as well as a change-in-variance test. Simulations show the superiority of the proposed algorithms over the alternative R-CISPRT.\n
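As a rough illustration of the consensus+innovations SPRT idea, the sketch below runs Gaussian shift-in-mean log-likelihood-ratio updates at five nodes, one of which sees heavy-tailed noise, and mixes the node statistics with a median-flavoured consensus step. This is only the flavour of the Median-CISPRT variant; the actual update rules and thresholds in the paper differ.

```python
import numpy as np

rng = np.random.default_rng(3)

def sprt_step(S, x, mu0=0.0, mu1=1.0, sigma=1.0):
    """Gaussian shift-in-mean log-likelihood-ratio increment."""
    return S + (mu1 - mu0) * (x - (mu0 + mu1) / 2) / sigma**2

# Network of 5 nodes; node 4 is hit by heavy-tailed (Cauchy) noise.
n_nodes, A, B = 5, np.log(99), -np.log(99)   # thresholds for ~1% error prob
S = np.zeros(n_nodes)
for t in range(1000):
    x = 1.0 + rng.standard_normal(n_nodes)   # H1 is true
    x[4] += rng.standard_cauchy()            # outlier-prone sensor
    S = sprt_step(S, x)
    # Robust consensus: pull every node halfway towards the network median
    # instead of the mean, which limits the outlier node's influence.
    S = 0.5 * np.median(S) * np.ones(n_nodes) + 0.5 * S
    if np.all(S > A) or np.all(S < B):
        break
print(f"decided {'H1' if S[0] > 0 else 'H0'} after {t + 1} samples")
```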
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Nonlinear model selection for PARMA processes using RJMCMC.\n \n \n \n \n\n\n \n Karakuş, O.; Kuruoğlu, E. E.; and Altinkaya, M. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2056-2060, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NonlinearPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081571,\n  author = {O. Karakuş and E. E. Kuruoğlu and M. A. Altinkaya},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Nonlinear model selection for PARMA processes using RJMCMC},\n  year = {2017},\n  pages = {2056-2060},\n  abstract = {Many prediction studies using real life measurements such as wind speed, power, electricity load and rainfall utilize linear autoregressive moving average (ARMA) based models due to their simplicity and general character. However, most of the real life applications exhibit nonlinear character and modelling them with linear time series may become problematic. Among nonlinear ARMA models, polynomial ARMA (PARMA) models belong to the class of linear-in-the-parameters. In this paper, we propose a reversible jump Markov chain Monte Carlo (RJMCMC) based complete model estimation method which estimates PARMA models with all their parameters including the nonlinearity degree. The proposed method is unique in the manner of estimating the nonlinearity degree and all other model orders and model coefficients at the same time. Moreover, in this paper, RJMCMC has been examined in an anomalous way by performing transitions between linear and nonlinear model spaces.},\n  keywords = {autoregressive moving average processes;Markov processes;Monte Carlo methods;parameter estimation;time series;nonlinear model selection;PARMA processes;RJMCMC;wind speed;electricity load;life applications;nonlinear character;linear time series;nonlinear ARMA models;polynomial ARMA models;PARMA models;model coefficients;nonlinear model spaces;reversible jump Markov chain Monte Carlo based complete model estimation method;linear autoregressive moving average based models;Autoregressive processes;Estimation;Load modeling;Predictive models;Numerical models;Mathematical model;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081571},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343555.pdf},\n}\n\n
\n
\n\n\n
\n Many prediction studies using real-life measurements such as wind speed, power, electricity load and rainfall utilize linear autoregressive moving average (ARMA) based models due to their simplicity and general character. However, most real-life applications exhibit nonlinear character, and modelling them with linear time series may become problematic. Among nonlinear ARMA models, polynomial ARMA (PARMA) models belong to the class of linear-in-the-parameters models. In this paper, we propose a reversible jump Markov chain Monte Carlo (RJMCMC) based complete model estimation method which estimates PARMA models with all their parameters, including the nonlinearity degree. The proposed method is unique in estimating the nonlinearity degree and all other model orders and model coefficients at the same time. Moreover, in this paper, RJMCMC is employed in an unconventional way, performing transitions between linear and nonlinear model spaces.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Measure-transformed Gaussian quasi score test.\n \n \n \n \n\n\n \n Todros, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2061-2065, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Measure-transformedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081572,\n  author = {K. Todros},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Measure-transformed Gaussian quasi score test},\n  year = {2017},\n  pages = {2061-2065},\n  abstract = {In this paper, we develop a robust generalization of the Gaussian quasi score test (GQST) for composite binary hypothesis testing. The proposed test, called measure-transformed GQST (MT-GQST), is based on a transformation applied to the probability distribution of the data. The considered transform is structured by a non-negative function, called MT-function, that weights the data points. By appropriate selection of the MT-function we show that, unlike the GQST, the proposed MT-GQST incorporates higher-order moments and can gain robustness to outliers. The MT-GQST is applied for testing the parameter of a non-linear model. Simulation example illustrates its advantages as compared to the standard GQST and other robust detectors.},\n  keywords = {Gaussian distribution;higher order statistics;probability;signal detection;statistical distributions;statistical testing;MT-GQST;standard GQST;robust generalization;composite binary hypothesis testing;nonnegative function;measure-transformed Gaussian quasiscore test;data probability distribution;MT-function;higher-order moments;nonlinear model;robust detectors;Robustness;Testing;Covariance matrices;Transforms;Europe;Signal processing;Probability},\n  doi = {10.23919/EUSIPCO.2017.8081572},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341829.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we develop a robust generalization of the Gaussian quasi score test (GQST) for composite binary hypothesis testing. The proposed test, called measure-transformed GQST (MT-GQST), is based on a transformation applied to the probability distribution of the data. The considered transform is structured by a non-negative function, called the MT-function, that weights the data points. By appropriate selection of the MT-function we show that, unlike the GQST, the proposed MT-GQST incorporates higher-order moments and can gain robustness to outliers. The MT-GQST is applied to testing the parameter of a non-linear model. A simulation example illustrates its advantages compared to the standard GQST and other robust detectors.\n
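The measure transformation amounts to reweighting samples by a non-negative MT-function before computing moments. The sketch below uses a zero-centred Gaussian MT-function, a common choice in this literature, to compute robust weighted moments; the paper's actual test statistic is not reproduced, and all numbers are illustrative.

```python
import numpy as np

def mt_weighted_moments(x, t=0.5):
    """Measure-transformed sample mean/covariance with the Gaussian
    MT-function u(x) = exp(-t * ||x||^2 / 2): samples far from the bulk
    get small weight, which is where robustness to outliers comes from.
    (t = 0 recovers the ordinary, non-robust sample moments.)"""
    w = np.exp(-t * np.sum(x**2, axis=1) / 2)
    w /= w.sum()
    mu = w @ x
    xc = x - mu
    cov = (w[:, None] * xc).T @ xc
    return mu, cov

rng = np.random.default_rng(4)
x = rng.standard_normal((500, 2))
x[:10] += 20.0                                # 2% gross outliers
print("weighted mean:", mt_weighted_moments(x)[0])
print("plain    mean:", x.mean(axis=0))       # visibly dragged by outliers
```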
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Blind parallel interrogation of ultrasonic neural dust motes based on canonical polyadic decomposition: A simulation study.\n \n \n \n \n\n\n \n Bertrand, A.; Seo, D.; Carmena, J. M.; Maharbiz, M. M.; Alon, E.; and Rabaey, J. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2066-2070, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"BlindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081573,\n  author = {A. Bertrand and D. Seo and J. M. Carmena and M. M. Maharbiz and E. Alon and J. M. Rabaey},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Blind parallel interrogation of ultrasonic neural dust motes based on canonical polyadic decomposition: A simulation study},\n  year = {2017},\n  pages = {2066-2070},\n  abstract = {Neural dust (ND) is a wireless ultrasonic backscatter system for communicating with implanted sensor devices, referred to as ND motes (NDMs). Due to its scalability, ND could allow to chronically record electro-physiological signals in the brain cortex at a micro-scale pitch. The free-floating NDMs are read out by an array of ultrasonic (US) transducers through passive backscattering, by sequentially steering a US beam to the target NDM. In order to perform such beam steering, the NDM positions or the channels between the NDMs and the US transducers have to be estimated, which is a non-trivial task. Furthermore, such a sequential beam steering approach is too slow to sample a dense ND grid with a sufficiently high sampling rate. In this paper, we propose a new ND interrogation scheme which is fast enough to completely sample the entire ND grid, and which does not need any information on the NDM positions or the per-NDM channel characteristics. For each sample time, the US transducers transmit only a few grid-wide US beams to the entire ND grid, in which case the reflected beams will consist of mixtures of multiple NDM signals. We arrange the demodulated backscattered signals in a 3-way tensor, and then use a canonical polyadic decomposition (CPD) to blindly estimate the neural signals from each underlying NDM. Based on a validated simulation model, we demonstrate that this new CPD-based interrogation scheme allows to reconstruct the neural signals from the entire ND grid with a sufficiently high accuracy, even at relatively low SNR regimes.},\n  keywords = {array signal processing;beam steering;brain;medical signal processing;tensors;canonical polyadic decomposition;neural signals;blind parallel interrogation;ultrasonic neural dust motes;wireless ultrasonic backscatter system;implanted sensor devices;electro-physiological signals;passive backscattering;NDM;sequential beam steering;ultrasonic transducers;US;Tensile stress;Transducers;Backscatter;Brain modeling;Signal processing;Acoustics;Data models},\n  doi = {10.23919/EUSIPCO.2017.8081573},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342128.pdf},\n}\n\n
\n
\n\n\n
\n Neural dust (ND) is a wireless ultrasonic backscatter system for communicating with implanted sensor devices, referred to as ND motes (NDMs). Due to its scalability, ND could allow chronic recording of electro-physiological signals in the brain cortex at a micro-scale pitch. The free-floating NDMs are read out by an array of ultrasonic (US) transducers through passive backscattering, by sequentially steering a US beam to the target NDM. In order to perform such beam steering, the NDM positions or the channels between the NDMs and the US transducers have to be estimated, which is a non-trivial task. Furthermore, such a sequential beam steering approach is too slow to sample a dense ND grid at a sufficiently high sampling rate. In this paper, we propose a new ND interrogation scheme which is fast enough to completely sample the entire ND grid, and which does not need any information on the NDM positions or the per-NDM channel characteristics. For each sample time, the US transducers transmit only a few grid-wide US beams to the entire ND grid, in which case the reflected beams consist of mixtures of multiple NDM signals. We arrange the demodulated backscattered signals in a 3-way tensor, and then use a canonical polyadic decomposition (CPD) to blindly estimate the neural signals from each underlying NDM. Based on a validated simulation model, we demonstrate that this new CPD-based interrogation scheme allows the neural signals from the entire ND grid to be reconstructed with sufficiently high accuracy, even in relatively low SNR regimes.\n
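The rank-1 building block of a CPD can be computed with a higher-order power iteration, sketched below for a single hypothetical mote; a full CPD fits R such terms, one per NDM. The tensor dimensions and toy factors are invented for the correlation check.

```python
import numpy as np

def rank1_cpd(T, iters=100):
    """Higher-order power iteration: best rank-1 fit a o b o c of a
    3-way tensor (alternating normalized contractions)."""
    a = np.random.randn(T.shape[0]); a /= np.linalg.norm(a)
    b = np.random.randn(T.shape[1]); b /= np.linalg.norm(b)
    c = np.random.randn(T.shape[2]); c /= np.linalg.norm(c)
    for _ in range(iters):
        a = np.einsum('ijk,j,k->i', T, b, c); a /= np.linalg.norm(a)
        b = np.einsum('ijk,i,k->j', T, a, c); b /= np.linalg.norm(b)
        c = np.einsum('ijk,i,j->k', T, a, b); c /= np.linalg.norm(c)
    lam = np.einsum('ijk,i,j,k->', T, a, b, c)
    return lam, a, b, c

# Toy tensor: (transducer gain) x (beam pattern) x (neural time course) + noise.
g, p, s = np.random.randn(8), np.random.randn(4), np.sin(np.linspace(0, 6, 50))
T = np.einsum('i,j,k->ijk', g, p, s) + 0.01 * np.random.randn(8, 4, 50)
lam, a, b, c = rank1_cpd(T)
print("recovered time-course correlation:", abs(np.corrcoef(c, s)[0, 1]))
```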
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Error correction output codding coupled with the CSP for motor imagery BCI systems.\n \n \n \n\n\n \n Shahtalebi, S.; and Mohammadi, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2071-2075, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081574,\n  author = {S. Shahtalebi and A. Mohammadi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Error correction output codding coupled with the CSP for motor imagery BCI systems},\n  year = {2017},\n  pages = {2071-2075},\n  abstract = {Motivated by the fact that modeling and representation of multi-class signal patterns plays a critical role in Electroencephalogram (EEG)-based brain computer interface (BCI) systems, the paper proposes the coupling of error correction output coding (ECOC) with the common spatial pattern (CSP) analysis. Referred to as the ECO-CSP framework, the ECOC approach is applied to EEG motor imagery classification problem. A BCI system designed to operate in real world conditions, must be able to discriminate multiple tasks and activities. This fact, expresses the urge to develop/implement classifiers intrinsically designed for multi-class problems. One of such techniques which is well regarded in other fields but has not yet been applied to EEG-based classification is the ECOC. The paper addresses this gap. The BCI Competition IV-2a dataset is used to evaluate the performance of the proposed ECO-CSP framework. Our results show that ECO-CSP achieve similar performance in comparison to the state-of-the-art algorithms but is extensively simpler with significantly less computational overhead making it a practical alternative for real-time EEG motor imagery classification tasks.},\n  keywords = {brain-computer interfaces;electroencephalography;error correction codes;medical signal processing;signal classification;brain computer interface systems;error correction output coding;common spatial pattern analysis;ECO-CSP framework;ECOC approach;EEG motor imagery classification problem;multiclass problems;BCI Competition IV-2a dataset;EEG motor imagery classification tasks;motor imagery BCI systems;multiclass signal patterns;electroencephalogram;Electroencephalography;Feature extraction;Covariance matrices;Brain;Encoding;Signal processing;Brain-computer interface (BCI);Common spatial patterns;Electroencephalogram (EEG);Error correction output coding;Motor Imagery},\n  doi = {10.23919/EUSIPCO.2017.8081574},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Motivated by the fact that modeling and representation of multi-class signal patterns plays a critical role in Electroencephalogram (EEG)-based brain computer interface (BCI) systems, the paper proposes coupling error correction output coding (ECOC) with common spatial pattern (CSP) analysis. Referred to as the ECO-CSP framework, the ECOC approach is applied to the EEG motor imagery classification problem. A BCI system designed to operate in real-world conditions must be able to discriminate multiple tasks and activities. This highlights the need to develop and implement classifiers intrinsically designed for multi-class problems. One such technique, well regarded in other fields but not yet applied to EEG-based classification, is ECOC; the paper addresses this gap. The BCI Competition IV-2a dataset is used to evaluate the performance of the proposed ECO-CSP framework. Our results show that ECO-CSP achieves performance similar to state-of-the-art algorithms but is considerably simpler, with significantly less computational overhead, making it a practical alternative for real-time EEG motor imagery classification tasks.\n
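The CSP half of the proposed pipeline is a generalized eigendecomposition of the two class-average covariance matrices; here is a minimal Python sketch on random toy trials with illustrative dimensions. An ECOC wrapper would then train one binary classifier on such features per column of the code matrix; that wrapper is not shown.

```python
import numpy as np
from scipy.linalg import eigh

def csp_filters(trials_a, trials_b, n_pairs=2):
    """Common spatial patterns via the generalized eigendecomposition of
    the two class-average covariances. Trials: (n_trials, channels, time)."""
    def avg_cov(trials):
        covs = [t @ t.T / np.trace(t @ t.T) for t in trials]
        return np.mean(covs, axis=0)
    Ca, Cb = avg_cov(trials_a), avg_cov(trials_b)
    vals, vecs = eigh(Ca, Ca + Cb)            # eigenvalues sorted ascending
    idx = np.r_[:n_pairs, -n_pairs:0]         # most discriminative filters
    return vecs[:, idx]

rng = np.random.default_rng(5)
trials_a = rng.standard_normal((30, 8, 256))
trials_b = rng.standard_normal((30, 8, 256)) * np.linspace(0.5, 2, 8)[:, None]
W = csp_filters(trials_a, trials_b)
features = np.log(np.var(W.T @ trials_a[0], axis=1))   # per-trial CSP features
print(features)
```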
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Finger and forehead PPG signal comparison for respiratory rate estimation based on pulse amplitude variability.\n \n \n \n \n\n\n \n Hernando, A.; Peláez, M. D.; Lozano, M. T.; Aiger, M.; Gil, E.; and Lázaro, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2076-2080, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"FingerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081575,\n  author = {A. Hernando and M. D. Peláez and M. T. Lozano and M. Aiger and E. Gil and J. Lázaro},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Finger and forehead PPG signal comparison for respiratory rate estimation based on pulse amplitude variability},\n  year = {2017},\n  pages = {2076-2080},\n  abstract = {Pulse photopletysmographic signal (PPG) is modulated by the respiratory rate, so there are some algorithms capable to extract respiratory information from the derived PPG signals, as the Pulse Amplitude Variability (PAV). Previous works have shown that the use of the PPG leads to different results depending on the PPG sensor location (finger and forehead). Therefore, a database recording finger and forehead PPG signals and respiration is done, breathing with fixed frequencies. Results show that while finger PAV signal works correctly, forehead PAV signal has a non respiratory component that do not allow to properly estimate the respiratory rate.},\n  keywords = {medical signal processing;patient monitoring;photoplethysmography;pneumodynamics;pulse amplitude variability;database recording finger;finger PAV signal;forehead PAV signal;nonrespiratory component;respiratory rate estimation;PPG signals;pulse photopletysmographic signal;PPG sensor;PPG sensor;Forehead;Estimation;Fingers;Data mining;Europe;Signal processing;Signal processing algorithms},\n  doi = {10.23919/EUSIPCO.2017.8081575},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347442.pdf},\n}\n\n
\n
\n\n\n
\n The pulse photoplethysmographic (PPG) signal is modulated by the respiratory rate, so several algorithms can extract respiratory information from PPG-derived signals, such as Pulse Amplitude Variability (PAV). Previous works have shown that using the PPG leads to different results depending on the PPG sensor location (finger or forehead). Therefore, a database of finger and forehead PPG signals and respiration was recorded, with subjects breathing at fixed frequencies. Results show that while the finger PAV signal works correctly, the forehead PAV signal has a non-respiratory component that does not allow the respiratory rate to be estimated properly.\n
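The PAV-based estimator evaluated here can be prototyped in a few lines: detect pulse peaks, take the beat-to-beat amplitude series, and read off its dominant frequency. The sampling rate, heart rate and breathing rate below are invented for this synthetic check, and the crude resample-plus-FFT step is only one of several possible spectral estimators.

```python
import numpy as np
from scipy.signal import find_peaks

fs = 125.0                                    # assumed PPG sampling rate (Hz)
t = np.arange(0, 60, 1 / fs)
resp_hz = 0.25                                # 15 breaths/min ground truth
# Toy PPG: cardiac pulses at ~1.2 Hz, amplitude-modulated by respiration.
ppg = (1 + 0.3 * np.sin(2 * np.pi * resp_hz * t)) * np.sin(2 * np.pi * 1.2 * t)

# Pulse Amplitude Variability: the series of beat-to-beat peak amplitudes.
peaks, _ = find_peaks(ppg, distance=int(0.5 * fs))
pav, pav_t = ppg[peaks], t[peaks]

# Respiratory rate = dominant frequency of the (unevenly sampled) PAV series;
# uniform resampling followed by an FFT is the crude version used here.
u_t = np.arange(pav_t[0], pav_t[-1], 0.25)
u_pav = np.interp(u_t, pav_t, pav)
u_pav -= u_pav.mean()
spec = np.abs(np.fft.rfft(u_pav))
freqs = np.fft.rfftfreq(u_t.size, d=0.25)
print(f"estimated respiratory rate: {freqs[np.argmax(spec)]:.2f} Hz")
```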
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n PARAFAC2 and its block term decomposition analog for blind fMRI source unmixing.\n \n \n \n \n\n\n \n Chatzichristos, C.; Kofidis, E.; and Theodoridis, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2081-2085, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PARAFAC2Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081576,\n  author = {C. Chatzichristos and E. Kofidis and S. Theodoridis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {PARAFAC2 and its block term decomposition analog for blind fMRI source unmixing},\n  year = {2017},\n  pages = {2081-2085},\n  abstract = {Tensor-based analysis of brain imaging data, in particular functional Magnetic Resonance Imaging (fMRI), has proved to be quite effective in exploiting their inherently multidimensional nature. It commonly relies on a trilinear model generating the analyzed data. This assumption, however, may prove to be quite strict in practice; for example, due to the natural intra-subject and inter-subject variability of the Haemodynamic Response Function (HRF). This paper investigates the possible gains from the adoption of a less strict trilinear model, such as PARAFAC2, which allows a more flexible representation of the fMRI data in the temporal domain. In this context, and inspired by a recently reported successful application of the Block Term Decomposition (BTD) model to a 4-way tensorization of the fMRI signal, a PARAFAC2-like extension of BTD (called here BTD2) is proposed. Simulation results are presented, that reveal the pros and cons of these tensorial methods, demonstrating BTD2's enhanced robustness to noise.},\n  keywords = {biomedical MRI;blind source separation;brain;haemodynamics;medical image processing;neurophysiology;physiological models;tensors;block term decomposition analog;blind fMRI source unmixing;brain imaging data;PARAFAC2-like extension;functional magnetic resonance imaging;intrasubject variability;intersubject variability;haemodynamic response function;block term decomposition model;tensor-based analysis;four-way tensorization;trilinear model;tensorial methods;Tensile stress;Matrix decomposition;Brain modeling;Robustness;Data models},\n  doi = {10.23919/EUSIPCO.2017.8081576},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347308.pdf},\n}\n\n
\n
\n\n\n
\n Tensor-based analysis of brain imaging data, in particular functional Magnetic Resonance Imaging (fMRI), has proved to be quite effective in exploiting their inherently multidimensional nature. It commonly relies on a trilinear model generating the analyzed data. This assumption, however, may prove to be quite strict in practice; for example, due to the natural intra-subject and inter-subject variability of the Haemodynamic Response Function (HRF). This paper investigates the possible gains from the adoption of a less strict trilinear model, such as PARAFAC2, which allows a more flexible representation of the fMRI data in the temporal domain. In this context, and inspired by a recently reported successful application of the Block Term Decomposition (BTD) model to a 4-way tensorization of the fMRI signal, a PARAFAC2-like extension of BTD (called here BTD2) is proposed. Simulation results are presented that reveal the pros and cons of these tensorial methods, demonstrating BTD2's enhanced robustness to noise.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimation of the blurring kernel in experimental HR-pQCT images based on mutual information.\n \n \n \n \n\n\n \n Li, Y.; Sixou, B.; and Peyrin, F.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2086-2090, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"EstimationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081577,\n  author = {Y. Li and B. Sixou and F. Peyrin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Estimation of the blurring kernel in experimental HR-pQCT images based on mutual information},\n  year = {2017},\n  pages = {2086-2090},\n  abstract = {The analysis of trabecular bone micro structure from in-vivo CT images is still limited due to limited spatial resolution even with the new High Resolution peripheral Quantitative CT (HR-pQCT) scanners. In previous works, it has been proposed to exploit super resolution techniques to improve spatial resolution. However, the application of such methods requires to know the blurring kernel, which is challenging for experimental HR-pQCT images. The goal of this work is to determine the blurring kernel of these scanners in order to facilitate an increase of the resolution of the bone images and of the segmentation of the bone structures. To this aim, we propose a method based on mutual information and compare it with classical ¿2-norm minimization methods.},\n  keywords = {bone;computerised tomography;image resolution;image segmentation;medical image processing;super resolution techniques;blurring kernel estimation;high resolution peripheral quantitative CT scanners;spatial resolution techniques;trabecular bone microstructure segmentation;Kernel;Signal resolution;Spatial resolution;Bones;Computed tomography;TV;Deconvolution;super-resolution;Total Variation;3D CT images;bone micro-architecture},\n  doi = {10.23919/EUSIPCO.2017.8081577},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570344934.pdf},\n}\n\n
Unveiling bias compensation in turbo-based algorithms for (discrete) compressed sensing. Sparrer, S.; and Fischer, R. F. H. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2091-2095, Aug 2017.
@InProceedings{8081578,
  author = {S. Sparrer and R. F. H. Fischer},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Unveiling bias compensation in turbo-based algorithms for (discrete) compressed sensing},
  year = {2017},
  pages = {2091-2095},
  abstract = {In Compressed Sensing, a real-valued sparse vector has to be recovered from an underdetermined system of linear equations. In many applications, however, the elements of the sparse vector are drawn from a finite set. Adapted algorithms incorporating this additional knowledge are required for the discrete-valued setup. In this paper, turbo-based algorithms for both cases are elucidated and analyzed from a communications engineering perspective, leading to a deeper understanding of these algorithms. In particular, we gain the intriguing insight that the calculation of extrinsic values is equal to the unbiasing of a biased estimate, and present an improved algorithm.},
  keywords = {compressed sensing;turbo codes;vectors;real-valued sparse vector;underdetermined system;linear equations;finite set;discrete-valued setup;communications engineering perspective;bias compensation;compressed sensing;turbo-based algorithms;Signal processing algorithms;Estimation;Approximation algorithms;Sparse matrices;Compressed sensing;Matching pursuit algorithms;Decoding},
  doi = {10.23919/EUSIPCO.2017.8081578},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346908.pdf},
}
A gridless sparse method for super-resolution of harmonics. Yang, Z. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2096-2100, Aug 2017.
@InProceedings{8081579,
  author = {Z. Yang},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A gridless sparse method for super-resolution of harmonics},
  year = {2017},
  pages = {2096-2100},
  abstract = {As a special frequency estimation problem, harmonics estimation has applications in speech and audio processing, power systems, healthcare monitoring, etc. In this paper, we make a first attempt to propose a gridless sparse method for harmonics estimation exploiting the harmonics structure. The method uses the atomic norm with carefully designed atoms and is formulated as a convex optimization problem. Its performance is demonstrated via numerical simulations.},
  keywords = {convex programming;frequency estimation;numerical analysis;signal resolution;spectral analysis;numerical simulations;special frequency estimation problem;super-resolution;convex optimization problem;harmonics structure;harmonics estimation;gridless sparse method;Harmonic analysis;Power system harmonics;Estimation;Frequency estimation;Standards;Atomic measurements;Signal resolution;Harmonics estimation;frequency estimation;atomic norm;group sparsity;gridless sparse method},
  doi = {10.23919/EUSIPCO.2017.8081579},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347391.pdf},
}
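As background for the atomic-norm formulation the abstract mentions, the standard line-spectral form (the harmonic atom design is the paper's contribution, so the notation below is ours) is

\[ \|x\|_{\mathcal{A}} = \inf\Big\{ \textstyle\sum_k c_k \;:\; x = \sum_k c_k\, a(f_k,\phi_k),\ c_k \ge 0 \Big\}, \qquad a(f,\phi)_n = e^{\,j(2\pi f n + \phi)}, \]

with a denoising-type estimate obtained from the convex program $\min_z \tfrac12\|y - z\|_2^2 + \tau\|z\|_{\mathcal{A}}$. For harmonics, one would replace $a(f,\phi)$ by an atom that stacks a fundamental together with its multiples $f, 2f, \dots$ (our reading of the construction), so that the harmonic structure is enforced by the atoms themselves rather than by a grid.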
Online group-sparse estimation using the covariance fitting criterion. Kronvall, T.; Adalbjornsson, S. I.; Nadig, S.; and Jakobsson, A. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2101-2105, Aug 2017.
@InProceedings{8081580,
  author = {T. Kronvall and S. I. Adalbjornsson and S. Nadig and A. Jakobsson},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Online group-sparse estimation using the covariance fitting criterion},
  year = {2017},
  pages = {2101-2105},
  abstract = {In this paper, we present a time-recursive implementation of a recent hyperparameter-free group-sparse estimation technique. This is achieved by reformulating the original method, termed group-SPICE, as a square-root group-LASSO with a suitable regularization level, for which a time-recursive implementation is derived. Using a proximal gradient step for lowering the computational cost, the proposed method may effectively cope with data sequences consisting of both stationary and non-stationary signals, such as transients, and/or amplitude modulated signals. Numerical examples illustrate the efficacy of the proposed method for both coherent Gaussian dictionaries and for the multi-pitch estimation problem.},
  keywords = {covariance analysis;gradient methods;recursive estimation;signal processing;multipitch estimation problem;online group-sparse estimation;time-recursive implementation;square-root group-LASSO;proximal gradient step;nonstationary signals;hyperparameter-free group-sparse estimation technique;group-SPICE;regularization level;data sequences;stationary signals;coherent Gaussian dictionaries;Estimation;Dictionaries;SPICE;Europe;Signal processing;Recursive estimation;Signal processing algorithms;Online estimation;covariance fitting;group sparsity;multi-pitch estimation},
  doi = {10.23919/EUSIPCO.2017.8081580},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347373.pdf},
}
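The square-root group-LASSO reformulation referred to in the abstract has the standard form

\[ \min_{x} \; \| y - A x \|_2 + \lambda \sum_{g=1}^{G} \| x_g \|_2 , \]

where the sub-vectors $x_g$ partition the coefficient vector into groups. Because the data-fit term is not squared, a suitable $\lambda$ can be chosen independently of the noise level, which is what makes the approach, like SPICE, effectively hyperparameter-free.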
Wideband DoA estimation based on joint optimisation of array and spatial sparsity. Chen, M.; Wang, W.; Barnard, M.; and Chambers, J. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2106-2110, Aug 2017.
@InProceedings{8081581,
  author = {M. Chen and W. Wang and M. Barnard and J. Chambers},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Wideband DoA estimation based on joint optimisation of array and spatial sparsity},
  year = {2017},
  pages = {2106-2110},
  abstract = {We study the problem of wideband direction of arrival (DoA) estimation by joint optimisation of array and spatial sparsity. A two-step iterative process is proposed. In the first step, the wideband signal is reshaped and used as the input to derive the weight coefficients using a sparse array optimisation method. The weights are then used to scale the observed signal model, for which a compressive sensing based spatial sparsity optimisation method is used for DoA estimation. Simulations are provided to demonstrate the performance of the proposed method for both stationary and moving sources.},
  keywords = {array signal processing;compressed sensing;direction-of-arrival estimation;iterative methods;optimisation;compressive sensing;joint array and spatial sparsity optimisation method;wideband direction of arrival estimation;observed signal model;weight coefficients;wideband signal;two-step iterative process;wideband direction;wideband DoA estimation;Sensor arrays;Direction-of-arrival estimation;Estimation;Wideband;Optimization;Array signal processing},
  doi = {10.23919/EUSIPCO.2017.8081581},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341208.pdf},
}
Structurally random Fourier domain compressive sampling and frequency domain beamforming for ultrasound imaging. Foroozan, F.; Yousefi, R.; Sadeghi, P.; and Kolios, M. C. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2111-2115, Aug 2017.
@InProceedings{8081582,
  author = {F. Foroozan and R. Yousefi and P. Sadeghi and M. C. Kolios},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Structurally random Fourier domain compressive sampling and frequency domain beamforming for ultrasound imaging},
  year = {2017},
  pages = {2111-2115},
  abstract = {Advances in ultrasound technology have fueled the emergence of Point-Of-Care Ultrasound (PoCU) imaging, including improved ease-of-use, superior image quality, and lower cost ultrasound. One of the approaches that can make the adoption of PoCU universal is to make the data acquisition module as simple as a {"}stethoscope{"} while further processing and image construction can be done using cloud-based processors. Toward this goal, we use Structurally Random Matrices (SRM) for compressive sensing of ultrasound data, a Fourier sparsifying matrix for recovery in 1D, and a frequency domain approach for 2D ultrasound image reconstruction. This approach is demonstrated on wire phantom and in vivo carotid artery data from an ultrasound system using 25%, 12.5%, and 6.25% of the full data rate, yielding ultrasound images of similar perceived quality as quantified by the Structural Similarity Index Metric (SSIM).},
  keywords = {biomedical ultrasonics;blood vessels;cloud computing;compressed sensing;data acquisition;image reconstruction;image sampling;medical image processing;phantoms;data acquisition module;compressive sensing;Fourier sparsifying matrix;2D ultrasound image reconstruction;in vivo carotid arteries data;structural similarity index metric;point-of-care ultrasound imaging;image quality;Fourier domain compressive sampling;frequency domain beamforming;stethoscope;cloud-based processors;structurally random matrices;wire phantom;Ultrasonic imaging;Sensors;Imaging;Frequency-domain analysis;Array signal processing;Image reconstruction;Sparse matrices;Compressive Sensing;Structurally Random Matrices;Beamforming;Ultrasound Imaging},
  doi = {10.23919/EUSIPCO.2017.8081582},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347347.pdf},
}
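A structurally random measurement operator is commonly built as y = S F D x: a random pre-modulation D (e.g., sign flipping), a fast orthonormal transform F, and a random row selector S. The hedged sketch below (illustrative, not the paper's pipeline; sizes are assumed) shows the generic construction with an orthonormal FFT.

import numpy as np

rng = np.random.default_rng(0)

def srm_operator(n, m):
    signs = rng.choice([-1.0, 1.0], size=n)      # D: random sign pre-modulation
    rows = rng.choice(n, size=m, replace=False)  # S: keep m of the n coefficients
    def forward(x):
        # F then subsample: orthonormal FFT of the sign-flipped signal
        return np.fft.fft(signs * x, norm="ortho")[rows]
    return forward

measure = srm_operator(n=1024, m=256)            # 25% of the full data rate
y = measure(rng.standard_normal(1024))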
Time-data trade-off in the sparse Fourier transform. Aldharrab, A.; and Davies, M. E. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2116-2120, Aug 2017.
@InProceedings{8081583,
  author = {A. Aldharrab and M. E. Davies},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Time-data trade-off in the sparse Fourier transform},
  year = {2017},
  pages = {2116-2120},
  abstract = {It has been shown that the Discrete Fourier Transform (DFT) can be computed in sublinear time from a sublinear number of samples when the target spectrum is sparse. However, this is usually only expressed qualitatively in terms of the order of the number of computations/samples. Here we investigate the explicit time-data tradeoff for the Sparse Fourier Transform (SFT) algorithm proposed by Pawar and Ramchandran using coding theoretic tools. This leads to an optimal oversampling rate and algorithm configuration that minimises computation while keeping the required number of time domain samples close to the minimum value.},
  keywords = {discrete Fourier transforms;signal sampling;optimal oversampling rate;time domain samples;time-data trade-off;DFT;target spectrum;discrete Fourier transform;sparse Fourier transform algorithm;SFT;Time-domain analysis;Complexity theory;Signal processing algorithms;Discrete Fourier transforms;Convergence;Europe},
  doi = {10.23919/EUSIPCO.2017.8081583},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347603.pdf},
}
Code properties analysis for the implementation of a modulated wideband converter. Marnat, M.; Pelissier, M.; Michel, O.; and Ros, L. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2121-2125, Aug 2017.
@InProceedings{8081584,
  author = {M. Marnat and M. Pelissier and O. Michel and L. Ros},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Code properties analysis for the implementation of a modulated wideband converter},
  year = {2017},
  pages = {2121-2125},
  abstract = {This paper deals with the sub-Nyquist sampling of analog multiband signals. The Modulated Wideband Converter (MWC) is a promising compressive sensing architecture, foreseen to be able to break the usual compromise between bandwidth, noise figure and energy consumption of Analog-to-Digital Converters. The pseudorandom code sequences yielding the sensing matrix are, however, its bottleneck. Our contributions are multifold: first, we propose a new Zadoff-Chu code based real-valued sensing matrix that satisfies cyclic properties, has good spectral properties, and increases robustness against noise. Second, a quasi-systematic study of the influence of code families and of row selection is carried out on different criteria. In particular, the influence on the coherence, vital to limit the number of branches, is investigated. Additionally, an original approach that focuses on evaluating isometric properties is established. These measures are helpful since isometry is essential to noise robustness. Third, the relevance of the previous high-level metrics is validated on various codes using a simulation platform. Altogether, this study delivers a methodology for a thorough comparison between usual compressive sensing matrices and new proposals.},
  keywords = {analogue-digital conversion;compressed sensing;matrix algebra;pseudonoise codes;random sequences;signal sampling;cyclic properties;quasisystematic study;code families;isometric properties;noise robustness;codes thanks;code properties analysis;modulated wideband converter;sub-Nyquist sampling;analog multiband signals;noise figure;energy consumption;pseudorandom code sequences;sensing matrix;Zadoff-Chu code;spectral properties;analog-to-digital converters;compressive sensing matrices;Coherence;Sensors;Sparse matrices;Gold;Compressed sensing;Discrete Fourier transforms;Estimation},
  doi = {10.23919/EUSIPCO.2017.8081584},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346502.pdf},
}
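For readers unfamiliar with Zadoff-Chu codes, the sketch below generates a standard ZC sequence (odd length N, root u coprime to N; the values u = 7, N = 353 are assumed example parameters, and the paper's real-valued construction on top of it is not reproduced here) and checks the two properties that make such codes attractive for sensing matrices: constant amplitude and ideal periodic autocorrelation.

import numpy as np

def zadoff_chu(u, N):
    # z_u[n] = exp(-j*pi*u*n*(n+1)/N) for odd N, gcd(u, N) = 1
    n = np.arange(N)
    return np.exp(-1j * np.pi * u * n * (n + 1) / N)

z = zadoff_chu(u=7, N=353)
print(np.allclose(np.abs(z), 1.0))               # constant amplitude
# Circular autocorrelation via Wiener-Khinchin: IFFT of the power spectrum.
acorr = np.fft.ifft(np.abs(np.fft.fft(z)) ** 2)
print(np.max(np.abs(acorr[1:])) < 1e-9 * np.abs(acorr[0]))  # ~zero off-peak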
A two-term penalty function for inverse problems with sparsity constrains. Rodriguez, P. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2126-2130, Aug 2017.
@InProceedings{8081585,
  author = {P. Rodriguez},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A two-term penalty function for inverse problems with sparsity constrains},
  year = {2017},
  pages = {2126-2130},
  abstract = {Inverse problems with sparsity constraints, such as Basis Pursuit denoising (BPDN) and Convolutional BPDN (CBPDN), usually use the ℓ1-norm as the penalty function; however, such a choice leads to a solution that is biased towards zero. Recently, several works have proposed and assessed the properties of other non-standard penalty functions (most of them non-convex), which avoid the above mentioned drawback and at the same time are intended to induce sparsity more strongly than the ℓ1-norm. In this paper we propose a two-term penalty function consisting of a synthesis between the ℓ1-norm and the penalty function associated with the Non-Negative Garrote (NNG) thresholding rule. Although the proposed two-term penalty function is non-convex, the total cost function for the BPDN / CBPDN problems is still convex. The performance of the proposed two-term penalty function is compared with other reported choices for practical denoising, deconvolution and convolutional sparse coding (CSC) problems within the BPDN / CBPDN frameworks. Our experimental results show that the proposed two-term penalty function is particularly effective (better reconstruction with sparser solutions) for the CSC problem while attaining competitive performance for the denoising and deconvolution problems.},
  keywords = {approximation theory;convex programming;image coding;image denoising;inverse problems;optimisation;signal denoising;signal reconstruction;two-term penalty function;inverse problems;sparsity constrains;nonstandard penalty functions;BPDN-CBPDN problems;convolutional sparse coding problems;convolutional basis pursuit denoising;Signal processing algorithms;Convergence;Noise reduction;Convolutional codes;Convolution;Dictionaries;Europe},
  doi = {10.23919/EUSIPCO.2017.8081585},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346716.pdf},
}
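For reference, the BPDN problem the paper builds on is

\[ \min_{x} \; \tfrac{1}{2}\| y - A x \|_2^2 + \lambda\, \phi(x), \]

with $\phi(x) = \|x\|_1$ in the standard case, whose proximal step is the soft threshold $T_\lambda(t) = \operatorname{sign}(t)\max(|t| - \lambda, 0)$. The constant shrinkage of every surviving coefficient by $\lambda$ is exactly the bias toward zero mentioned in the abstract; non-convex penalties such as the one induced by the Non-Negative Garrote thresholding rule shrink large coefficients much less, removing most of that bias.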
Multi denoising approximate message passing for optimal recovery with lower computational cost. Perelli, A.; and Davies, M. E. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2131-2135, Aug 2017.
@InProceedings{8081586,
  author = {A. Perelli and M. E. Davies},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Multi denoising approximate message passing for optimal recovery with lower computational cost},
  year = {2017},
  pages = {2131-2135},
  abstract = {An emerging issue in large-scale inverse problems is the interdependency between computational and recovery performance; in practical applications, such as medical imaging, it is crucial to provide high quality estimates given bounds on computational time. While most work in this direction has gone down the lines of improving optimisation schemes, in this paper we propose and investigate a different approach based on a multi denoising approximate message passing (MultiD-AMP) framework for Compressive Sensing (CS) image reconstruction, which exploits a hierarchy of denoisers by starting with a low fidelity model and then using the estimate as the starting point for higher fidelity models through an iterative reconstruction algorithm. MultiD-AMP achieves lower time complexity and the same accuracy compared to using the single most accurate denoiser throughout, as in D-AMP. The novelty of our approach lies in exploiting the deterministic state evolution of AMP, i.e., the predictability of the recovery performance, to design a strategy for selecting the denoiser from a set ordered by both computational complexity and statistical efficiency. We apply the MultiD-AMP framework to image reconstruction given noisy Gaussian random linear measurements, and further demonstrate its applicability to CS image reconstruction.},
  keywords = {approximation theory;compressed sensing;computational complexity;image denoising;image reconstruction;inverse problems;iterative methods;message passing;optimisation;optimal recovery;large-scale inverse problems;computational recovery performance;medical imaging;computational time;optimisation schemes;multidenoising approximate message passing;Compressive Sensing image reconstruction;CS;low fidelity model;higher fidelity models;iterative reconstruction algorithm;accurate denoiser;computational complexity;MultiD-AMP framework;time complexity;image reconstruction;noisy Gaussian random linear measurements;Switches;Noise reduction;Image reconstruction;Noise measurement;Training;Discrete wavelet transforms;Time complexity;Compressive Sensing;Approximate Message Passing;Denoising;Computational complexity},
  doi = {10.23919/EUSIPCO.2017.8081586},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347684.pdf},
}
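The AMP iteration underlying D-AMP, in its standard form (our notation), is

\[ x^{t+1} = \eta_t\big(x^t + A^{\top} z^t\big), \qquad z^t = y - A x^t + \frac{1}{\delta}\, z^{t-1}\, \big\langle \eta_{t-1}'\big(x^{t-1} + A^{\top} z^{t-1}\big)\big\rangle, \]

where $\eta_t$ is the denoiser, $\delta = m/n$ is the sampling ratio, and $\langle\cdot\rangle$ denotes entrywise averaging. The last (Onsager correction) term keeps the effective input to the denoiser close to the true signal plus white Gaussian noise, which is what makes the state-evolution prediction, and hence the proposed denoiser-switching schedule, tractable.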
A minimax dictionary expansion for sparse continuous reconstruction. Passarin, T. A. R.; Pipa, D. R.; and Zibetti, M. V. W. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2136-2140, Aug 2017.
@InProceedings{8081587,
  author = {T. A. R. Passarin and D. R. Pipa and M. V. W. Zibetti},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A minimax dictionary expansion for sparse continuous reconstruction},
  year = {2017},
  pages = {2136-2140},
  abstract = {The concept of dictionary expansion has been applied in inverse problems as a means to overcome a problem known as off-grid deviation. Within this framework and under the assumption that the off-grid deviations obey a uniform distribution, we propose a minimax error criterion to build expanded dictionaries. To this end, we formulate the problem as a polynomial regression and cast it as a second-order cone program. A robust method for the recovery of continuous time shifts and amplitudes from reconstructed expanded coefficients is also presented. Empirical results with a greedy algorithm and a convex optimization algorithm, both conceived to work with expanded dictionaries, show that the proposed expanded basis provides accurate reconstruction of continuous-time located events in the presence of noise.},
  keywords = {convex programming;greedy algorithms;inverse problems;minimax techniques;optimisation;polynomials;regression analysis;robust method;continuous time shifts;reconstructed expanded coefficients;convex optimization algorithm;expanded dictionaries;continuous-time located events;minimax dictionary expansion;sparse continuous reconstruction;inverse problems;off-grid deviation;uniform distribution;minimax error criterion;polynomial regression;second-order cone program;Dictionaries;Manifolds;Europe;Signal processing;Delays;Adaptation models;Matching pursuit algorithms;Inverse problems;dictionary expansion;manifold;optimization;sparse reconstruction},
  doi = {10.23919/EUSIPCO.2017.8081587},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342965.pdf},
}
A new algorithm for training sparse autoencoders. Shamsabadi, A. S.; Babaie-Zadeh, M.; Seyyedsalehi, S. Z.; Rabiee, H. R.; and Jutten, C. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2141-2145, Aug 2017.
@InProceedings{8081588,
  author = {A. S. Shamsabadi and M. Babaie-Zadeh and S. Z. Seyyedsalehi and H. R. Rabiee and C. Jutten},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A new algorithm for training sparse autoencoders},
  year = {2017},
  pages = {2141-2145},
  abstract = {Data representation plays an important role in the performance of machine learning algorithms. Since data usually lacks the desired quality, many efforts have been made to provide a more desirable representation of data. Among many different approaches, sparse data representation has gained popularity in recent years. In this paper, we propose a new sparse autoencoder by imposing the square of the smoothed L0 norm of the data representation on the hidden layer of a regular autoencoder. The square of the smoothed L0 norm increases the tendency that each data representation is {"}individually{"} sparse. Moreover, by using the proposed sparse autoencoder, once the model parameters are learned, the sparse representation of any new data is obtained simply by a matrix-vector multiplication without performing any optimization. When applied to the MNIST, CIFAR-10, and OPTDIGITS datasets, the results show that the proposed model guarantees a sparse representation for each input data which leads to better classification results.},
  keywords = {data structures;learning (artificial intelligence);matrix multiplication;optimisation;pattern classification;vectors;machine learning algorithms;sparse data representation;sparse autoencoder;matrix-vector multiplication;optimization;Feature extraction;Optimization;Signal processing algorithms;Training;Sparse matrices;Decoding;Encoding},
  doi = {10.23919/EUSIPCO.2017.8081588},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347253.pdf},
}
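The smoothed L0 norm used here as the sparsity regularizer, in its standard (SL0) form, approximates the count of nonzeros in the hidden activation vector $h \in \mathbb{R}^n$ by

\[ \|h\|_0 \;\approx\; n - \sum_{i=1}^{n} \exp\!\big(-h_i^2 / 2\sigma^2\big), \]

which is differentiable, tends to the exact ℓ0 norm as $\sigma \to 0$, and can therefore be back-propagated through the hidden layer during training; the paper penalizes the square of this quantity.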
A generalization of weighted sparse decomposition to negative weights. Delfi, G.; Aziznejad, S.; Amani, S.; Babaie-Zadeh, M.; and Jutten, C. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2146-2150, Aug 2017.
@InProceedings{8081589,
  author = {G. Delfi and S. Aziznejad and S. Amani and M. Babaie-Zadeh and C. Jutten},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A generalization of weighted sparse decomposition to negative weights},
  year = {2017},
  pages = {2146-2150},
  abstract = {Sparse solutions of underdetermined linear systems of equations are widely used in different fields of signal processing. This problem can also be seen as a sparse decomposition problem. Traditional sparse decomposition gives the same priority to all atoms for being included in the decomposition or not. However, in some applications, one may want to assign different priorities to different atoms for being included in the decomposition. This results in the so-called {"}weighted sparse decomposition{"} problem [Babaie-Zadeh et al. 2012]. However, Babaie-Zadeh et al. studied this problem only for positive weights, whereas in some applications (e.g. classification) better performance can be obtained if some weights become negative. In this paper, we consider the {"}weighted sparse decomposition{"} problem in its general form (positive and negative weights). A tight uniqueness condition and some applications for the general case are presented.},
  keywords = {linear systems;signal processing;sparse matrices;negative weights;sparse solutions;signal processing;weighted sparse decomposition problem;Signal processing algorithms;Minimization;Signal to noise ratio;Europe;Electronic mail;Linear systems;Sparse signal processing;Weighted sparse decomposition;Weighted ℓ0 norm minimization;Negative weights decomposition;Weighted Sparse Representation for Classification},
  doi = {10.23919/EUSIPCO.2017.8081589},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346471.pdf},
}
A micro-motion information reconstruction method based on compressed sensing for precession ballistic targets. Wu, Q.; Liu, J.; Ai, X.; Zhao, F.; and Xiao, S. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2151-2155, Aug 2017.
@InProceedings{8081590,
  author = {Q. Wu and J. Liu and X. Ai and F. Zhao and S. Xiao},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A micro-motion information reconstruction method based on compressed sensing for precession ballistic targets},
  year = {2017},
  pages = {2151-2155},
  abstract = {For a wideband radar system adopting a stepped frequency signal (SFS), the micro-motion parameters are usually obtained by time-frequency analysis of the echo HRRPs (high resolution range profiles). The data to be collected mainly include each sub-frequency echo in the slow-time domain, which places a great burden on the signal generation equipment and data storage of the radar system. Because of the sparseness of the ballistic target scatterers in the high-frequency region, this paper adopts the compressed sensing technique to reduce the sampled data by randomly transmitting the stepped frequency signal. The performance of the proposed method is analyzed through precession target measurement experiments in an anechoic chamber. The experimental results show that, for the nose cone, middle circular ring and bottom circular ring of the experimental missile target, when the sampling rate is no less than 50%, 30% and 30% respectively, the reconstruction result is adequate for micro-motion parameter extraction.},
  keywords = {ballistics;compressed sensing;missiles;radar resolution;radar target recognition;signal reconstruction;micromotion parameter extraction;micromotion information reconstruction method;precession ballistic targets;wideband radar system;stepped frequency signal;time-frequency analysis;signal generation equipment;ballistic target scatters;compressed sensing technique;precession target measurement experiment;middle circular ring;bottom circular ring;missile target;subfrequency echo;echo high resolution profile;Time-frequency analysis;Compressed sensing;Radar;Signal processing algorithms;Nose;Microwave measurement;Microwave theory and techniques;Micro-motion;Compressed sensing;Stepped frequency signal;Precession Ballistic targets},
  doi = {10.23919/EUSIPCO.2017.8081590},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341972.pdf},
}
Multivariate iterative hard thresholding for sparse decomposition with flexible sparsity patterns. Rencker, L.; Wang, W.; and Plumbley, M. D. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2156-2160, Aug 2017.
@InProceedings{8081591,
  author = {L. Rencker and W. Wang and M. D. Plumbley},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Multivariate iterative hard thresholding for sparse decomposition with flexible sparsity patterns},
  year = {2017},
  pages = {2156-2160},
  abstract = {We address the problem of decomposing several consecutive sparse signals, such as audio time frames or image patches. A typical approach is to process each signal sequentially and independently, with an arbitrary sparsity level fixed for each signal. Here, we propose to process several frames simultaneously, allowing for more flexible sparsity patterns to be considered. We propose a multivariate sparse coding approach, where sparsity is enforced on average across several frames. We propose a Multivariate Iterative Hard Thresholding to solve this problem. The usefulness of the proposed approach is demonstrated on audio coding and denoising tasks. Experiments show that the proposed approach leads to better results when the signal contains both transients and tonal components.},
  keywords = {audio coding;iterative methods;signal denoising;sparse decomposition;flexible sparsity patterns;audio coding;denoising tasks;sparse signals;multivariate iterative hard thresholding;multivariate sparse coding approach;arbitrary sparsity level;Signal processing algorithms;Sparse matrices;Transient analysis;Dictionaries;Time-frequency analysis;Encoding;Signal processing},
  doi = {10.23919/EUSIPCO.2017.8081591},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347060.pdf},
}
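For context, plain iterative hard thresholding alternates a gradient step with a projection onto the set of K-sparse vectors, x <- H_K(x + A^T(y - Ax)). The hedged sketch below shows this baseline; a "multivariate" variant in the spirit of the paper could threshold F stacked frames jointly, keeping K*F coefficients overall rather than K per frame (our assumption, not the paper's exact algorithm).

import numpy as np

def hard_threshold(x, k):
    # Keep the k largest-magnitude entries, zero out the rest.
    out = np.zeros_like(x)
    idx = np.argsort(np.abs(x))[-k:]
    out[idx] = x[idx]
    return out

def iht(A, y, k, iters=200, step=1.0):
    # Baseline IHT; for convergence the step should satisfy
    # step < 1 / ||A||_2^2 (step=1.0 assumes a normalized dictionary).
    x = np.zeros(A.shape[1])
    for _ in range(iters):
        x = hard_threshold(x + step * A.T @ (y - A @ x), k)
    return x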
Block sparse vector recovery via weighted generalized range space property. Hilli, A. A.; and Petropulu, A. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2161-2165, Aug 2017.
@InProceedings{8081592,
  author = {A. A. Hilli and A. Petropulu},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Block sparse vector recovery via weighted generalized range space property},
  year = {2017},
  pages = {2161-2165},
  abstract = {In block sparse vector recovery problems we are interested in finding the vector with the least number of active blocks that best describes the observation. The convex relaxation of that problem, typically used to reduce complexity, is strictly equivalent to the original problem only when certain conditions are met, such as the Restricted Isometry Property, Null Space Characterization, and Block Mutual Coherence. In practice, those conditions may not be satisfied, which implies that solving the relaxed problem may not retrieve the block sparsest solution. In this paper, we propose a weighted approach which, in the noise free case and under certain conditions, guarantees that the relaxed problem solution has the same support as the sparsest block vector. The weights can be obtained based on a low resolution estimate of the group sparse signal.},
  keywords = {computational complexity;signal representation;block mutual coherence;null space characterization;restricted isometry property;group sparse signal;sparsest block vector;relaxed problem solution;block sparsest solution;convex relaxation;active blocks;block sparse vector recovery problems;weighted generalized range space property;Signal processing;Sparse matrices;Europe;Coherence;Null space;Noise measurement;Complexity theory},
  doi = {10.23919/EUSIPCO.2017.8081592},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347335.pdf},
}
Frequency diverse array beamforming for physical-layer security with directionally-aligned legitimate user and eavesdropper. Lin, J.; Li, Q.; and Yang, J. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2166-2170, Aug 2017.
@InProceedings{8081593,
  author = {J. Lin and Q. Li and J. Yang},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Frequency diverse array beamforming for physical-layer security with directionally-aligned legitimate user and eavesdropper},
  year = {2017},
  pages = {2166-2170},
  abstract = {The conventional physical-layer (PHY) security approaches, e.g., transmit beamforming and artificial noise (AN)-based design, may fail when the channels of the legitimate user (LU) and the eavesdropper (Eve) are closely correlated. Due to the highly directional transmission feature of millimeter-wave (mmWave) communications, this may occur in mmWave transmissions when the transmitter, Eve and LU are exactly aligned in the same direction. To handle the PHY security problem with directionally-aligned LU and Eve, we propose a novel frequency diverse array (FDA) beamforming approach to differentiating the LU and Eve. By intentionally introducing some frequency offsets across the antennas, the FDA beamforming generates an angle-range dependent beampattern. As a consequence, it can degrade Eve's reception and thus achieve PHY security. In this paper, we maximize the secrecy rate by jointly optimizing the frequency offsets and the beamformer. This secrecy rate maximization (SRM) problem is hard to solve due to the tightly coupled variables. Nevertheless, we show that it can be reformulated into a form depending only on the frequency offsets. Building upon this reformulation, we identify some cases where the SRM problem can be optimally solved in closed form. Numerical results demonstrate the efficacy of FDA beamforming in achieving PHY security, even for aligned LU and Eve.},
  keywords = {array signal processing;optimisation;radio transmitters;radiocommunication;telecommunication security;physical-layer security;directionally-aligned legitimate user;artificial noise;highly directional transmission feature;mmWave transmissions;PHY security problem;directionally-aligned LU;frequency offsets;FDA beamforming;angle-range dependent beampattern;secrecy rate maximization problem;frequency diverse array beamforming;millimeter wave transmission;Array signal processing;Security;OFDM;Frequency modulation;Phased arrays;Frequency diversity},
  doi = {10.23919/EUSIPCO.2017.8081593},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341586.pdf},
}
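A hedged sketch of the frequency-diverse-array idea (the textbook narrowband approximation, not the paper's optimised design; carrier, offset and element count below are assumed example values): with a small per-element offset Δf, the n-th element's phase couples angle and range, so the same angle can lie on a mainlobe at one range and in a null at another, which is what lets the array separate a directionally-aligned LU and Eve at different ranges.

import numpy as np

c, f0, df, N = 3e8, 10e9, 3e3, 16      # speed of light, carrier, offset, elements
d = c / f0 / 2                          # half-wavelength element spacing

def fda_beampattern(theta, r, t=0.0):
    # Normalized array factor of a uniform FDA at angle theta (rad), range r (m).
    n = np.arange(N)
    phase = 2 * np.pi * n * (f0 * d * np.sin(theta) / c - df * r / c + df * t)
    return np.abs(np.exp(1j * phase).sum()) / N

print(fda_beampattern(np.deg2rad(30), r=25e3),     # mainlobe at this range (~1.0)
      fda_beampattern(np.deg2rad(30), r=18.75e3))  # null at the same angle (~0.0)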
Botnet identification in multi-clustered DDoS attacks. Matta, V.; Di Mauro, M.; and Longo, M. In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2171-2175, Aug 2017.
@InProceedings{8081594,
  author = {V. Matta and M. {Di Mauro} and M. Longo},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Botnet identification in multi-clustered DDoS attacks},
  year = {2017},
  pages = {2171-2175},
  abstract = {In a randomized DDoS attack with increasing emulation dictionary, the bots try to hide their malicious activity by disguising their traffic patterns as {"}normal{"} traffic patterns. In this work, we extend the DDoS class introduced in [1], [2] to the case of a multi-clustered botnet, whose main feature is that the emulation dictionary is split over the botnet, giving rise to multiple botnet clusters. We propose two strategies to identify the botnet in such a challenging scenario, one based on cluster expurgation and the other on a union rule. The consistency of both algorithms under ideal conditions is ascertained, while their performance is examined over real network traces.},
  keywords = {client-server systems;computer network security;Internet;invasive software;pattern clustering;cluster expurgation;multiple botnet clusters;multiclustered botnet;DDoS class;emulation dictionary;randomized DDoS attack;multiclustered DDoS attacks;botnet identification;Computer crime;Dictionaries;Emulation;Signal processing algorithms;Clustering algorithms;Signal processing;Distributed Denial-of-Service;DDoS;Cyber-Security;Signal Processing for Network Security},
  doi = {10.23919/EUSIPCO.2017.8081594},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343416.pdf},
}
\n \n\n \n \n \n \n \n \n Exploring deep learning image super-resolution for iris recognition.\n \n \n \n \n\n\n \n Ribeiro, E.; Uhl, A.; Alonso-Fernandez, F.; and Farrugia, R. A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2176-2180, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ExploringPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081595,\n  author = {E. Ribeiro and A. Uhl and F. Alonso-Fernandez and R. A. Farrugia},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Exploring deep learning image super-resolution for iris recognition},\n  year = {2017},\n  pages = {2176-2180},\n  abstract = {In this work we test the ability of deep learning methods to provide an end-to-end mapping between low and high resolution images applying it to the iris recognition problem. Here, we propose the use of two deep learning single-image super-resolution approaches: Stacked Auto-Encoders (SAE) and Convolutional Neural Networks (CNN) with the most possible lightweight structure to achieve fast speed, preserve local information and reduce artifacts at the same time. We validate the methods with a database of 1.872 near-infrared iris images with quality assessment and recognition experiments showing the superiority of deep learning approaches over the compared algorithms.},\n  keywords = {feature extraction;image resolution;iris recognition;learning (artificial intelligence);neural nets;iris recognition problem;Stacked Auto-Encoders;Convolutional Neural Networks;quality assessment;deep learning methods;end-to-end mapping;low resolution images;high resolution images;lightweight structure;deep learning single-image super-resolution;CNN;SAE;Image resolution;Iris recognition;Training;Databases;Interpolation;Machine learning;Image reconstruction},\n  doi = {10.23919/EUSIPCO.2017.8081595},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346942.pdf},\n}\n\n
\n
\n\n\n
\n In this work, we test the ability of deep learning methods to provide an end-to-end mapping between low and high resolution images, applying it to the iris recognition problem. Here, we propose the use of two deep learning single-image super-resolution approaches: Stacked Auto-Encoders (SAE) and Convolutional Neural Networks (CNN) with the lightest possible structure, so as to simultaneously achieve fast speed, preserve local information and reduce artifacts. We validate the methods with a database of 1,872 near-infrared iris images, with quality assessment and recognition experiments showing the superiority of the deep learning approaches over the compared algorithms.\n
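\n A lightweight single-image super-resolution CNN in the spirit the abstract describes is the classic three-layer SRCNN; the sketch below is that generic architecture (in PyTorch), not the authors' exact network, channel counts or training setup.\n

import torch
import torch.nn as nn

class TinySRCNN(nn.Module):
    """Three-layer SRCNN-style network: patch feature extraction,
    non-linear mapping, and reconstruction. It operates on a bicubically
    pre-upscaled single-channel (e.g. near-infrared iris) image."""
    def __init__(self):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=9, padding=4),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 32, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 1, kernel_size=5, padding=2),  # reconstruction
        )

    def forward(self, x):
        return self.body(x)

x = torch.randn(1, 1, 64, 64)   # dummy pre-upscaled iris patch
print(TinySRCNN()(x).shape)     # -> torch.Size([1, 1, 64, 64])
\n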
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Video phylogeny tree reconstruction using aging measures.\n \n \n \n \n\n\n \n Milani, S.; Bestagini, P.; and Tubaro, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2181-2185, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"VideoPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081596,\n  author = {S. Milani and P. Bestagini and S. Tubaro},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Video phylogeny tree reconstruction using aging measures},\n  year = {2017},\n  pages = {2181-2185},\n  abstract = {The increasing diffusion of user-friendly editing software and online media sharing platforms has brought forth a growing on-line availability of near-duplicate (ND) videos. The need of authenticating these contents and tracing back their history has led to the investigation of forensic algorithms for the reconstruction of the video phylogeny tree (VPT), i.e., an acyclic directed graph summarizing video genealogical relationships. Unfortunately, state-of-the-art solutions for VPT reconstruction suffer from strong computational requirements. In this paper, we propose a processing age measure based on video DCT coefficients and motion vectors statistics, which enables to provide preliminary information about possible video parent-child relationship. The use of processing age allows a forensic analyst to blindly select a smaller amount of significant video pairs to be compared for VPT reconstruction. This solution grants computational complexity reduction to the overall VPT reconstruction pipeline.},\n  keywords = {computational complexity;data compression;directed graphs;discrete cosine transforms;image reconstruction;multimedia computing;trees (mathematics);video signal processing;user-friendly editing software;online media;near-duplicate videos;forensic algorithms;acyclic directed graph;video genealogical relationships;strong computational requirements;processing age measure;video DCT coefficients;motion vectors statistics;forensic analyst;significant video pairs;VPT reconstruction pipeline;video phylogeny tree reconstruction;aging measures;online availability;video parent-child relationship;Image reconstruction;Video sequences;Measurement;Silicon;Phylogeny;Computational complexity;Aging},\n  doi = {10.23919/EUSIPCO.2017.8081596},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347037.pdf},\n}\n\n
\n
\n\n\n
\n The increasing diffusion of user-friendly editing software and online media sharing platforms has brought forth a growing online availability of near-duplicate (ND) videos. The need to authenticate these contents and trace back their history has led to the investigation of forensic algorithms for the reconstruction of the video phylogeny tree (VPT), i.e., an acyclic directed graph summarizing video genealogical relationships. Unfortunately, state-of-the-art solutions for VPT reconstruction suffer from strong computational requirements. In this paper, we propose a processing age measure based on video DCT coefficients and motion vector statistics, which provides preliminary information about possible video parent-child relationships. The use of processing age allows a forensic analyst to blindly select a smaller number of significant video pairs to be compared for VPT reconstruction. This solution reduces the computational complexity of the overall VPT reconstruction pipeline.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improved reversible data hiding in encrypted images based on reserving room after encryption and pixel prediction.\n \n \n \n \n\n\n \n Dragoi, I. C.; Coanda, H.; and Coltuc, D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2186-2190, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ImprovedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081597,\n  author = {I. C. Dragoi and H. Coanda and D. Coltuc},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Improved reversible data hiding in encrypted images based on reserving room after encryption and pixel prediction},\n  year = {2017},\n  pages = {2186-2190},\n  abstract = {This paper proposes a new vacating room after encryption reversible data hiding scheme. Both joint and separate methods are presented. The most interesting features of the proposed scheme are the two staged embedding/decoding process and the group parity based data embedding for the separate method. Other new features are introduced as well. Compared with the state-of-the-art reserving room after encryption schemes, the proposed approach provides higher embedding bit-rates at lower distortion. Experimental results are provided.},\n  keywords = {cryptography;data encapsulation;decoding;image coding;encrypted images;state-of-the-art reserving room;encryption schemes;data hiding;Encryption;Image restoration;Watermarking;Decoding;Data mining;Distortion},\n  doi = {10.23919/EUSIPCO.2017.8081597},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347410.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a new reversible data hiding scheme based on vacating room after encryption. Both joint and separate methods are presented. The most interesting features of the proposed scheme are the two-stage embedding/decoding process and the group-parity-based data embedding for the separate method. Other new features are introduced as well. Compared with state-of-the-art reserving-room-after-encryption schemes, the proposed approach provides higher embedding bit-rates at lower distortion. Experimental results are provided.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Trust the biometric mainstream: Multi-biometric fusion and score coherence.\n \n \n \n\n\n \n Damer, N.; Rhaibani, C. I.; Braun, A.; and Kuijper, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2191-2195, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081598,\n  author = {N. Damer and C. I. Rhaibani and A. Braun and A. Kuijper},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Trust the biometrie mainstream: Multi-biometric fusion and score coherence},\n  year = {2017},\n  pages = {2191-2195},\n  abstract = {Multi-biometrics aims at building more accurate unified biometric decisions based on the information provided by multiple biometric sources. Information fusion is used to optimize the process of creating this unified decision. In previous works dealing with score-level multi-biometric fusion, the scores of different biometric sources belonging to the comparison of interest are used to create the fused score. This is usually achieved by assigning static weights for the different biometric sources with more advanced solutions considering supplementary dynamic information like sample quality and neighbours distance ratio. This work proposes embedding score coherence information in the fusion process. This is based on our assumption that a minority of biometric sources, which points out towards a different decision than the majority, might have faulty conclusions and should be given relatively smaller role in the final decision. The evaluation was performed on the BioSecure multimodal biometric database with different levels of simulated noise. The proposed solution incorporates, and was compared to, three baseline static weighting approaches. The enhanced performance induced by including the coherence information within a dynamic weighting scheme in comparison to the baseline solution was shown by the reduction of the equal error rate by 45% to 85% over the different test scenarios and proved to maintain high performance when dealing with noisy data.},\n  keywords = {biometrics (access control);sensor fusion;accurate unified biometric decisions;information fusion;score-level multibiometric fusion;fused score;supplementary dynamic information;score coherence information;fusion process;BioSecure multimodal biometric database;baseline static weighting approaches;biometric sources;Coherence;Databases;Noise measurement;Face;Europe;Probes;Error analysis},\n  doi = {10.23919/EUSIPCO.2017.8081598},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n Multi-biometrics aims at building more accurate unified biometric decisions based on the information provided by multiple biometric sources. Information fusion is used to optimize the process of creating this unified decision. In previous works dealing with score-level multi-biometric fusion, the scores of the different biometric sources belonging to the comparison of interest are used to create the fused score. This is usually achieved by assigning static weights to the different biometric sources, with more advanced solutions considering supplementary dynamic information like sample quality and the neighbours distance ratio. This work proposes embedding score coherence information in the fusion process. It is based on our assumption that a minority of biometric sources pointing towards a different decision than the majority might have reached faulty conclusions, and should therefore be given a relatively smaller role in the final decision. The evaluation was performed on the BioSecure multimodal biometric database with different levels of simulated noise. The proposed solution incorporates, and was compared to, three baseline static weighting approaches. Including the coherence information within a dynamic weighting scheme reduced the equal error rate by 45% to 85% relative to the baseline solution over the different test scenarios, and the method maintained high performance when dealing with noisy data.\n
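\n The core intuition, i.e. shrink the influence of sources whose scores stray from the biometric mainstream, can be captured in a few lines. The snippet below is only a toy realization of that idea; the shrinkage rule and its parameter are our own simple choices, not the weighting scheme evaluated in the paper.\n

import numpy as np

def coherence_fused_score(scores, static_weights, alpha=1.0):
    """Fuse per-source comparison scores (assumed normalized to [0, 1]).
    Sources whose score lies far from the median ("the mainstream")
    get their static weight shrunk before the weighted sum."""
    scores = np.asarray(scores, dtype=float)
    w = np.asarray(static_weights, dtype=float)
    coherence = 1.0 - np.abs(scores - np.median(scores)) ** alpha
    dyn = w * coherence
    return float(np.sum(dyn * scores) / np.sum(dyn))

# Three sources agree on a genuine comparison; the fourth is an outlier
# and contributes far less than its static weight alone would allow:
print(coherence_fused_score([0.90, 0.85, 0.88, 0.20], [0.25] * 4))
\n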
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-way regression for age prediction exploiting speech and face image information.\n \n \n \n \n\n\n \n Pantraki, E.; and Kotropoulos, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2196-2200, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-wayPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081599,\n  author = {E. Pantraki and C. Kotropoulos},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-way regression for age prediction exploiting speech and face image information},\n  year = {2017},\n  pages = {2196-2200},\n  abstract = {In this paper, the problem of age estimation is addressed based on two modalities: speech utterances and speakers' face images. The proposed age estimation framework employs the Shifted Covariates REgression Analysis for Multi-way data (SCREAM) model, which combines Parallel Factor Analysis 2 and Principal Covariates Regression. SCREAM is able to extract a few latent variables from multi-way data and compute regression coefficients. Initially, biologically inspired features are extracted from speech utterances and face images and are suitable feature matrices are created to be fed to the multi-way SCREAM model. For bimodal age estimation, the visual and aural features are appropriately combined in a single matrix for each person. Experimental results demonstrate the profit of combining the two modalities. The performance admitted by the multi-way regression for age estimation is also measured on the benchmark face image dataset FG-NET. The proposed method is found to be competitive to state-of-the-art age estimation methods.},\n  keywords = {estimation theory;face recognition;feature extraction;matrix algebra;regression analysis;multiway regression;age prediction;image information;speech utterances;age estimation framework;SCREAM;Parallel Factor Analysis 2;Principal Covariates Regression;latent variables;compute regression coefficients;biologically inspired features;face images;bimodal age estimation;visual features;aural features;benchmark face image dataset FG-NET;feature matrices;shifted covariates regression analysis;multiway data model;Face;Estimation;Speech;Feature extraction;Databases;Visualization;Aging},\n  doi = {10.23919/EUSIPCO.2017.8081599},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570348322.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, the problem of age estimation is addressed based on two modalities: speech utterances and speakers' face images. The proposed age estimation framework employs the Shifted Covariates REgression Analysis for Multi-way data (SCREAM) model, which combines Parallel Factor Analysis 2 and Principal Covariates Regression. SCREAM is able to extract a few latent variables from multi-way data and compute regression coefficients. Initially, biologically inspired features are extracted from the speech utterances and face images, and suitable feature matrices are created to be fed to the multi-way SCREAM model. For bimodal age estimation, the visual and aural features are appropriately combined in a single matrix for each person. Experimental results demonstrate the benefit of combining the two modalities. The performance of the multi-way regression for age estimation is also measured on the benchmark FG-NET face image dataset. The proposed method is found to be competitive with state-of-the-art age estimation methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Age group detection using smartphone motion sensors.\n \n \n \n \n\n\n \n Davarci, E.; Soysal, B.; Erguler, I.; Aydin, S. O.; Dincer, O.; and Anarim, E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2201-2205, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AgePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081600,\n  author = {E. Davarci and B. Soysal and I. Erguler and S. O. Aydin and O. Dincer and E. Anarim},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Age group detection using smartphone motion sensors},\n  year = {2017},\n  pages = {2201-2205},\n  abstract = {Side-channel attacks revealing the sensitive user data through the motion sensors (such as accelerometer, gyroscope, and orientation sensors) emerged as a new trend in the smartphone security. In this respect, recent studies have examined feasibility of inferring user's tap input by utilizing the motion sensor readings and propounded that some user secrets can be deduced by adopting the different side-channel attacks. More precisely, in this kind of attacks, a malware processes outputs of these sensors to exfiltrate victims private information such as PINs, passwords or unlock patterns. In this paper, we describe a new side-channel attack on smartphones that aims to predict the age interval of the user. Unlike the previous works, our attack does not directly deal with recovering a target user's some secret, rather its sole purpose is determining whether she is a child or an adult. The main idea behind our study relies on the key observation that the characteristics of children and adults differ in hand holding and touching the smartphones. Consequently, we show that there is an apparent correlation between the motion sensor readings and these characteristics that build up our attack strategy. In order to exhibit efficiency of the proposed attack, we have developed an Android application named as BalloonLogger that evaluates accelerometer sensor data and perform child/adult detection with a success rate of 92.5%. To the best of our knowledge, in this work, for the first time, we point out such a security breach.},\n  keywords = {cryptography;invasive software;mobile computing;smart phones;age group detection;smartphone motion sensors;side-channel attack;sensitive user data;orientation sensors;smartphone security;motion sensor readings;user secrets;victims private information;passwords;unlock patterns;age interval;attack strategy;accelerometer sensor data;child/adult detection;Sensors;Accelerometers;Malware;Androids;Humanoid robots;Discrete Fourier transforms;Security},\n  doi = {10.23919/EUSIPCO.2017.8081600},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346241.pdf},\n}\n\n
\n
\n\n\n
\n Side-channel attacks that reveal sensitive user data through the motion sensors (such as the accelerometer, gyroscope, and orientation sensors) have emerged as a new trend in smartphone security. In this respect, recent studies have examined the feasibility of inferring a user's tap input from the motion sensor readings and shown that some user secrets can be deduced through different side-channel attacks. More precisely, in this kind of attack, malware processes the outputs of these sensors to exfiltrate a victim's private information such as PINs, passwords or unlock patterns. In this paper, we describe a new side-channel attack on smartphones that aims to predict the age interval of the user. Unlike previous works, our attack does not aim at recovering any particular secret of a target user; rather, its sole purpose is to determine whether the user is a child or an adult. The main idea behind our study is the key observation that children and adults differ in how they hold and touch smartphones. Consequently, we show that there is an apparent correlation between the motion sensor readings and these characteristics, which forms the basis of our attack strategy. To demonstrate the effectiveness of the proposed attack, we have developed an Android application named BalloonLogger that evaluates accelerometer sensor data and performs child/adult detection with a success rate of 92.5%. To the best of our knowledge, this work is the first to point out such a security breach.\n
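\n A minimal sketch of the kind of pipeline such an attack implies: window the accelerometer stream, extract simple time- and frequency-domain statistics, and train a binary child/adult classifier. The features, window length and classifier below are generic assumptions for illustration, not BalloonLogger's actual design, and the random data merely stands in for labeled touch sessions.\n

import numpy as np
from sklearn.ensemble import RandomForestClassifier

def window_features(acc, fs=100, win_s=2.0):
    """Split a 3-axis accelerometer stream (shape [T, 3]) into windows
    and extract simple per-axis statistics: mean, standard deviation,
    and the dominant DFT magnitude (DC bin excluded)."""
    step = int(fs * win_s)
    feats = []
    for start in range(0, len(acc) - step + 1, step):
        w = acc[start:start + step]
        spec = np.abs(np.fft.rfft(w, axis=0))[1:]   # drop the DC bin
        feats.append(np.concatenate([w.mean(0), w.std(0), spec.max(0)]))
    return np.array(feats)

# Random data standing in for labeled child (0) / adult (1) sessions:
rng = np.random.default_rng(0)
X = np.vstack([window_features(rng.normal(0, s, (2000, 3))) for s in (1.0, 1.5)])
y = np.repeat([0, 1], len(X) // 2)
clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
print(clf.score(X, y))   # training accuracy on the toy data
\n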
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Secure communication protocol for a low-bandwidth audio channel.\n \n \n \n \n\n\n \n Berchtold, W.; Lieb, P.; and Steinebach, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2206-2210, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SecurePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081601,\n  author = {W. Berchtold and P. Lieb and M. Steinebach},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Secure communication protocol for a low-bandwidth audio channel},\n  year = {2017},\n  pages = {2206-2210},\n  abstract = {Data transmission over an inaudible audio channel describes a low-bandwidth alternative to exchange data between devices without any additional infrastructure. However, the established communication channel can be eavesdropped and manipulated by an attacker. To prevent this, we introduce a tailored protocol with smallest possible overhead to secure the communication. The proposed protocol produces an overhead of 256 bits for the handshake message for setting up the first conversation with each partner. Further, the protocol produces [msg_len/64]* 3 + 67 bits overhead for each message. The overhead of 67 bits at the beginning of each message corresponds to one second transmission time with the used FSK modulation in the frequency range of 16kHz-20kHz. The additional overhead of 3-bit per 64-bit sequence poses a relation of 95% message to 5% overhead. For the implementation of the protocol, algorithms implemented in the Crypto++ library such as SHA-256, CCM and PBKDF2 have been used.},\n  keywords = {cryptographic protocols;frequency shift keying;telecommunication channels;telecommunication security;secure communication protocol;low-bandwidth audio channel;data transmission;inaudible audio channel;handshake message;communication channel;FSK modulation;Crypto++ library;Protocols;Receivers;Cryptography;Transmitters;Radiation detectors},\n  doi = {10.23919/EUSIPCO.2017.8081601},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345370.pdf},\n}\n\n
\n
\n\n\n
\n Data transmission over an inaudible audio channel is a low-bandwidth alternative for exchanging data between devices without any additional infrastructure. However, the established communication channel can be eavesdropped and manipulated by an attacker. To prevent this, we introduce a tailored protocol with the smallest possible overhead to secure the communication. The proposed protocol produces an overhead of 256 bits for the handshake message that sets up the first conversation with each partner. Further, the protocol produces ⌈msg_len/64⌉ × 3 + 67 bits of overhead for each message. The 67-bit overhead at the beginning of each message corresponds to one second of transmission time with the FSK modulation used in the 16 kHz-20 kHz frequency range. The additional overhead of 3 bits per 64-bit sequence yields a ratio of roughly 95% message to 5% overhead. For the implementation of the protocol, algorithms implemented in the Crypto++ library such as SHA-256, CCM and PBKDF2 have been used.\n
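\n The stated numbers are easy to check. The helper below (a hypothetical name, simply re-expressing the abstract's formula in code, with the bracket read as a ceiling) computes the per-message overhead and the steady-state payload/overhead split.\n

import math

def message_overhead_bits(msg_len_bits):
    """Per-message overhead from the abstract's formula:
    ceil(msg_len/64) * 3 in-stream bits plus the fixed 67-bit
    message header (about one second of air time with the
    16-20 kHz FSK modulation)."""
    return math.ceil(msg_len_bits / 64) * 3 + 67

print(message_overhead_bits(1024))   # 16 * 3 + 67 = 115 bits

# Steady-state split, ignoring the fixed header: every 64 payload bits
# carry 3 overhead bits, i.e. 64/67 ~ 95.5% payload and 3/67 ~ 4.5%
# overhead, matching the abstract's roughly 95%/5% figure.
print(64 / 67, 3 / 67)
\n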
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Design of binary sequences with low PSL/ISL.\n \n \n \n \n\n\n \n Alaee, M.; Aubry, A.; De Maio, A.; Naghsh, M. M.; and Modarres-Hashemi, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2211-2215, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DesignPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081602,\n  author = {M. Alaee and A. Aubry and A. {De Maio} and M. M. Naghsh and M. Modarres-Hashemi},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Design of binary sequences with low PSL/ISL},\n  year = {2017},\n  pages = {2211-2215},\n  abstract = {In this paper the long standing major challenge of designing binary sequences with good (aperiodic) autocorrelation properties in terms of Peak Sidelobe Level (PSL) and Integrated Sidelobe Level (ISL) is considered. The problem is formulated as a bi-objective Pareto optimization forcing the binary constraint at the design stage. An iterative novel FFT-based approach exploiting the coordinate descent method is devised to deal with the resulting optimization problem which is non-convex and NP-hard in general. Simulation results illustrate that the proposed algorithm can outperform some counterparts providing sequences with desirable PSL as well as ISL.},\n  keywords = {binary sequences;computational complexity;concave programming;correlation methods;fast Fourier transforms;gradient methods;iterative methods;Pareto optimisation;coordinate descent method;binary sequences;Peak Sidelobe Level;Integrated Sidelobe Level;bi-objective Pareto optimization;binary constraint;design stage;optimization problem;low PSL-ISL;aperiodic autocorrelation properties;iterative novel FFT approach;NP-hard problem;nonconvex optimisation;NP;Correlation;Algorithm design and analysis;Optimization;Binary codes;Signal processing algorithms;Iterative methods;Radar;Radar;Waveform Design;Peak Sidelobe Level (PSL);Integrated Sidelobe Level (ISL);Binary Phase Codes},\n  doi = {10.23919/EUSIPCO.2017.8081602},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342388.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, the long-standing challenge of designing binary sequences with good (aperiodic) autocorrelation properties in terms of Peak Sidelobe Level (PSL) and Integrated Sidelobe Level (ISL) is considered. The problem is formulated as a bi-objective Pareto optimization enforcing the binary constraint at the design stage. A novel iterative FFT-based approach exploiting the coordinate descent method is devised to deal with the resulting optimization problem, which is non-convex and NP-hard in general. Simulation results illustrate that the proposed algorithm can outperform some counterparts, providing sequences with desirable PSL as well as ISL values.\n
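\n For reference, both merit figures are standard functions of the aperiodic autocorrelation r_k = sum_n x_n x_{n+k}: PSL = max over k != 0 of |r_k|, and ISL = sum over k != 0 of |r_k|^2. The direct computation below is just these definitions (not the paper's FFT-based coordinate-descent optimizer), checked on the 13-element Barker code.\n

import numpy as np

def psl_isl(x):
    """Peak and integrated sidelobe levels of a sequence, computed
    from its aperiodic autocorrelation over all non-zero lags."""
    x = np.asarray(x, dtype=float)
    r = np.correlate(x, x, mode='full')    # lags -(N-1) .. (N-1)
    sidelobes = np.delete(r, len(x) - 1)   # remove the zero-lag peak
    return np.abs(sidelobes).max(), float(np.sum(sidelobes ** 2))

# The 13-element Barker code, the classical PSL = 1 benchmark:
barker13 = np.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1])
print(psl_isl(barker13))                   # -> (1.0, 12.0)
\n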
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Range migrating target detection in correlated compound-Gaussian clutter.\n \n \n \n \n\n\n \n Petrov, N.; Le Chevalier, F.; Bogdanović, N.; and Yarovoy, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2216-2220, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RangePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081603,\n  author = {N. Petrov and F. {Le Chevalier} and N. Bogdanović and A. Yarovoy},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Range migrating target detection in correlated compound-Gaussian clutter},\n  year = {2017},\n  pages = {2216-2220},\n  abstract = {The problem of range-migrating target detection in a compound-Gaussian clutter is studied here. We assume a target to have a range-walk of a few range cells during the coherent processing interval, when observed by wideband radar with high range resolution. Two CFAR detectors are proposed assuming different correlation properties of clutter over range. The detectors' performance is studied via numerical simulations and a significant improvement over existing techniques is demonstrated.},\n  keywords = {correlation methods;Gaussian noise;radar clutter;radar detection;constant false alram rate detection;CFAR detectors;coherent processing interval;range cells;range-walk;range-migrating target detection;correlated compound-Gaussian clutter;Clutter;Detectors;Speckle;Object detection;Estimation;Radar},\n  doi = {10.23919/EUSIPCO.2017.8081603},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347601.pdf},\n}\n\n
\n
\n\n\n
\n The problem of range-migrating target detection in compound-Gaussian clutter is studied here. We assume the target has a range-walk of a few range cells during the coherent processing interval when observed by a wideband radar with high range resolution. Two CFAR detectors are proposed, assuming different correlation properties of the clutter over range. The detectors' performance is studied via numerical simulations, and a significant improvement over existing techniques is demonstrated.\n
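\n For readers unfamiliar with the CFAR principle the proposed detectors build on, the textbook cell-averaging CFAR below estimates the local clutter level from neighbouring range cells and scales it to hold a desired false-alarm rate. This is the generic baseline for homogeneous exponential clutter, not either of the paper's detectors for correlated compound-Gaussian clutter.\n

import numpy as np

def ca_cfar(power, guard=2, train=8, pfa=1e-4):
    """Cell-averaging CFAR over a 1-D range profile of power samples.
    Each cell's threshold is a scaled mean of the surrounding training
    cells, with guard cells excluded around the cell under test."""
    n = 2 * train
    alpha = n * (pfa ** (-1.0 / n) - 1.0)   # scale for exponential clutter
    hits = np.zeros(len(power), dtype=bool)
    for i in range(train + guard, len(power) - train - guard):
        left = power[i - train - guard:i - guard]
        right = power[i + guard + 1:i + guard + train + 1]
        hits[i] = power[i] > alpha * np.concatenate([left, right]).mean()
    return hits

rng = np.random.default_rng(3)
profile = rng.exponential(1.0, 200)   # homogeneous square-law clutter
profile[100] += 40.0                  # injected target
print(np.flatnonzero(ca_cfar(profile)))   # -> [100] (typically)
\n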
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Random phase center motion technique for enhanced angle-Doppler discrimination using MIMO radars.\n \n \n \n \n\n\n \n Hammes, C.; Shankar, M. R. B.; Nijsure, Y.; Spielmann, T.; and Ottersten, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2221-2225, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RandomPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081604,\n  author = {C. Hammes and M. R. B. Shankar and Y. Nijsure and T. Spielmann and B. Ottersten},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Random phase center motion technique for enhanced angle-Doppler discrimination using MIMO radars},\n  year = {2017},\n  pages = {2221-2225},\n  abstract = {A random Phase Center Motion (PCM) technique is presented in this paper, based on Frequency Modulated Continuous Wave (FMCW) radar, in order to suppress the angle-Doppler coupling in Time Division Multiplex (TDM) Multiple-Input-Multiple-Output (MIMO) radar when employing sparse array structures. The presented approach exploits an apparently moving transmit platform or PCM due to spatio-temporal transmit array modulation. In particular, the work considers a framework utilizing a random PCM trajectory. The statistical characterization of the random PCM trajectory is devised, such that the PCM and the target motion coupling is minimal, while the angular resolution is increased by enabling the virtual MIMO concept. In more details, this paper discusses sidelobe suppression approaches within the angle-Doppler Ambiguity Function (AF) by introducing a phase center probability density function within the array. This allows for enhanced discrimination of multiple targets. Simulation results demonstrate the suppression angle-Doppler coupling by more than 30 dB, even though spatio-temporal transmit array modulation is done across chirps which leads usually to strong angle-Doppler coupling.},\n  keywords = {CW radar;Doppler radar;FM radar;MIMO radar;probability;time division multiplexing;random phase center motion technique;enhanced angle-Doppler discrimination;MIMO radars;Frequency Modulated Continuous Wave radar;Time Division Multiplex Multiple-Input-Multiple-Output radar;sparse array structures;spatio-temporal transmit array modulation;random PCM trajectory;target motion coupling;virtual MIMO concept;angle-Doppler Ambiguity Function;phase center probability density function;suppression angle-Doppler coupling;noise figure 30.0 dB;Integrated circuits;Pulse modulation;Chirp;Doppler effect;Trajectory;Couplings;Time division multiplexing},\n  doi = {10.23919/EUSIPCO.2017.8081604},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347462.pdf},\n}\n\n
\n
\n\n\n
\n A random Phase Center Motion (PCM) technique, based on Frequency Modulated Continuous Wave (FMCW) radar, is presented in this paper in order to suppress the angle-Doppler coupling in Time Division Multiplex (TDM) Multiple-Input-Multiple-Output (MIMO) radar when employing sparse array structures. The presented approach exploits an apparently moving transmit platform, or PCM, created by spatio-temporal transmit array modulation. In particular, the work considers a framework utilizing a random PCM trajectory. The statistical characterization of the random PCM trajectory is devised such that the coupling between the PCM and the target motion is minimal, while the angular resolution is increased by enabling the virtual MIMO concept. In more detail, this paper discusses sidelobe suppression approaches within the angle-Doppler Ambiguity Function (AF) by introducing a phase center probability density function within the array. This allows for enhanced discrimination of multiple targets. Simulation results demonstrate suppression of the angle-Doppler coupling by more than 30 dB, even though the spatio-temporal transmit array modulation is performed across chirps, which usually leads to strong angle-Doppler coupling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hybrid optimization method for cognitive and MIMO radar code design.\n \n \n \n \n\n\n \n Aittomäki, T.; and Koivunen, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2226-2229, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"HybridPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081605,\n  author = {T. Aittomäki and V. Koivunen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Hybrid optimization method for cognitive and MIMO radar code design},\n  year = {2017},\n  pages = {2226-2229},\n  abstract = {Cognitive and MIMO radars need to adapt the transmitted waveforms based on the radar task as well as the propagation and the target environments. Many waveform optimization methods proposed in the literature for optimizing the sidelobe and cross-correlation levels are based on stochastic search algorithms or slow numerical approximation methods. However, for real-time applications, it is necessary to perform the optimization fast since the radar channels and target parameters may vary rapidly. For this purpose, we propose a hybrid optimization approaches based on gradient and randomization for fast optimization of the transmit waveform codes.},\n  keywords = {approximation theory;gradient methods;MIMO radar;optimisation;radar signal processing;search problems;stochastic processes;waveform optimization methods;cross-correlation levels;stochastic search algorithms;radar channels;target parameters;hybrid optimization approaches;fast optimization;transmit waveform codes;hybrid optimization method;cognitive radar code design;MIMO radar code design;radar task;target environments;numerical approximation methods;Doppler effect;MIMO radar;Delays;Simulated annealing;Linear programming;Cognitive radar;MIMO radar;waveform design;optimization;constant-modulus waveforms},\n  doi = {10.23919/EUSIPCO.2017.8081605},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346965.pdf},\n}\n\n
\n
\n\n\n
\n Cognitive and MIMO radars need to adapt the transmitted waveforms based on the radar task as well as the propagation and target environments. Many waveform optimization methods proposed in the literature for optimizing the sidelobe and cross-correlation levels are based on stochastic search algorithms or slow numerical approximation methods. However, for real-time applications, it is necessary to perform the optimization quickly, since the radar channels and target parameters may vary rapidly. For this purpose, we propose a hybrid optimization approach based on gradients and randomization for fast optimization of the transmit waveform codes.\n
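\n A generic illustration of the gradient-plus-randomization idea (our own sketch, not the authors' algorithm): refine the phases of a constant-modulus code with a gradient-based local solver, then restart from a random perturbation of the incumbent and keep the best local optimum found.\n

import numpy as np
from scipy.optimize import minimize

def isl(phases):
    """Integrated sidelobe level of the unimodular code exp(j*phases)."""
    x = np.exp(1j * phases)
    r = np.correlate(x, x, mode='full')    # numpy conjugates the 2nd arg
    r = np.delete(r, len(x) - 1)           # drop the zero-lag peak
    return float(np.sum(np.abs(r) ** 2))

rng = np.random.default_rng(1)
N, restarts = 32, 8
best_val, best_phases = np.inf, None
phases = rng.uniform(0.0, 2.0 * np.pi, N)
for _ in range(restarts):
    # Gradient-based local refinement (L-BFGS with numerical gradients)...
    res = minimize(isl, phases, method='L-BFGS-B')
    if res.fun < best_val:
        best_val, best_phases = res.fun, res.x
    # ...followed by a randomized perturbation of the incumbent.
    phases = best_phases + rng.normal(0.0, 0.3, N)
print('best ISL found:', best_val)
\n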
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Array spatial thinning for interference mitigation by semidefinite programming.\n \n \n \n \n\n\n \n Nosrati, H.; Aboutanios, E.; and Smith, D. B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2230-2234, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ArrayPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081606,\n  author = {H. Nosrati and E. Aboutanios and D. B. Smith},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Array spatial thinning for interference mitigation by semidefinite programming},\n  year = {2017},\n  pages = {2230-2234},\n  abstract = {We study the problem of interference mitigation in a phased array, where a subset containing k out of a total of N receivers creates a virtual spatial null for an incoming interference. The signal-of-interest and interference are represented by their corresponding steering vectors, and an optimum subarray is chosen such that the two vectors are as orthogonal as possible. This optimization is a binary quadratic non-convex minimization. We propose a semidefinite programming method to find suboptimal solutions using an optimal randomized sampling strategy. We show that the proposed method provides solutions as good as an exhaustive search with a cubic computational complexity. Furthermore, the proposed algorithm outperforms existing methods by solving the problem in a higher dimensionality.},\n  keywords = {antenna phased arrays;array signal processing;computational complexity;concave programming;interference suppression;matrix algebra;minimisation;signal sampling;steering vectors;signal-of-interest;incoming interference;virtual spatial null;phased array;interference mitigation;array spatial thinning;optimal randomized sampling strategy;semidefinite programming method;binary quadratic nonconvex minimization;optimum subarray;Signal processing algorithms;Correlation;Phased arrays;Programming;Interference;Minimization;Optimization;Array thinning;antenna selection;binary quadratic constrained programming;semidefinite programming;convex optimization},\n  doi = {10.23919/EUSIPCO.2017.8081606},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347711.pdf},\n}\n\n
\n
\n\n\n
\n We study the problem of interference mitigation in a phased array, where a subset containing k out of a total of N receivers creates a virtual spatial null for an incoming interference. The signal-of-interest and the interference are represented by their corresponding steering vectors, and an optimum subarray is chosen such that the two vectors are as orthogonal as possible. This optimization is a binary quadratic non-convex minimization. We propose a semidefinite programming method that finds suboptimal solutions using an optimal randomized sampling strategy. We show that the proposed method provides solutions as good as those of an exhaustive search, at only cubic computational complexity. Furthermore, the proposed algorithm outperforms existing methods by solving the problem in a higher dimensionality.\n
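\n One compact way to see the relaxation (our sketch of the generic Shor lifting plus Gaussian randomization, written with cvxpy; the paper's exact formulation and sampling strategy may differ): choose a 0/1 selection vector b with sum k that minimizes |sum_n b_n s_n* i_n|^2, lift B ~ bb' into a PSD matrix, solve the SDP, then round random samples drawn around the relaxed solution.\n

import numpy as np
import cvxpy as cp

rng = np.random.default_rng(2)
N, k = 12, 6
s = np.exp(2j * np.pi * rng.random(N))   # signal-of-interest steering vector
v = np.exp(2j * np.pi * rng.random(N))   # interference steering vector
c = np.conj(s) * v
Q = np.outer(c, np.conj(c)).real         # b @ Q @ b = |c^H b|^2 for real b

# Shor relaxation of: min b'Qb  s.t.  b in {0,1}^N, sum(b) = k.
# diag(B) == b plus PSD-ness of M implies 0 <= b <= 1 automatically.
M = cp.Variable((N + 1, N + 1), PSD=True)   # M models [[1, b'], [b, B]]
b, B = M[0, 1:], M[1:, 1:]
constraints = [M[0, 0] == 1, cp.diag(B) == b, cp.sum(b) == k]
cp.Problem(cp.Minimize(cp.trace(Q @ B)), constraints).solve()

# Gaussian randomization: sample around the relaxed solution, round each
# sample to its top-k entries, and keep the best feasible selection.
mean = np.asarray(b.value).ravel()
cov = B.value - np.outer(mean, mean) + 1e-9 * np.eye(N)  # PSD jitter
best_val, best_sel = np.inf, None
for _ in range(200):
    z = rng.multivariate_normal(mean, cov)
    sel = np.zeros(N)
    sel[np.argsort(z)[-k:]] = 1.0
    if sel @ Q @ sel < best_val:
        best_val, best_sel = sel @ Q @ sel, sel
print(best_val, best_sel)
\n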
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Boosting LINC systems combiners efficiency through ring-type magnitude modulation.\n \n \n \n \n\n\n \n Castanheira, M.; Simíões, A.; Gomes, M.; Dinis, R.; and Silva, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2235-2238, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"BoostingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081607,\n  author = {M. Castanheira and A. Simíões and M. Gomes and R. Dinis and V. Silva},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Boosting LINC systems combiners efficiency through ring-type magnitude modulation},\n  year = {2017},\n  pages = {2235-2238},\n  abstract = {This paper proposes a transmitter structure that combines a ring-type magnitude modulation (RMM) technique with a linear amplification with nonlinear components (LINC) scheme for power and spectrally efficient transmission based on bandwidth limited OQPSK signals, for either a linear combiner (LC) or a Chireix combiner (CC). It shows that by controlling the transmitted signal's envelope through RMM, the range of the LINC decomposition angle is considerably decreased. This significantly improves LC's power efficiency, and substantially reduces CC's spectral leakage while maintaining its high amplification efficiency.},\n  keywords = {power amplifiers;power combiners;quadrature phase shift keying;transmitters;LINC systems combiners efficiency;transmitter structure;ring-type magnitude modulation;RMM;linear amplification with nonlinear components;spectrally efficient transmission;bandwidth limited OQPSK signals;linear combiner;Chireix combiner;LINC decomposition angle;power efficiency;Transmitters;Modulation;Linearity;Europe;Dynamic range;Gold;LINC;Power Efficiency;Spectral Efficiency;Magnitude Modulation;OQPSK signals;Chireix Combiner;Linear Combiner},\n  doi = {10.23919/EUSIPCO.2017.8081607},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347320.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a transmitter structure that combines a ring-type magnitude modulation (RMM) technique with a linear amplification with nonlinear components (LINC) scheme for power- and spectrally-efficient transmission based on bandwidth-limited OQPSK signals, for either a linear combiner (LC) or a Chireix combiner (CC). It shows that, by controlling the transmitted signal's envelope through RMM, the range of the LINC decomposition angle is considerably decreased. This significantly improves the LC's power efficiency, and substantially reduces the CC's spectral leakage while maintaining its high amplification efficiency.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A novel channel estimation scheme for multicarrier communications with the Type-I even discrete cosine transform.\n \n \n \n \n\n\n \n Domínguez-Jiménez, M. E.; Luengo, D.; Sansigre-Vidal, G.; and Cruz-Roldán, F.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2239-2243, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081608,\n  author = {M. E. Domínguez-Jiménez and D. Luengo and G. Sansigre-Vidal and F. Cruz-Roldán},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A novel channel estimation scheme for multicarrier communications with the Type-I even discrete cosine transform},\n  year = {2017},\n  pages = {2239-2243},\n  abstract = {In this work, the problem of channel estimation in multicarrier communications with the Type-I even discrete cosine transform (DCT1e) is addressed. A novel scheme, based on using the DCT1e, both at the transmitter and the receiver, is introduced. The proposed approach does not require adding any redundancy or knowing the exact length of the channel's impulse response. By constructing a symmetric training sequence at the transmitter with enough leading and tail zeros, we show that an accurate estimation of the channel's impulse response can be attained. Simulations using the ITU-T pedestrian channel B illustrate the good behavior of the proposed scheme in terms of reconstruction signal to noise ratio.},\n  keywords = {channel estimation;discrete cosine transforms;frequency estimation;noise;OFDM modulation;radio transceivers;transient response;multicarrier communications;DCT1e;symmetric training sequence;channel estimation scheme;Type-I even discrete cosine transform;impulse response;Channel estimation;Receivers;Training;Transmitters;Estimation;Discrete Fourier transforms},\n  doi = {10.23919/EUSIPCO.2017.8081608},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347139.pdf},\n}\n\n
\n
\n\n\n
\n In this work, the problem of channel estimation in multicarrier communications with the Type-I even discrete cosine transform (DCT1e) is addressed. A novel scheme based on using the DCT1e at both the transmitter and the receiver is introduced. The proposed approach does not require adding any redundancy or knowing the exact length of the channel's impulse response. By constructing a symmetric training sequence at the transmitter with enough leading and tail zeros, we show that an accurate estimate of the channel's impulse response can be attained. Simulations using the ITU-T pedestrian channel B illustrate the good behavior of the proposed scheme in terms of reconstruction signal-to-noise ratio.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Prequalification of VDSL2 customers for G.fast services.\n \n \n \n \n\n\n \n Statovci, D.; Wolkerstorfer, M.; and Drakulić, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2244-2248, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PrequalificationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081609,\n  author = {D. Statovci and M. Wolkerstorfer and S. Drakulić},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Prequalification of VDSL2 customers for G.fast services},\n  year = {2017},\n  pages = {2244-2248},\n  abstract = {In the near future, digital subscriber line (DSL) network operators will start deploying G.fast technology to stay competitive in the broadband market. G.fast provides data rates of up to 1 Gbps, which is a 10× higher data rate than offered by the widely deployed very high speed DSL 2 (VDSL2) technology. In this paper we propose a novel algorithm to prequalify existing VDSL2 customers for G.fast services. Motivated by testbed and field trial results we also propose a new loop attenuation parameter for prequalification purposes, namely the geometric loop attenuation (GeoLATN) instead of the {"}classical{"} loop attenuation (LATN). G.fast testbed experiments show that on average the difference between the attainable net data rates (AttNDR) prequalfied by our algorithm and reported by G.fast systems is 5%. Furthermore, on average the GeoLATN-based prequalification outperforms the LATN-based prequalification by 5% in terms of AttNDR.},\n  keywords = {attenuation;digital subscriber lines;VDSL2 customers;G.fast services;digital subscriber line network operators;broadband market;testbed field trial results;loop attenuation parameter;prequalification purposes;geometric loop attenuation;attainable net data rates;very high speed DSL 2 technology;GeoLATN-based prequalification;Geology;Attenuation;DSL;Correlation;Signal processing algorithms;Frequency estimation;Channel estimation},\n  doi = {10.23919/EUSIPCO.2017.8081609},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347009.pdf},\n}\n\n
\n
\n\n\n
\n In the near future, digital subscriber line (DSL) network operators will start deploying G.fast technology to stay competitive in the broadband market. G.fast provides data rates of up to 1 Gbps, a data rate 10× higher than that offered by the widely deployed very-high-speed DSL 2 (VDSL2) technology. In this paper, we propose a novel algorithm to prequalify existing VDSL2 customers for G.fast services. Motivated by testbed and field trial results, we also propose a new loop attenuation parameter for prequalification purposes, namely the geometric loop attenuation (GeoLATN), instead of the \"classical\" loop attenuation (LATN). G.fast testbed experiments show that, on average, the difference between the attainable net data rates (AttNDR) prequalified by our algorithm and those reported by G.fast systems is 5%. Furthermore, on average, the GeoLATN-based prequalification outperforms the LATN-based prequalification by 5% in terms of AttNDR.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Traffic-aware transmission mode selection in D2D-enabled cellular networks with token system.\n \n \n \n \n\n\n \n Yuan, Y.; Yang, T.; Feng, H.; Hu, B.; Zhang, J.; Wang, B.; and Lu, Q.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2249-2253, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Traffic-awarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081610,\n  author = {Y. Yuan and T. Yang and H. Feng and B. Hu and J. Zhang and B. Wang and Q. Lu},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Traffic-aware transmission mode selection in D2D-enabled cellular networks with token system},\n  year = {2017},\n  pages = {2249-2253},\n  abstract = {We consider a D2D-enabled cellular network where user equipments (UEs) owned by rational users are incentivized to form D2D pairs using tokens. They exchange tokens electronically to {"}buy{"} and {"}sell{"} D2D services. Meanwhile the devices have the ability to choose the transmission mode, i.e. receiving data via cellular links or D2D links. Thus taking the different benefits brought by diverse traffic types as a prior, the UEs can utilize their tokens more efficiently via transmission mode selection. In this paper, the optimal transmission mode selection strategy as well as token collection policy are investigated to maximize the long-term utility in the dynamic network environment. The optimal policy is proved to be a threshold strategy, and the thresholds have a monotonicity property. Numerical simulations verify our observations and the gain from transmission mode selection is observed.},\n  keywords = {cellular radio;telecommunication traffic;dynamic network environment;traffic-aware transmission mode selection;token system;user equipments;cellular links;diverse traffic types;optimal transmission mode selection strategy;token collection policy;D2D pairs;D2D-enabled cellular networks;Device-to-device communication;Cellular networks;Numerical models;Europe;Signal processing;Numerical simulation;Waste materials},\n  doi = {10.23919/EUSIPCO.2017.8081610},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346311.pdf},\n}\n\n
\n
\n\n\n
\n We consider a D2D-enabled cellular network where user equipments (UEs) owned by rational users are incentivized to form D2D pairs using tokens. They exchange tokens electronically to \"buy\" and \"sell\" D2D services. Meanwhile, the devices can choose the transmission mode, i.e., receiving data via cellular links or D2D links. Thus, taking as prior knowledge the different benefits brought by diverse traffic types, the UEs can utilize their tokens more efficiently via transmission mode selection. In this paper, the optimal transmission mode selection strategy and token collection policy are investigated to maximize the long-term utility in a dynamic network environment. The optimal policy is proved to be a threshold strategy, and the thresholds have a monotonicity property. Numerical simulations verify our observations, and the gain from transmission mode selection is confirmed.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Toward green communications using semi-blind channel estimation.\n \n \n \n \n\n\n \n Ladaycia, A.; Mokraoui, A.; Abed-Meraim, K.; and Belouchrani, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2254-2258, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"TowardPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081611,\n  author = {A. Ladaycia and A. Mokraoui and K. Abed-Meraim and A. Belouchrani},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Toward green communications using semi-blind channel estimation},\n  year = {2017},\n  pages = {2254-2258},\n  abstract = {In most Multiple-Input Multiple-Output Orthogonal Frequency Division Multiplexing (MIMO-OFDM) systems, channel estimation is required for equalization and symbol detection. It often exploits the specified pilot symbols consuming not only a large part of the throughput but also significant power resources. This paper quantifies the theoretical maximum power reduction of the transmitted pilots when semiblind channel estimator is deployed while ensuring the same pilot-based channel estimation performance for BPSK/QPSK data models and a block-type pilot arrangement as specified in the IEEE 802.11n standard. A Least Square Decision Feedback (LS-DF) semi-blind channel estimator is then considered showing that a reduction of 76% of the pilot's power is obtained compared to the LS pilot-based estimator for the same channel estimation performance.},\n  keywords = {channel estimation;decision feedback equalisers;least squares approximations;MIMO communication;OFDM modulation;quadrature phase shift keying;wireless LAN;symbol detection;semiblind channel estimator;multiple-output orthogonal frequency division multiplexing systems;channel estimation;power resources;equalization;semiblind channel estimation;Channel estimation;OFDM;Signal processing algorithms;Training;Mathematical model;Equalizers;Throughput},\n  doi = {10.23919/EUSIPCO.2017.8081611},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342152.pdf},\n}\n\n
\n
\n\n\n
\n In most Multiple-Input Multiple-Output Orthogonal Frequency Division Multiplexing (MIMO-OFDM) systems, channel estimation is required for equalization and symbol detection. It often exploits dedicated pilot symbols, which consume not only a large part of the throughput but also significant power resources. This paper quantifies the theoretical maximum power reduction of the transmitted pilots when a semi-blind channel estimator is deployed while ensuring the same pilot-based channel estimation performance, for BPSK/QPSK data models and a block-type pilot arrangement as specified in the IEEE 802.11n standard. A Least Squares Decision Feedback (LS-DF) semi-blind channel estimator is then considered, showing that a 76% reduction in pilot power is obtained compared to the LS pilot-based estimator for the same channel estimation performance.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-blind Student's t source separation for multichannel audio convolutive mixtures.\n \n \n \n \n\n\n \n Leglaive, S.; Badeau, R.; and Richard, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2259-2263, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-blindPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081612,\n  author = {S. Leglaive and R. Badeau and G. Richard},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Semi-blind student's t source separation for multichannel audio convolutive mixtures},\n  year = {2017},\n  pages = {2259-2263},\n  abstract = {This paper addresses the problem of multichannel audio source separation in under-determined convolutive mixtures. We target a semi-blind scenario assuming that the mixing filters are known. The convolutive mixing process is exactly modeled using the time-domain impulse responses of the mixing filters. We propose a Student's t time-frequency source model based on non-negative matrix factorization (NMF). The Student's t distribution being heavy-tailed with respect to the Gaussian, it provides some flexibility in the modeling of the sources. We also study a simpler Student's t sparse source model within the same general source separation framework. The inference procedure relies on a variational expectation-maximization algorithm. Experiments show the advantage of using an NMF model compared with the sparse source model. While the Student's t NMF source model leads to slightly better results than our previous Gaussian one, we demonstrate the superiority of our method over two other approaches from the literature.},\n  keywords = {audio signal processing;blind source separation;convolution;expectation-maximisation algorithm;filtering theory;Gaussian distribution;matrix decomposition;time-frequency analysis;transient response;multichannel audio convolutive mixtures;multichannel audio source separation;mixing filters;time-domain impulse responses;nonnegative matrix factorization;general source separation framework;semiblind student t source separation;student's t time-frequency source model;student's t distribution;student's t sparse source model;student's t NMF source model;expectation-maximization algorithm;inference procedure;Source separation;Signal processing algorithms;Probability density function;Europe;Time-domain analysis;Sparse matrices;Under-determined audio source separation;multichannel convolutive mixture;Student's t distribution;non-negative matrix factorization;variational inference},\n  doi = {10.23919/EUSIPCO.2017.8081612},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342184.pdf},\n}\n\n

Exploring sound source separation for acoustic condition monitoring in industrial scenarios.
Cano, E.; Nowak, J.; and Grollmisch, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2264-2268, Aug 2017.

@InProceedings{8081613,
  author = {E. Cano and J. Nowak and S. Grollmisch},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Exploring sound source separation for acoustic condition monitoring in industrial scenarios},
  year = {2017},
  pages = {2264-2268},
  abstract = {This paper evaluates the application of three methods for Sound Source Separation (SSS) in industrial acoustic condition monitoring scenarios. To evaluate the impact of SSS, we use a machine learning approach where a classifier is trained to detect a specific operating machine. The evaluation procedure is based on simulated and measured data, comprising three different machine sounds as targets and 10 interfering signals. Various intermixing levels of target and interfering signal are taken into account, using three different signal-to-interference ratios. Results show that the chosen source separation methods, originally developed for music analysis, work well for industrial signals, significantly improving the classification accuracy.},
  keywords = {acoustic signal processing;blind source separation;condition monitoring;fault diagnosis;learning (artificial intelligence);mechanical engineering computing;source separation;sound source separation;SSS;industrial acoustic condition monitoring scenarios;simulated measured data;interfering signal;signal-to-interference ratios;machine learning approach;machine sounds;Signal to noise ratio;Microphones;Source separation;Acoustics;Condition monitoring;Interference;Signal processing algorithms},
  doi = {10.23919/EUSIPCO.2017.8081613},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346961.pdf},
}

Analysis of the robustness of neural network-based target activity detection.
Meier, S.; Gerber, D.; and Kellermann, W.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2269-2273, Aug 2017.

@InProceedings{8081614,
  author = {S. Meier and D. Gerber and W. Kellermann},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Analysis of the robustness of neural network-based target activity detection},
  year = {2017},
  pages = {2269-2273},
  abstract = {Many applications in audio signal processing require a precise identification of time frames where a predefined target source is active. In previous work, Artificial Neural Networks (ANNs) with crosscorrelation features showed a considerable potential in this field. In this paper, the performance of ANN-based target activity detection is analyzed in more detail and compared with a well-performing {"}classical{"} signal processing method. On the one hand, the impact of the angular distance between target source and interferers is evaluated for both the neural network-based method and the classical one. On the other hand, the sensitivity of both methods to varying Signal-to-Noise Ratio (SNR) conditions is analyzed with respect to the importance of a proper choice of detection thresholds. In the evaluations, the ANN-based method proves its general superiority and also its robustness with respect to a non-ideal choice of detection thresholds.},
  keywords = {audio signal processing;neural nets;signal detection;target activity detection;audio signal processing;precise identification;time frames;predefined target source;Artificial Neural Networks;ANN;crosscorrelation features;classical signal processing method;Signal-to-Noise Ratio conditions;detection thresholds;Microphones;Training;Signal to noise ratio;Noise measurement;Speech;Robots},
  doi = {10.23919/EUSIPCO.2017.8081614},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347017.pdf},
}
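
A common way to realize cross-correlation features of the kind this abstract mentions is GCC-PHAT between two microphone frames. The sketch below is one such hypothetical feature extractor (frame length, lag range, and the synthetic delay are assumptions); its output vector would be what gets fed to an ANN classifier.

import numpy as np

def gcc_phat(x1, x2, n_lags=16):
    # GCC-PHAT cross-correlation between two frames; one plausible choice of
    # cross-correlation feature (the paper's exact features are not reproduced).
    n = len(x1) + len(x2)
    X1, X2 = np.fft.rfft(x1, n=n), np.fft.rfft(x2, n=n)
    cross = X1 * np.conj(X2)
    cross /= np.abs(cross) + 1e-12                 # phase transform (PHAT)
    cc = np.fft.irfft(cross, n=n)
    return np.concatenate([cc[-n_lags:], cc[:n_lags + 1]])  # lags -n_lags..n_lags

rng = np.random.default_rng(2)
frame = rng.normal(size=512)                       # assumed reference mic frame
delayed = np.roll(frame, 3) + 0.1 * rng.normal(size=512)  # second mic, 3-sample delay
features = gcc_phat(delayed, frame)                # feature vector for the classifier
print("estimated delay:", np.argmax(features) - 16)  # prints 3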

Speaker extraction using LCMV beamformer with DNN-based SPP and RTF identification scheme.
Malek, A.; Chazan, S. E.; Malka, I.; Tourbabin, V.; Goldberger, J.; Tzirkel-Hancock, E.; and Gannot, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2274-2278, Aug 2017.

@InProceedings{8081615,
  author = {A. Malek and S. E. Chazan and I. Malka and V. Tourbabin and J. Goldberger and E. Tzirkel-Hancock and S. Gannot},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Speaker extraction using LCMV beamformer with DNN-based SPP and RTF identification scheme},
  year = {2017},
  pages = {2274-2278},
  abstract = {The linearly constrained minimum variance (LCMV)-beamformer (BF) is a viable solution for desired source extraction from a mixture of speakers in a noisy environment. The performance in terms of speech distortion, interference cancellation and noise reduction depends on the estimation of a set of parameters. This paper presents a new mechanism to update the parameters of the LCMV-BF. A new speech presence probability (SPP)-based voice activity detector (VAD) controls the noise covariance matrix update, and a speaker position identifier (SPI) procedure controls the relative transfer functions (RTFs) update. A postfilter is then applied to the BF output to further attenuate the residual noise signal. A series of experiments using real-life recordings confirm the speech enhancement capabilities of the proposed algorithm.},
  keywords = {blind source separation;covariance matrices;probability;speaker recognition;speech enhancement;interference cancellation;noise reduction;LCMV-BF;speech presence probability;SPP;voice activity detector;speaker extraction;LCMV beamformer;DNN;RTF identification scheme;linearly constrained minimum variance-beamformer;speech distortion;covariance matrix;speech enhancement;source extraction;speaker position identifier;Noise measurement;Microphones;Libraries;Speech;Estimation;Artificial neural networks;Speech enhancement},
  doi = {10.23919/EUSIPCO.2017.8081615},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347174.pdf},
}
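
The closed-form LCMV weights underlying this beamformer are standard: w = R^{-1}C(C^H R^{-1}C)^{-1}g. The sketch below evaluates that formula for an assumed noise covariance R and constraint matrix C of RTFs; in the paper these quantities are supplied by the DNN-based SPP and the RTF identification scheme, which are not reproduced here.

import numpy as np

rng = np.random.default_rng(3)
M = 6                      # assumed number of microphones
# Assumed known quantities (in the paper, estimated by the SPP/SPI machinery):
R = np.eye(M) + 0.1 * rng.normal(size=(M, M))
R = R @ R.T                                                  # SPD noise covariance
C = rng.normal(size=(M, 2)) + 1j * rng.normal(size=(M, 2))   # RTFs: target, interferer
g = np.array([1.0, 0.0])   # pass the target undistorted, null the interferer

# LCMV solution: w = R^{-1} C (C^H R^{-1} C)^{-1} g
Rinv_C = np.linalg.solve(R, C)
w = Rinv_C @ np.linalg.solve(C.conj().T @ Rinv_C, g)

print("response to target    :", np.abs(w.conj() @ C[:, 0]))  # ~1
print("response to interferer:", np.abs(w.conj() @ C[:, 1]))  # ~0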

Acoustic scene classification based on generative model of acoustic spatial words for distributed microphone array.
Imoto, K.; and Ono, N.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2279-2283, Aug 2017.

@InProceedings{8081616,
  author = {K. Imoto and N. Ono},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Acoustic scene classification based on generative model of acoustic spatial words for distributed microphone array},
  year = {2017},
  pages = {2279-2283},
  abstract = {In this paper, we propose an acoustic scene classification method for a distributed microphone array based on a combination of spatial information of multiple sound events. In the proposed method, each acoustic scene is characterized by a spatial information representation based on a bag-of-words called the bag of acoustic spatial words. To calculate the bag-of-acoustic spatial words, spatial features extracted from multichannel observations are quantized and then aggregated over a sound clip, that is, each sound clip is regarded as a unit of a {"}document.{"} Moreover, a supervised generative model relating acoustic scenes and bag-of-acoustic spatial words is also adapted, which enables robust acoustic scene classification. Experimental results using actual environmental sounds show that the proposed approach achieves more effective performance than the conventional acoustic scene classification approach not utilizing a combination of the spatial information of multiple sound events.},
  keywords = {acoustic signal processing;feature extraction;microphone arrays;signal classification;distributed microphone array;acoustic scene classification method;multiple sound events;spatial information representation;bag-of-acoustic spatial words;spatial features;sound clip;robust acoustic scene classification;spatial features extraction;multichannel observations;Acoustics;Microphone arrays;Adaptation models;Acoustic arrays;Feature extraction;Histograms},
  doi = {10.23919/EUSIPCO.2017.8081616},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347357.pdf},
}
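
To illustrate the bag-of-acoustic-spatial-words idea, the sketch below quantizes per-frame spatial feature vectors against a k-means codebook and histograms the resulting "words" over a clip (the "document"). The 2-D features, codebook size, and tiny Lloyd's iteration are illustrative assumptions, not the paper's setup.

import numpy as np

rng = np.random.default_rng(4)

def kmeans(X, k, iters=20):
    # Tiny Lloyd's k-means used to build the "spatial word" codebook.
    centers = X[rng.choice(len(X), k, replace=False)]
    for _ in range(iters):
        labels = np.argmin(((X[:, None] - centers[None]) ** 2).sum(-1), axis=1)
        for j in range(k):
            if np.any(labels == j):
                centers[j] = X[labels == j].mean(0)
    return centers

def bag_of_spatial_words(frames, centers):
    # Quantize per-frame spatial features and histogram them over the clip.
    labels = np.argmin(((frames[:, None] - centers[None]) ** 2).sum(-1), axis=1)
    hist = np.bincount(labels, minlength=len(centers)).astype(float)
    return hist / hist.sum()

# Assumed toy data: 2-D spatial features (e.g., inter-node level differences)
train = rng.normal(size=(500, 2))
codebook = kmeans(train, k=16)
clip = rng.normal(size=(120, 2))          # one sound clip = one "document"
print(bag_of_spatial_words(clip, codebook))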

Coordinate-descent adaptation over networks.
Wang, C.; Zhang, Y.; Ying, B.; and Sayed, A. H.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2284-2288, Aug 2017.

@InProceedings{8081617,
  author = {C. Wang and Y. Zhang and B. Ying and A. H. Sayed},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Coordinate-descent adaptation over networks},
  year = {2017},
  pages = {2284-2288},
  abstract = {This work examines the mean-square error performance of diffusion stochastic algorithms under a generalized coordinate-descent scheme. In this setting, the adaptation step by each agent is limited to a random subset of the coordinates of its stochastic gradient vector. The selection of which coordinates to use varies randomly from iteration to iteration and from agent to agent across the network. Such schemes are useful in reducing computational complexity in power-intensive large data applications. The results show that the steady-state performance of the learning strategy is not affected, while the convergence rate suffers some degradation. The results provide yet another indication of the resilience and robustness of adaptive distributed strategies.},
  keywords = {computational complexity;convergence of numerical methods;gradient methods;iterative methods;learning (artificial intelligence);mean square error methods;optimisation;stochastic processes;diffusion stochastic algorithms;coordinate-descent scheme;random subset;stochastic gradient vector;iteration;computational complexity;power-intensive large data applications;adaptive distributed strategies;mean-square error performance;learning strategy;coordinate-descent adaptation;convergence;Stochastic processes;Signal processing algorithms;Steady-state;Convergence;Covariance matrices;Europe;Signal processing;Coordinate descent;stochastic partial update;computational complexity;diffusion strategies;stochastic gradient algorithms},
  doi = {10.23919/EUSIPCO.2017.8081617},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347151.pdf},
}
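
A minimal instance of the scheme studied here: adapt-then-combine diffusion LMS in which each agent updates only a random subset of the coordinates of its stochastic gradient per iteration. Network size, ring topology, step size, and the updated fraction are all assumptions.

import numpy as np

rng = np.random.default_rng(5)
N, M = 10, 8                     # assumed: number of agents, parameter dimension
w_true = rng.normal(size=M)

# Assumed ring topology with uniform combination weights
A = np.zeros((N, N))
for k in range(N):
    A[k, [k, (k - 1) % N, (k + 1) % N]] = 1 / 3

W = np.zeros((N, M))             # per-agent estimates
mu, frac = 0.05, 0.5             # step size; fraction of coordinates updated

for _ in range(2000):
    psi = np.empty_like(W)
    for k in range(N):
        x = rng.normal(size=M)                     # regressor at agent k
        d = x @ w_true + 0.05 * rng.normal()       # noisy measurement
        grad = (d - x @ W[k]) * x                  # LMS stochastic gradient
        mask = rng.random(M) < frac                # random coordinate subset
        psi[k] = W[k] + mu * mask * grad           # adapt only selected coordinates
    W = A @ psi                                    # combine with neighbors (ATC)

print("mean-square deviation:", np.mean((W - w_true) ** 2))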

Distributed recursive least squares strategies for adaptive reconstruction of graph signals.
Di Lorenzo, P.; Isufi, E.; Banelli, P.; Barbarossa, S.; and Leus, G.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2289-2293, Aug 2017.

@InProceedings{8081618,
  author = {P. {Di Lorenzo} and E. Isufi and P. Banelli and S. Barbarossa and G. Leus},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Distributed recursive least squares strategies for adaptive reconstruction of graph signals},
  year = {2017},
  pages = {2289-2293},
  abstract = {This work proposes distributed recursive least squares (RLS) strategies for adaptive reconstruction and learning of signals defined over graphs. First, we introduce a centralized RLS estimation strategy with probabilistic sampling, and we propose a sparse sensing method that selects the sampling probability at each node in the graph in order to guarantee adaptive signal reconstruction and a target steady-state performance. Then, a distributed RLS strategy is derived and is shown to be convergent to its centralized counterpart. The performed numerical tests show the performance of the proposed adaptive method for distributed learning of graph signals.},
  keywords = {adaptive signal processing;compressed sensing;convergence;graph theory;least squares approximations;probability;recursive estimation;signal reconstruction;recursive least squares strategies;graph signals;distributed learning;distributed RLS strategy;target steady-state performance;adaptive signal reconstruction;sampling probability;sparse sensing method;probabilistic sampling;centralized RLS estimation strategy;Nickel;Estimation;Signal processing;Europe;Topology;Sensors;Steady-state;Recursive least squares estimation;graph signal processing;sampling;adaptive networks},
  doi = {10.23919/EUSIPCO.2017.8081618},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347381.pdf},
}
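
The building block here is the RLS recursion with randomly sampled node observations. The sketch below runs a centralized RLS estimate of the spectral coefficients of an assumed bandlimited graph signal; the random graph, bandwidth, sampling rule, and forgetting factor are illustrative, and the paper's distributed variant and sampling design are not reproduced.

import numpy as np

rng = np.random.default_rng(6)
N, B = 20, 4                     # assumed: graph nodes, signal bandwidth

# Random undirected graph and its Laplacian eigenbasis (graph Fourier basis)
Adj = (rng.random((N, N)) < 0.2).astype(float)
Adj = np.triu(Adj, 1); Adj = Adj + Adj.T
L = np.diag(Adj.sum(1)) - Adj
_, U = np.linalg.eigh(L)
UB = U[:, :B]                    # assumed bandlimited subspace
s_true = rng.normal(size=B)      # true spectral coefficients

# RLS recursion with per-step random node sampling
P = 100.0 * np.eye(B)            # inverse correlation matrix
s_hat = np.zeros(B)
beta = 0.99                      # forgetting factor
for t in range(2000):
    i = rng.integers(N)                       # node sampled at time t
    u = UB[i]                                 # regressor for that node
    y = u @ s_true + 0.05 * rng.normal()      # noisy observation
    k = P @ u / (beta + u @ P @ u)            # gain vector
    s_hat = s_hat + k * (y - u @ s_hat)       # update estimate
    P = (P - np.outer(k, u @ P)) / beta       # update inverse correlation

print("coefficient error:", np.linalg.norm(s_hat - s_true))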

Distributed dictionary learning via projections onto convex sets.
Ampeliotis, D.; Mavrokefalidis, C.; and Berberidis, K.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2294-2298, Aug 2017.

@InProceedings{8081619,
  author = {D. Ampeliotis and C. Mavrokefalidis and K. Berberidis},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Distributed dictionary learning via projections onto convex sets},
  year = {2017},
  pages = {2294-2298},
  abstract = {We study a problem in which the nodes of a network, each with different data, are interested in computing a common dictionary that is suitable for the efficient sparse coding of all their data. To this end, distributed processing is employed, that is, the nodes merge local and neighboring information. We formulate this as a convex feasibility problem, and propose a suitable distributed algorithm for obtaining a solution that employs projections onto convex sets. A fast method for computing the involved projection operations is also given. The proposed approach allows the associated convex sets to be updated at every iteration of the algorithm, thus resulting in a faster agreement of the nodes on a common dictionary. Simulation results are provided that demonstrate the effectiveness of the proposed scheme in computing a common dictionary, in a scenario where the data of the nodes are significantly different and in a second scenario, in which the nodes have the same data.},
  keywords = {convex programming;distributed algorithms;iterative methods;learning (artificial intelligence);projection operations;distributed dictionary learning;sparse coding;convex sets;distributed algorithm;convex feasibility problem;Dictionaries;Machine learning;Signal processing algorithms;Distributed algorithms;Distributed databases;Inference algorithms;Sparse matrices},
  doi = {10.23919/EUSIPCO.2017.8081619},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347252.pdf},
}
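
The core mechanism, projections onto convex sets (POCS), can be shown in isolation: the sketch below cyclically projects a point onto a halfspace and a Euclidean ball until it lands in their intersection. The dictionary-specific constraint sets and the paper's fast projection method are not reproduced; sets and sizes are arbitrary assumptions.

import numpy as np

rng = np.random.default_rng(7)

def project_halfspace(x, a, b):
    # Projection of x onto the halfspace {y : a.y <= b}.
    viol = a @ x - b
    return x if viol <= 0 else x - viol * a / (a @ a)

def project_ball(x, radius=1.0):
    # Projection onto the Euclidean ball of the given radius.
    n = np.linalg.norm(x)
    return x if n <= radius else x * radius / n

# Cyclic POCS: alternate projections until the iterate lies in the intersection
x = 10 * rng.normal(size=5)
a, b = rng.normal(size=5), 0.5
for _ in range(100):
    x = project_ball(project_halfspace(x, a, b))

print("in halfspace:", a @ x <= b + 1e-9, "| norm:", np.linalg.norm(x))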

Adaptive diffusion-based track assisted multi-object labeling in distributed camera networks.
Teklehaymanot, F. K.; Muma, M.; and Zoubir, A. M.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2299-2303, Aug 2017.

@InProceedings{8081620,
  author = {F. K. Teklehaymanot and M. Muma and A. M. Zoubir},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Adaptive diffusion-based track assisted multi-object labeling in distributed camera networks},
  year = {2017},
  pages = {2299-2303},
  abstract = {The tracking and labeling of multiple objects in multiple cameras is a fundamental task in applications such as video surveillance, autonomous driving, and sports analysis. In an ad-hoc multi-camera network without a fusion center, nodes can benefit from local cooperation to solve signal processing tasks, such as distributed image enhancement. A crucial first step for the successful cooperation of neighboring nodes is to answer the question: Who observes what? In this paper, an adaptive algorithm is proposed that enables cameras with different view points to assign the same identity to the same object across time frames without assuming the availability of camera calibration information or requiring the registration of camera views. Information which is extracted directly from the videos and is shared in the network via a diffusion algorithm is exploited to jointly solve multi-object tracking and labeling problems in a multi-camera network. A real-data use case of pedestrian labeling is provided, which demonstrates that a high labeling accuracy can be achieved in a multi-object multi-camera setup with low video resolution and frequent object occlusions.},
  keywords = {cameras;image enhancement;object tracking;video signal processing;wireless sensor networks;distributed camera networks;ad-hoc multicamera network;local cooperation;adaptive diffusion-based track assisted multi-object labeling;frequent object occlusions;low video resolution;multiobject multicamera setup;pedestrian labeling;multiobject tracking;diffusion algorithm;adaptive algorithm;distributed image enhancement;signal processing tasks;Cameras;Feature extraction;Labeling;Signal processing algorithms;Signal processing;Clustering algorithms;Image color analysis},
  doi = {10.23919/EUSIPCO.2017.8081620},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346656.pdf},
}

Distributed efficient multimodal data clustering.
Chen, J.; and Schizas, I. D.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2304-2308, Aug 2017.

@InProceedings{8081621,
  author = {J. Chen and I. D. Schizas},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Distributed efficient multimodal data clustering},
  year = {2017},
  pages = {2304-2308},
  abstract = {Clustering of multimodal data according to their information content is considered in this paper. Statistical correlations present in data that contain similar information are exploited to perform the clustering task. Specifically, multiset canonical correlation analysis is equipped with norm-one regularization mechanisms to identify clusters within different types of data that share the same information content. A pertinent minimization formulation is put forth, while block coordinate descent is employed to derive a batch clustering algorithm which achieves better clustering performance than existing alternatives. Distributed implementations are also considered to cluster spatially clustered data utilizing the alternating direction method of multipliers. Relying on subgradient descent, an online clustering approach is derived which substantially lowers computational complexity compared to the batch approaches. Numerical tests demonstrate that the proposed schemes outperform existing alternatives.},
  keywords = {computational complexity;correlation methods;distributed processing;gradient methods;pattern clustering;statistical analysis;norm-one regularization mechanisms;spatially clustered data clustering;alternating direction method of multipliers;subgradient descent;computational complexity;distributed implementations;clustering performance;batch clustering algorithm;pertinent minimization formulation;multiset canonical correlation analysis;clustering task;statistical correlations;information content;distributed efficient multimodal data clustering;existing alternatives;online clustering approach;Distributed databases;Correlation;Minimization;Europe;Signal processing;Sensors;Indexes},
  doi = {10.23919/EUSIPCO.2017.8081621},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343311.pdf},
}

Speech enhancement using modulation-domain Kalman filtering with active speech level normalized log-spectrum global priors.
Dionelis, N.; and Brookes, M.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2309-2313, Aug 2017.

@InProceedings{8081622,
  author = {N. Dionelis and M. Brookes},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Speech enhancement using modulation-domain Kalman filtering with active speech level normalized log-spectrum global priors},
  year = {2017},
  pages = {2309-2313},
  abstract = {We describe a single-channel speech enhancement algorithm that is based on modulation-domain Kalman filtering that tracks the inter-frame time evolution of the speech log-power spectrum in combination with the long-term average speech log-spectrum. We use offline-trained log-power spectrum global priors incorporated in the Kalman filter prediction and update steps for enhancing noise suppression. In particular, we train and utilize Gaussian mixture model priors for speech in the log-spectral domain that are normalized with respect to the active speech level. The Kalman filter update step uses the log-power spectrum global priors together with the local priors obtained from the Kalman filter prediction step. The log-spectrum Kalman filtering algorithm, which uses the theoretical phase factor distribution and improves the modeling of the modulation features, is evaluated in terms of speech quality. Different algorithm configurations, dependent on whether global priors and/or Kalman filter noise tracking are used, are compared in various noise types.},
  keywords = {Gaussian processes;Kalman filters;modulation;speech enhancement;speech quality;Kalman filter noise tracking;single-channel speech enhancement algorithm;active speech level normalized log-spectrum global priors;modulation-domain Kalman filtering;phase factor distribution;log spectrum Kalman filtering algorithm;speech log power spectrum;Gaussian mixture model;interframe time evolution;Speech;Speech enhancement;Covariance matrices;Noise measurement;Kalman filters;Signal processing algorithms;Predictive models},
  doi = {10.23919/EUSIPCO.2017.8081622},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340016.pdf},
}
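
At the heart of modulation-domain tracking is a per-frequency-bin Kalman predict/update recursion. Below is a scalar sketch on a simulated log-power trajectory with assumed AR(1) dynamics; the paper's global log-spectrum priors, active-level normalization, and phase-factor model are not included.

import numpy as np

rng = np.random.default_rng(8)

# Assumed AR(1) dynamics for one bin's speech log-power trajectory
a, q, r = 0.95, 0.1, 0.5          # transition, process var, observation var
T = 200
x = np.zeros(T)                   # true log-power (simulated)
for t in range(1, T):
    x[t] = a * x[t - 1] + np.sqrt(q) * rng.normal()
y = x + np.sqrt(r) * rng.normal(size=T)   # noisy observed log-power

m, p = 0.0, 1.0                   # posterior mean / variance
est = np.zeros(T)
for t in range(T):
    m_pred, p_pred = a * m, a * a * p + q          # predict step
    k = p_pred / (p_pred + r)                      # Kalman gain
    m = m_pred + k * (y[t] - m_pred)               # update with observation
    p = (1 - k) * p_pred
    est[t] = m

print("tracking MSE:", np.mean((est - x) ** 2), "vs raw:", np.mean((y - x) ** 2))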

Multiple far noise suppression in a real environment using transfer-function-gain NMF.
Matsui, Y.; Makino, S.; Ono, N.; and Yamada, T.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2314-2318, Aug 2017.

@InProceedings{8081623,
  author = {Y. Matsui and S. Makino and N. Ono and T. Yamada},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Multiple far noise suppression in a real environment using transfer-function-gain NMF},
  year = {2017},
  pages = {2314-2318},
  abstract = {Conventional noise suppression methods based on array signal processing use phase information and control the directivity of noises. However, such methods can hardly suppress so-called background noise, whose arrival direction cannot be specified. Thus, multiple far noise suppression based on transfer-function-gain non-negative matrix factorization (NMF) has been proposed as a method that can suppress such background noise. Its effectiveness has been confirmed by an experimental simulation using convolutional mixtures; however, it has not been verified that it is practical in a real environment. Thus, in this paper, we examine the performance of this method by recording a target and multiple far noises with asynchronous microphones in a real environment. We confirm that this method can suppress far noises in a real environment with diverse distances between microphones and interference sources.},
  keywords = {array signal processing;convolution;matrix decomposition;microphone arrays;signal denoising;transfer functions;transfer-function-gain NMF;background noise suppression;array signal processing;multiple far noise suppression method;noise directivity;noise control;transfer-function-gain nonnegative matrix factorization;NMF;convolutional mixtures;asynchronous microphones;interference sources;Microphones;Noise measurement;Transfer functions;Signal processing;Interference;Time-frequency analysis;Phased arrays},
  doi = {10.23919/EUSIPCO.2017.8081623},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347700.pdf},
}

Antarctic Blue Whale calls detection based on an improved version of the stochastic matched filter.
Bouffaut, L.; Dreo, R.; Labat, V.; Boudraa, A.; and Barruol, G.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2319-2323, Aug 2017.

@InProceedings{8081624,
  author = {L. Bouffaut and R. Dreo and V. Labat and A. Boudraa and G. Barruol},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Antarctic Blue Whale calls detection based on an improved version of the stochastic matched filter},
  year = {2017},
  pages = {2319-2323},
  abstract = {As a first step to Antarctic Blue Whale monitoring, a new method based on a passive application of the Stochastic Matched Filter (SMF) is developed. To perform Z-call detection in noisy environment, improvements on the classical SMF requirements are proposed. The signal's reference is adjusted, the background noise estimation is reevaluated to avoid operator's selection, and the time-dependent Signal to Noise Ratio (SNR) estimation is revised by time-frequency analysis. To highlight the SMF's robustness against noise, it is applied to Ocean Bottom Seismometer hydrophone-recorded data and compared to the classical Matched Filter: the output's SNR is maximized and the false alarm rate drastically decreased.},
  keywords = {bioacoustics;biocommunications;hydrophones;matched filters;seismometers;stochastic processes;time-frequency analysis;underwater sound;stochastic matched filter;Antarctic Blue Whale monitoring;passive application;noisy environment;classical SMF requirements;background noise;time-frequency analysis;SMF robustness;noise ratio estimation;classical matched filter;time-dependent signal;Signal to noise ratio;Noise measurement;Estimation;Whales;Antarctica;Time-frequency analysis;Monitoring},
  doi = {10.23919/EUSIPCO.2017.8081624},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345881.pdf},
}
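
For contrast with the stochastic variant, the classical matched filter the paper compares against reduces to correlating the record with a known template and thresholding the peak. In this sketch the template is a stand-in chirp, and the sample rate, call length, onset, and SNR are all assumptions.

import numpy as np

rng = np.random.default_rng(9)
fs = 250                                   # assumed sample rate (Hz)
t = np.arange(int(2.0 * fs)) / fs          # assumed 2 s call template
# Z-calls are roughly tonal/downswept; this chirp is only a stand-in template
template = np.sin(2 * np.pi * (28 - 2 * t) * t)

signal = 0.5 * rng.normal(size=10 * fs)    # 10 s of background noise
onset = 3 * fs
signal[onset:onset + template.size] += template   # embed one call

# Classical matched filter: correlate the record with the known template
mf_out = np.correlate(signal, template, mode="valid")
peak = np.argmax(mf_out)
print(f"peak at {peak / fs:.2f} s (true onset at {onset / fs:.2f} s)")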

Performance evaluation of nonlinear speech enhancement based on virtual increase of channels in reverberant environments.
Yamaoka, K.; Makino, S.; Ono, N.; and Yamada, T.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2324-2328, Aug 2017.

@InProceedings{8081625,
  author = {K. Yamaoka and S. Makino and N. Ono and T. Yamada},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Performance evaluation of nonlinear speech enhancement based on virtual increase of channels in reverberant environments},
  year = {2017},
  pages = {2324-2328},
  abstract = {In this paper, we evaluate the performance of a maximum signal-to-noise ratio beamformer based on a virtual increase of channels. We previously proposed a new microphone array signal processing technique, which virtually increases the number of microphones by generating extra signal channels from two real microphone signals. This technique generates a virtual observation on the assumption that the sources are W-disjoint orthogonal, which means that only one source is dominant in one time-frequency bin. However, mixed signals with a long reverberation tend to dissatisfy this assumption. In this study, we conducted experiments in a variety of reverberant environments, as well as computer simulation using the image method. As a result, we confirmed that our technique contributes to improving the performance in reverberant environments. We also confirmed that the longer the reverberation time, the smaller the increase in the improvement using our technique. Moreover, we present directivity patterns to confirm the behavior of a virtual increase of channels.},
  keywords = {acoustic signal processing;array signal processing;microphone arrays;reverberation;speech enhancement;nonlinear speech enhancement;reverberant environments;maximum signal-to-noise ratio beamformer;microphone array signal processing technique;extra signal channels;microphone signals;reverberation time;Microphones;Reverberation;Signal to noise ratio;Interpolation;Speech enhancement;Microwave integrated circuits},
  doi = {10.23919/EUSIPCO.2017.8081625},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347780.pdf},
}

Independent vector analysis with frequency range division and prior switching.
Ikeshita, R.; Kawaguchi, Y.; Togami, M.; Fujita, Y.; and Nagamatsu, K.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2329-2333, Aug 2017.

@InProceedings{8081626,
  author = {R. Ikeshita and Y. Kawaguchi and M. Togami and Y. Fujita and K. Nagamatsu},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Independent vector analysis with frequency range division and prior switching},
  year = {2017},
  pages = {2329-2333},
  abstract = {A novel source model is developed to improve the separation performance of independent vector analysis (IVA) for speech mixtures. The source model of IVA generally assumes the same amount of statistical dependency on each pair of frequency bins, which is not effective for speech signals with strong correlations among neighboring frequency bins. In the proposed model, the set of all frequency bins is divided into frequency bands, and the statistical dependency is assumed only within each band to better represent speech signals. In addition, each source prior is switched depending on the source states, active or inactive, since intermittent silent periods have totally different priors from those of speech periods. The optimization of the model is based on an EM algorithm, in which the IVA filters, states of sources, and permutation alignments between each pair of bands are jointly optimized. The experimental results show the effectiveness of the proposed model.},
  keywords = {blind source separation;independent component analysis;optimisation;speech processing;vectors;speech periods;IVA filters;independent vector analysis;frequency range division;source model;separation performance;speech mixtures;statistical dependency;speech signals;frequency bands;source states;intermittent silent periods;prior switching;Speech;Signal processing algorithms;Time-frequency analysis;Frequency conversion;Europe;Signal processing;Noise measurement;Blind source separation;independent vector analysis (IVA);independent component analysis (ICA)},
  doi = {10.23919/EUSIPCO.2017.8081626},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345796.pdf},
}

Surgical tool tracking by on-line selection of structural correlation filters.
Wesierski, D.; and Jezierska, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2334-2338, Aug 2017.

@InProceedings{8081627,
  author = {D. Wesierski and A. Jezierska},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Surgical tool tracking by on-line selection of structural correlation filters},
  year = {2017},
  pages = {2334-2338},
  abstract = {In visual tracking of surgical instruments, correlation filtering finds the best candidate with maximal correlation peak. However, most trackers only consider capturing target appearance but not target structure. In this paper we propose a surgical instrument tracking approach that integrates prior knowledge related to rotation of both shaft and tool tips. To this end, we employ a rigid parts mixtures model of an instrument. The rigidly composed parts encode diverse, pose-specific appearance mixtures of the tool. Tracking search space is confined to the neighbourhood of tool position, scale, and rotation with respect to the previous best estimate such that the rotation constraint translates into querying a subset of templates. Qualitative and quantitative evaluation on challenging benchmarks demonstrates state-of-the-art results.},
  keywords = {image representation;medical computing;medical robotics;object tracking;pose estimation;surgery;surgical tool tracking;on-line selection;structural correlation filters;visual tracking;correlation filtering;surgical instrument tracking approach;pose-specific appearance mixtures;rotation constraint;Shafts;Tools;Correlation;Surgery;Dictionaries;Target tracking;Instruments},
  doi = {10.23919/EUSIPCO.2017.8081627},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347451.pdf},
}
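
A single-channel correlation filter of the kind such trackers build on can be trained in closed form in the frequency domain (MOSSE-style). The sketch below learns a filter from one synthetic patch and localizes a shifted copy; the paper's structural part-based filter mixtures and on-line selection are not reproduced, and the blob "tool" is a stand-in.

import numpy as np

rng = np.random.default_rng(10)

def gaussian_peak(shape, center, sigma=2.0):
    # Desired correlation response: a Gaussian centred on the target.
    y, x = np.ogrid[:shape[0], :shape[1]]
    return np.exp(-((y - center[0]) ** 2 + (x - center[1]) ** 2) / (2 * sigma ** 2))

# Assumed toy patch with a bright blob standing in for the tool tip
patch = rng.normal(scale=0.1, size=(64, 64))
patch[30:34, 40:44] += 1.0
g = gaussian_peak(patch.shape, (32, 42))

F, G = np.fft.fft2(patch), np.fft.fft2(g)
Hstar = G * np.conj(F) / (np.abs(F) ** 2 + 1e-3)   # MOSSE filter from one frame

# Localize a circularly shifted copy of the patch
test = np.roll(np.roll(patch, 5, axis=0), -7, axis=1)
resp = np.real(np.fft.ifft2(np.fft.fft2(test) * Hstar))
print("response peak:", np.unravel_index(np.argmax(resp), resp.shape))  # ~(37, 35)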

MoveHN-A database to support the development of motion based biosignal processing systems.
Kitzig, A.; Schröter, S.; Naroska, E.; Stockmanns, G.; Viga, R.; and Grabmaier, A.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2339-2343, Aug 2017.

@InProceedings{8081628,
  author = {A. Kitzig and S. Schröter and E. Naroska and G. Stockmanns and R. Viga and A. Grabmaier},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {MoveHN-A database to support the development of motion based biosignal processing systems},
  year = {2017},
  pages = {2339-2343},
  abstract = {In the field of signal processing, pattern recognition and also modeling and simulation, it is often necessary to use large data sets. These allow reliable, independent and test case spanning development of algorithms or even of complete systems. The data is usually taken from existing data sets such as TIMIT for speech recognition and processing or EPILEPSIAE to develop algorithms for epileptic seizure prediction, to give just two examples. Apart from the fact that some of these databases imply a considerable cost factor and thus are not accessible to all research groups, even greater problems arise if no data is available at all. In the field of speech recognition, this problem was solved more or less by creating databases. In the area of biosignal processing with a focus on the functionalization of furniture for care and clinical facilities, there is still a need for large data sets. This was also the case with biomechanical modeling of functionalized furniture, since up to now none or few data on the human movement sequences were available. In order to overcome this deficiency, the following paper presents a new database of motion patterns, which is intended to support the development of algorithms for motion detection as well as modeling biosignal processing. The database can be used and downloaded by any interested researcher for free.},
  keywords = {database management systems;medical disorders;medical signal processing;speech recognition;motion based biosignal processing systems;pattern recognition;speech recognition;epileptic seizure prediction;motion patterns;MoveHN-A database;TIMIT;EPILEPSIAE;Hidden Markov models;Mathematical model;Biological system modeling;Databases;Medical services;Sensor systems;Biosignal processing;model driven development;biomechanical modelling;motion pattern database;modelling},
  doi = {10.23919/EUSIPCO.2017.8081628},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342955.pdf},
}

Nonconvulsive epileptic seizures detection using multiway data analysis.
Aldana, Y. R.; Hunyadi, B.; Marañón Reyes, E. J.; Rodríguez, V. R.; and Van Huffel, S.
In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2344-2348, Aug 2017.

@InProceedings{8081629,
  author = {Y. R. Aldana and B. Hunyadi and E. J. {Marañón Reyes} and V. R. Rodríguez and S. {Van Huffel}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Nonconvulsive epileptic seizures detection using multiway data analysis},
  year = {2017},
  pages = {2344-2348},
  abstract = {Nonconvulsive status epilepticus (NCSE) is observed when the patient undergoes a persistent electroencephalographic epileptic episode without physical symptoms. This condition is commonly found in critically ill patients from intensive care units and constitutes a medical emergency. This paper proposes a method to detect nonconvulsive epileptic seizures (NCES). To perform the NCES detection the electroencephalogram (EEG) is represented as a third order tensor with axes frequency × time × channels using Wavelet or Hilbert-Huang transform. The signatures obtained from the tensor decomposition are used to train five classifiers to separate between the normal and seizure EEG. Classification is performed in two ways: (1) with each signature of the different modes separately, (2) with all signatures assembled. The algorithm is tested on a database containing 139 nonconvulsive seizures. From all performed analysis, Hilbert-Huang Tensors Space and assembled signatures demonstrate to be the best features to classify between seizure and non-seizure EEG.},
  keywords = {data analysis;diseases;electroencephalography;medical disorders;medical signal detection;medical signal processing;neurophysiology;signal classification;tensors;wavelet transforms;multiway data analysis;nonconvulsive status epilepticus;persistent electroencephalographic epileptic episode;intensive care units;tensor decomposition;Hilbert-Huang transform;nonconvulsive epileptic seizure detection;seizure EEG classification;Tensile stress;Electroencephalography;Wavelet transforms;Time-frequency analysis;Europe;Signal processing;Support vector machines},
  doi = {10.23919/EUSIPCO.2017.8081629},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346810.pdf},
}
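
The per-mode signatures come from a three-way tensor decomposition. As a stand-in, the sketch below implements plain CP/PARAFAC by alternating least squares on a random frequency × time × channels tensor; the rank, sizes, and noise level are assumptions, and the paper's Wavelet/Hilbert-Huang front end and classifiers are omitted.

import numpy as np

rng = np.random.default_rng(11)

def khatri_rao(A, B):
    # Column-wise Khatri-Rao product (rows ordered with B's index fastest).
    return (A[:, None, :] * B[None, :, :]).reshape(-1, A.shape[1])

def cp_als(X, R, iters=100):
    # Plain CP/PARAFAC via alternating least squares on a 3-way tensor;
    # the factor matrices play the role of the per-mode "signatures".
    dims = X.shape
    A = [rng.normal(size=(d, R)) for d in dims]
    for _ in range(iters):
        for n in range(3):
            o1, o2 = [m for m in range(3) if m != n]
            KR = khatri_rao(A[o1], A[o2])
            Xn = np.moveaxis(X, n, 0).reshape(dims[n], -1)
            gram = (A[o1].T @ A[o1]) * (A[o2].T @ A[o2])
            A[n] = Xn @ KR @ np.linalg.pinv(gram)
    return A

# Assumed toy EEG-like tensor: frequency x time x channels with rank-2 structure
truth = [rng.normal(size=(d, 2)) for d in (12, 50, 8)]
X = np.einsum('fr,tr,cr->ftc', *truth) + 0.01 * rng.normal(size=(12, 50, 8))
A = cp_als(X, R=2)
Xhat = np.einsum('fr,tr,cr->ftc', *A)
print("relative fit error:", np.linalg.norm(X - Xhat) / np.linalg.norm(X))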
\n
\n\n\n
\n Nonconvulsive status epilepticus (NCSE) is observed when the patient undergoes a persistent electroencephalographic epileptic episode without physical symptoms. This condition is commonly found in critically ill patients from intensive care units and constitutes a medical emergency. This paper proposes a method to detect nonconvulsive epileptic seizures (NCES). To perform the NCES detection, the electroencephalogram (EEG) is represented as a third-order tensor with axes frequency × time × channels using the wavelet or Hilbert-Huang transform. The signatures obtained from the tensor decomposition are used to train five classifiers to separate normal from seizure EEG. Classification is performed in two ways: (1) with each signature of the different modes separately, and (2) with all signatures assembled. The algorithm is tested on a database containing 139 nonconvulsive seizures. Across all the analyses performed, the Hilbert-Huang tensor space with assembled signatures proves to be the best feature set for classifying seizure versus non-seizure EEG.\n
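As a rough, hypothetical illustration of the multiway idea only (not the authors' pipeline: the wavelet/Hilbert-Huang tensorization, the five classifiers, and all window settings below are placeholder choices), a frequency × time × channels tensor can be built from spectrograms and per-mode signatures extracted by an HOSVD-style SVD of each mode unfolding:

import numpy as np
from scipy.signal import spectrogram

def eeg_tensor(eeg, fs):
    # eeg: (channels, samples) -> tensor with axes (frequency, time, channel)
    specs = [spectrogram(ch, fs=fs, nperseg=256)[2] for ch in eeg]
    return np.stack(specs, axis=-1)

def mode_signatures(T, rank=3):
    # leading left singular vectors of each mode unfolding (HOSVD-style)
    sigs = []
    for mode in range(T.ndim):
        unfolding = np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)
        U, _, _ = np.linalg.svd(unfolding, full_matrices=False)
        sigs.append(U[:, :rank])
    return sigs

fs = 200.0
eeg = np.random.default_rng(0).standard_normal((18, 10 * int(fs)))  # toy 18-channel segment
sigs = mode_signatures(eeg_tensor(eeg, fs))
features = np.concatenate([s.ravel() for s in sigs])  # "assembled" signatures

The concatenated feature vector corresponds loosely to the paper's second classification strategy, in which the signatures of all modes are assembled before training a classifier.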
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Prognostic prediction of histopathological images by local binary patterns and RUSBoost.\n \n \n \n \n\n\n \n Urdal, J.; Engan, K.; Kvikstad, V.; and Janssen, E. A. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2349-2353, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PrognosticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081630,\n  author = {J. Urdal and K. Engan and V. Kvikstad and E. A. M. Janssen},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Prognostic prediction of histopathological images by local binary patterns and RUSBoost},\n  year = {2017},\n  pages = {2349-2353},\n  abstract = {A high recurrence rate, and progression to higher stages are observed for patients diagnosed with urothelial carcinoma (previously known as transitional cell carcinoma). Low prognostic value of the current grading systems result in extensive follow-up of patients for multiple years after first diagnosis. Although, the aid of computer systems for prognosis prediction of superficial urothelial carcinomas have been proposed, earlier analyses have been focused on using morphological features of cells and attributes describing the patient. In this study, we propose a system to aid in the prediction of prognostic information based on a texture analysis of histopathological images of superficial urothelial carcinoma. The analyses are conducted using the local binary pattern (LBP) and local variance (VAR) operators followed by a RUSBoost classifier. A dataset of 42 patients, consisting of 13 patients without recurrence, 14 with recurrence but not progression and 15 patients with progression are studied. Using a leave-one-out cross-validation, an accuracy of 70% and sensitivity of 84% is achieved.},\n  keywords = {biological organs;biomedical optical imaging;cancer;image classification;image texture;medical image processing;patient diagnosis;pattern classification;tumours;RUSBoost classifier;prognostic prediction;high recurrence rate;transitional cell carcinoma;prognosis prediction;superficial urothelial carcinoma;prognostic information;histopathological image prognostic prediction;local-binary patterns;VAR;LBP;Histograms;Prognostics and health management;Feature extraction;Training;Cancer;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081630},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346499.pdf},\n}\n\n
\n
\n\n\n
\n A high recurrence rate and progression to higher stages are observed in patients diagnosed with urothelial carcinoma (previously known as transitional cell carcinoma). The low prognostic value of current grading systems results in extensive follow-up of patients for multiple years after first diagnosis. Although computer-aided prognosis prediction of superficial urothelial carcinomas has been proposed, earlier analyses focused on morphological features of cells and attributes describing the patient. In this study, we propose a system to aid in the prediction of prognostic information based on a texture analysis of histopathological images of superficial urothelial carcinoma. The analyses are conducted using the local binary pattern (LBP) and local variance (VAR) operators followed by a RUSBoost classifier. A dataset of 42 patients, consisting of 13 patients without recurrence, 14 with recurrence but not progression, and 15 with progression, is studied. Using leave-one-out cross-validation, an accuracy of 70% and a sensitivity of 84% are achieved.\n
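A minimal sketch of a comparable texture pipeline, assuming scikit-image for the local binary pattern operator and imbalanced-learn's RUSBoostClassifier (the paper's VAR operator, image preparation, and leave-one-out protocol are omitted; the patches and labels here are synthetic):

import numpy as np
from skimage.feature import local_binary_pattern
from imblearn.ensemble import RUSBoostClassifier

def lbp_histogram(img, P=8, R=1.0):
    # uniform LBP codes take values 0..P+1, hence P+2 histogram bins
    codes = local_binary_pattern(img, P, R, method="uniform")
    hist, _ = np.histogram(codes, bins=P + 2, range=(0, P + 2), density=True)
    return hist

rng = np.random.default_rng(1)
imgs = (rng.random((42, 64, 64)) * 255).astype(np.uint8)  # stand-in image patches
X = np.array([lbp_histogram(im) for im in imgs])
y = rng.integers(0, 3, size=42)  # no recurrence / recurrence / progression
clf = RUSBoostClassifier(n_estimators=50, random_state=0).fit(X, y)
print(clf.predict(X[:3]))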
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Differential SART for sub-Nyquist tomographic reconstruction in presence of misalignments.\n \n \n \n \n\n\n \n Roemer, F.; Großmann, M.; Schoen, T.; Gruber, R.; Jung, A.; Oeckl, S.; and Del Galdo, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2354-2358, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DifferentialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081631,\n  author = {F. Roemer and M. Großmann and T. Schoen and R. Gruber and A. Jung and S. Oeckl and G. {Del Galdo}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Differential SART for sub-Nyquist tomographic reconstruction in presence of misalignments},\n  year = {2017},\n  pages = {2354-2358},\n  abstract = {In this paper we study tomographic reconstruction methods in the case that prior knowledge about the object is available. In particular, we consider the case that a reference object that is similar in shape and orientation is available, which is very common in non-destructive testing applications. We demonstrate that a differential version of existing reconstruction methods can easily be derived which reconstructs only the deviation between test and reference object. Since this difference volume is significantly more sparse, the differential reconstruction can be implemented very efficiently. We also discuss the case where knowledge about the misalignment between test and reference object is available, in which case the efficiency of the differential reconstruction can be improved even further. The resulting algorithm is faster, more accurate, and less sensitive to the choice of the step size parameters and regularization than state of the art reconstruction methods.},\n  keywords = {computerised tomography;image reconstruction;nondestructive testing;sub-Nyquist tomographic reconstruction method;differential SART;differential reconstruction;reference object;nondestructive testing applications;Image reconstruction;Computed tomography;X-ray imaging;TV;Casting;Inspection;Europe;Computed Tomography;Iterative Reconstruction;Total Variation;Sparse Signal Recovery},\n  doi = {10.23919/EUSIPCO.2017.8081631},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345401.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we study tomographic reconstruction methods for the case where prior knowledge about the object is available. In particular, we consider the case where a reference object of similar shape and orientation is available, which is very common in non-destructive testing applications. We demonstrate that a differential version of existing reconstruction methods can easily be derived which reconstructs only the deviation between the test and reference objects. Since this difference volume is significantly sparser, the differential reconstruction can be implemented very efficiently. We also discuss the case where knowledge about the misalignment between the test and reference objects is available, in which case the efficiency of the differential reconstruction can be improved even further. The resulting algorithm is faster, more accurate, and less sensitive to the choice of the step size parameters and regularization than state-of-the-art reconstruction methods.\n
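The differential idea itself can be shown on a toy linear system: run SART not on the raw measurements but on the difference between the test data and the reprojected reference, so only the sparse deviation is reconstructed. This sketch uses a random matrix rather than a CT projection geometry, and the paper's misalignment handling is not reproduced:

import numpy as np

def sart(A, b, n_iter=50, lam=0.5):
    # simultaneous algebraic reconstruction technique on A x = b
    row_sums = np.maximum(A.sum(axis=1), 1e-12)
    col_sums = np.maximum(A.sum(axis=0), 1e-12)
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        resid = (b - A @ x) / row_sums
        x += lam * (A.T @ resid) / col_sums
    return x

rng = np.random.default_rng(2)
A = rng.random((120, 80))                 # toy projection matrix
x_ref = rng.random(80)                    # known reference volume
x_test = x_ref.copy(); x_test[10] += 1.0  # sparse deviation in the test object
b_test = A @ x_test
delta = sart(A, b_test - A @ x_ref)       # differential reconstruction
print(np.argmax(np.abs(delta)))           # -> 10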
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Teaching multi-core DSP implementation on EVM C6678 board.\n \n \n \n \n\n\n \n Kharin, A.; Vityazev, S.; and Vityazev, V.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2359-2363, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"TeachingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081632,\n  author = {A. Kharin and S. Vityazev and V. Vityazev},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Teaching multi-core DSP implementation on EVM C6678 board},\n  year = {2017},\n  pages = {2359-2363},\n  abstract = {Teaching implementation of digital signal processing systems plays a very important role in recent technical education. The multi-core digital signal processor (DSP) is a new type of architecture widely used now in the industry. A new course on multi-core DSP programming is considered in this paper. The lab experiments are described. The course has been developed for the TMS320C6678 multi-core DSPs. This paper provides educators with a content that cover theoretical and technical skills that are required by industry.},\n  keywords = {digital signal processing chips;electronic engineering education;multiprocessing systems;signal processing;teaching;TMS320C6678 multicore DSPs;technical skills;EVM C6678 board;teaching implementation;multicore digital signal processor;multicore DSP programming;technical education;multicore DSP implementation teaching;educators;Multicore processing;Digital signal processing;Navigation;Programming;Program processors;multi-core;DSP;signal processing;programming;real-time processing},\n  doi = {10.23919/EUSIPCO.2017.8081632},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343781.pdf},\n}\n\n
\n
\n\n\n
\n Teaching the implementation of digital signal processing systems plays a very important role in modern technical education. The multi-core digital signal processor (DSP) is a new type of architecture now widely used in industry. A new course on multi-core DSP programming is presented in this paper, and the lab experiments are described. The course has been developed for the TMS320C6678 multi-core DSP. This paper provides educators with content that covers the theoretical and technical skills required by industry.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Raspberry Pi for image processing education.\n \n \n \n \n\n\n \n Marot, J.; and Bourennane, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2364-2366, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RaspberryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081633,\n  author = {J. Marot and S. Bourennane},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Raspberry Pi for image processing education},\n  year = {2017},\n  pages = {2364-2366},\n  abstract = {This paper firstly describes the development and evaluation of a project course which yields university students building a complete hardware and software chain for a digital image processing application. To arouse the interest and learning initiative of students, we propose them to build a setup including a Raspberry Pi® and image processing programmes. This inexpensive single board computer answers today's issues in energy saving and permits to review fundamental hardware and software principles. Secondly, we propose a low-cost setup for a time-limited practical work: a Raspberry Pi® is shared and controlled remotely by several student pairs: we emphasize the interest of collaborative work, and we provide knowledge and skills about micro-computers to a large number of students simultaneously.},\n  keywords = {computer science education;educational courses;educational institutions;groupware;image processing;microcomputers;Raspberry Pi;image processing education;university students;software chain;digital image processing application;image processing programmes;low-cost setup;single board computer;project course evaluation;hardware chain;collaborative work;microcomputers;Economic indicators;Image processing;Hardware;Universal Serial Bus;Operating systems},\n  doi = {10.23919/EUSIPCO.2017.8081633},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343986.pdf},\n}\n\n
\n
\n\n\n
\n This paper first describes the development and evaluation of a project course in which university students build a complete hardware and software chain for a digital image processing application. To arouse students' interest and learning initiative, we ask them to build a setup comprising a Raspberry Pi® and image processing programs. This inexpensive single-board computer addresses today's energy-saving concerns and permits a review of fundamental hardware and software principles. Second, we propose a low-cost setup for time-limited practical work, in which a Raspberry Pi® is shared and controlled remotely by several student pairs. We emphasize the value of collaborative work, and we provide knowledge and skills about micro-computers to a large number of students simultaneously.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Real life radio-location examples for enhanced signal processing teaching.\n \n \n \n \n\n\n \n Vincent, F.; Chaumette, E.; and Besson, O.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2369-2372, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RealPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081634,\n  author = {F. Vincent and E. Chaumette and O. Besson},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Real life radio-location examples for enhanced signal processing teaching},\n  year = {2017},\n  pages = {2369-2372},\n  abstract = {For some students, learning signal processing could sometimes be a bit complicated due to the importance of the mathematical background that underlies this field. In this paper, we present two real-life experiments that allows to introduce, in a very natural way, most of the standard tools of signal processing. This teaching, aimed at undergraduate students, is divided in two parts. One exploiting a radar model based experiment, the other designed to process real GPS signals.},\n  keywords = {further education;Global Positioning System;radar signal processing;teaching;GPS signals;real-life experiments;undergraduate students;radar model based experiment;real life radio-location;signal processing teaching;Signal processing;Global Positioning System;Receivers;Spaceborne radar;Satellites;Bandwidth;Education;Project-Based Learning;radar;sonar;ultrasound;Matlab;navigation;GPS},\n  doi = {10.23919/EUSIPCO.2017.8081634},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346969.pdf},\n}\n\n
\n
\n\n\n
\n For some students, learning signal processing can be complicated because of the mathematical background that underlies the field. In this paper, we present two real-life experiments that allow us to introduce, in a very natural way, most of the standard tools of signal processing. This teaching, aimed at undergraduate students, is divided into two parts: one exploiting a radar model based experiment, the other designed to process real GPS signals.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Telecom showcase: An exhibition of old technology useful for students and teachers.\n \n \n \n\n\n \n Grivel, E.; Medina, S.; Krief, F.; Falleri, J.; Ferre, G.; Reveillere, L.; and Negru, D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2373-2377, Aug 2017. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081635,\n  author = {E. Grivel and S. Medina and F. Krief and J. Falleri and G. Ferre and L. Reveillere and D. Negru},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Telecom showcase: An exhibition of ole technology useful for students and teachers},\n  year = {2017},\n  pages = {2373-2377},\n  abstract = {In this article, we share our positive experience about the creation of a Telecom showcase in our engineering school, which is an exhibition of old technology to help students learn about previous habits and think about some of the consequences of rapid innovation. This project was done in collaboration with industrial partners such as Thales and Orange. It includes the following steps: collecting objects, organizing and rendering the objects accessible to students, disseminating the history of the telecommunications industry by using a website and quizzes and helping students see how the telecommunications industry and engineers have contributed to social and cultural evolution. This exhibit is particularly useful for the Minute Telecom, inspired from the Minute Physics, where the students are invited to create a video on theoretical concepts such as Shannon's theorem, mobile communication systems or the impact of innovation on user habits.},\n  keywords = {telecommunication engineering education;telecommunication industry;telecommunications industry;social evolution;cultural evolution;Minute Telecom;Telecom showcase;engineering school;old technology;Shannon's theorem;mobile communication systems;Telecommunications;Education;Technological innovation;History;Industries;Telephone sets;Signal processing;object collection;technology exhibition;video;history of telecommunications},\n  doi = {10.23919/EUSIPCO.2017.8081635},\n  issn = {2076-1465},\n  month = {Aug},\n}\n\n
\n
\n\n\n
\n In this article, we share our positive experience of creating a Telecom showcase in our engineering school: an exhibition of old technology that helps students learn about previous habits and think about some of the consequences of rapid innovation. This project was done in collaboration with industrial partners such as Thales and Orange. It included the following steps: collecting objects; organizing the objects and making them accessible to students; disseminating the history of the telecommunications industry through a website and quizzes; and helping students see how the telecommunications industry and its engineers have contributed to social and cultural evolution. This exhibit is particularly useful for the Minute Telecom, inspired by Minute Physics, in which students are invited to create a video on theoretical concepts such as Shannon's theorem, mobile communication systems, or the impact of innovation on user habits.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unmixing multitemporal hyperspectral images accounting for smooth and abrupt variations.\n \n \n \n \n\n\n \n Thouvenin, P.; Dobigeon, N.; and Tourneret, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2378-2382, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"UnmixingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081636,\n  author = {P. Thouvenin and N. Dobigeon and J. Tourneret},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Unmixing multitemporal hyperspectral images accounting for smooth and abrupt variations},\n  year = {2017},\n  pages = {2378-2382},\n  abstract = {A classical problem in hyperspectral imaging, referred to as hyperspectral unmixing, consists in estimating spectra associated with each material present in an image and their proportions in each pixel. In practice, illumination variations (e.g., due to declivity or complex interactions with the observed materials) and the possible presence of outliers can result in significant changes in both the shape and the amplitude of the measurements, thus modifying the extracted signatures. In this context, sequences of hyperspectral images are expected to be simultaneously affected by such phenomena when acquired on the same area at different time instants. Thus, we propose a hierarchical Bayesian model to simultaneously account for smooth and abrupt spectral variations affecting a set of multitemporal hyperspectral images to be jointly unmixed. This model assumes that smooth variations can be interpreted as the result of endmember variability, whereas abrupt variations are due to significant changes in the imaged scene (e.g., presence of outliers, additional endmembers, etc.). The parameters of this Bayesian model are estimated using samples generated by a Gibbs sampler according to its posterior. Performance assessment is conducted on synthetic data in comparison with state-of-the-art unmixing methods.},\n  keywords = {Bayes methods;geophysical image processing;geophysical techniques;hyperspectral imaging;object detection;remote sensing;spectral analysis;unmixing multitemporal hyperspectral images;abrupt variations;classical problem;hyperspectral imaging;illumination variations;observed materials;hierarchical Bayesian model;smooth variations;abrupt spectral variations;imaged scene;unmixing methods;Gibbs sampler;synthetic data;Bayes methods;Hyperspectral imaging;Europe;Signal processing;Redundancy;Additives;Gaussian distribution},\n  doi = {10.23919/EUSIPCO.2017.8081636},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341763.pdf},\n}\n\n
\n
\n\n\n
\n A classical problem in hyperspectral imaging, referred to as hyperspectral unmixing, consists in estimating spectra associated with each material present in an image and their proportions in each pixel. In practice, illumination variations (e.g., due to declivity or complex interactions with the observed materials) and the possible presence of outliers can result in significant changes in both the shape and the amplitude of the measurements, thus modifying the extracted signatures. In this context, sequences of hyperspectral images are expected to be simultaneously affected by such phenomena when acquired on the same area at different time instants. Thus, we propose a hierarchical Bayesian model to simultaneously account for smooth and abrupt spectral variations affecting a set of multitemporal hyperspectral images to be jointly unmixed. This model assumes that smooth variations can be interpreted as the result of endmember variability, whereas abrupt variations are due to significant changes in the imaged scene (e.g., presence of outliers, additional endmembers, etc.). The parameters of this Bayesian model are estimated using samples generated by a Gibbs sampler according to its posterior. Performance assessment is conducted on synthetic data in comparison with state-of-the-art unmixing methods.\n
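The linear mixing model underlying the paper can be sketched as follows (the hierarchical Bayesian model and its Gibbs sampler are well beyond a few lines; here the endmember matrix of each image is perturbed smoothly or abruptly around a reference, and abundances are recovered by plain non-negative least squares as a stand-in, with all sizes and values synthetic):

import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(3)
L, R, T = 50, 3, 4                           # bands, endmembers, time instants
M0 = rng.random((L, R))                      # reference endmember matrix
for t in range(T):
    dM = 0.02 * rng.standard_normal((L, R))  # smooth spectral variability
    if t == 2:
        dM[:, 0] += 0.5 * rng.random(L)      # abrupt change at one instant
    a_true = rng.dirichlet(np.ones(R))       # abundances summing to one
    y = (M0 + dM) @ a_true                   # observed pixel at time t
    a_hat, _ = nnls(M0, y)                   # abundances w.r.t. the reference
    print(t, np.round(a_hat / a_hat.sum(), 2), np.round(a_true, 2))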
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Image deblurring using a perturbation-based regularization approach.\n \n \n \n \n\n\n \n Alanazi, A. M.; Ballal, T.; Masood, M.; and Al-Naffouri, T. Y.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2383-2387, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ImagePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081637,\n  author = {A. M. Alanazi and T. Ballal and M. Masood and T. Y. Al-Naffouri},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Image deblurring using a perturbation-based regularization approach},\n  year = {2017},\n  pages = {2383-2387},\n  abstract = {The image restoration problem deals with images in which information has been degraded by blur or noise. In this work, we present a new method for image deblurring by solving a regularized linear least-squares problem. In the proposed method, a synthetic perturbation matrix with a bounded norm is forced into the discrete ill-conditioned model matrix. This perturbation is added to enhance the singular-value structure of the matrix and hence to provide an improved solution. A method is proposed to find a near-optimal value of the regularization parameter for the proposed approach. To reduce the computational complexity, we present a technique based on the bootstrapping method to estimate the regularization parameter for both low and high-resolution images. Experimental results on the image deblurring problem are presented. Comparisons are made with three benchmark methods and the results demonstrate that the proposed method clearly outperforms the other methods in terms of both the output PSNR and SSIM values.},\n  keywords = {computational complexity;image restoration;least squares approximations;matrix algebra;computational complexity reduction;regularized linear least-square problem;image deblurring problem;high-resolution images;bootstrapping method;regularization parameter;singular-value structure;discrete ill-conditioned model matrix;synthetic perturbation matrix;image restoration problem;perturbation-basec regularization approach;Image restoration;Mathematical model;Computational modeling;Computational complexity;Signal processing algorithms;Europe;Signal processing;Bootstrapping;bounded perturbation regularization;image deblurring;linear least-squares problems;Tikhonov regularization},\n  doi = {10.23919/EUSIPCO.2017.8081637},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347484.pdf},\n}\n\n
\n
\n\n\n
\n The image restoration problem deals with images in which information has been degraded by blur or noise. In this work, we present a new method for image deblurring that solves a regularized linear least-squares problem. In the proposed method, a synthetic perturbation matrix with a bounded norm is forced into the discrete ill-conditioned model matrix. This perturbation is added to enhance the singular-value structure of the matrix and hence to provide an improved solution. A method is proposed to find a near-optimal value of the regularization parameter for the proposed approach. To reduce the computational complexity, we present a technique based on the bootstrapping method to estimate the regularization parameter for both low- and high-resolution images. Experimental results on the image deblurring problem are presented. Comparisons are made with three benchmark methods, and the results demonstrate that the proposed method clearly outperforms the other methods in terms of both the output PSNR and SSIM values.\n
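For context, the baseline being improved upon is ordinary Tikhonov-regularized least squares; the sketch below shows only that baseline on a 1D toy deconvolution. The paper's bounded perturbation of the model matrix and its bootstrap-based choice of the regularization parameter are not reproduced, and lam is an arbitrary placeholder value:

import numpy as np

n = 64
t = np.arange(n)
H = np.exp(-0.5 * ((t[:, None] - t[None, :]) / 2.0) ** 2)  # Gaussian blur matrix
H /= H.sum(axis=1, keepdims=True)
x = np.zeros(n); x[20] = 1.0; x[40] = 0.7                  # sparse toy scene
y = H @ x + 0.01 * np.random.default_rng(4).standard_normal(n)

lam = 1e-2                                                 # placeholder parameter
x_hat = np.linalg.solve(H.T @ H + lam * np.eye(n), H.T @ y)
print(np.sort(np.argsort(x_hat)[-2:]))                     # peaks near 20 and 40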
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Design of orthonormal Haar-like features for fast pattern matching.\n \n \n \n \n\n\n \n Ito, I.; and Egiazarian, K.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2388-2392, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DesignPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081638,\n  author = {I. Ito and K. Egiazarian},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Design of orthonormal Haar-like features for fast pattern matching},\n  year = {2017},\n  pages = {2388-2392},\n  abstract = {The goal of pattern matching is to find small parts of an image that are similar to a given template. Matching in transform-domain (such as Haar, Walsh-Hadamard, etc.) is more efficient that matching in the spatial domain. However, it has a limitation: the template size is restricted to be a power of two to apply fast computational algorithms of transforms. In this paper, fast pattern matching method based on orthonormal tree-structured Haar transform (OTSHT) is proposed. It allows to overcome the above-mentioned limitation of the template size. Two types of tree structures are considered in this paper: balanced tree and logarithmic tree. It is demonstrated that the proposed method with the balanced tree structure is computationally more efficient.},\n  keywords = {computational complexity;Haar transforms;image matching;trees (mathematics);Walsh functions;transform-domain;spatial domain;template size;fast pattern matching method;logarithmic tree structures;orthonormal tree-structured Haar transform;OTSHT;Built-in self-test;Pattern matching;Transforms;Strips;Binary trees;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081638},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347122.pdf},\n}\n\n
\n
\n\n\n
\n The goal of pattern matching is to find small parts of an image that are similar to a given template. Matching in a transform domain (such as Haar or Walsh-Hadamard) is more efficient than matching in the spatial domain. However, it has a limitation: the template size is restricted to a power of two so that fast transform algorithms can be applied. In this paper, a fast pattern matching method based on the orthonormal tree-structured Haar transform (OTSHT) is proposed, which overcomes this limitation on the template size. Two types of tree structures are considered: a balanced tree and a logarithmic tree. It is demonstrated that the proposed method with the balanced tree structure is computationally more efficient.\n
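A toy version of the underlying speed-up principle (cheap Haar-like projections reject most candidate windows before the exact match score is computed) is sketched below; the paper's orthonormal tree-structured transform and its fast strip-based computation are not reproduced, and the rejection threshold is arbitrary:

import numpy as np

def haar_feats(w):
    # mean plus two Haar-like (top-bottom, left-right) contrasts
    half = w.shape[0] // 2
    return np.array([w.mean(),
                     w[:half].mean() - w[half:].mean(),
                     w[:, :half].mean() - w[:, half:].mean()])

rng = np.random.default_rng(5)
img = rng.random((128, 128)); k = 16
tmpl = img[37:37 + k, 55:55 + k].copy()
tf = haar_feats(tmpl)

best, best_ssd = None, np.inf
for i in range(img.shape[0] - k):
    for j in range(img.shape[1] - k):
        w = img[i:i + k, j:j + k]
        if np.abs(haar_feats(w) - tf).max() > 0.05:  # cheap rejection test
            continue
        ssd = ((w - tmpl) ** 2).sum()                # exact score, rarely reached
        if ssd < best_ssd:
            best, best_ssd = (i, j), ssd
print(best)                                          # -> (37, 55)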
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Machine learning for automatic classification of volcano-seismic signatures.\n \n \n \n \n\n\n \n Malfante, M.; Mura, M. D.; Mars, J. I.; and Métaxian, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2393-2397, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MachinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081639,\n  author = {M. Malfante and M. D. Mura and J. I. Mars and J. Métaxian},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Machine learning for automatic classification of volcano-seismic signatures},\n  year = {2017},\n  pages = {2393-2397},\n  abstract = {The evaluation and prediction of volcanoes activities and associated risks is still a timely and open issue. The amount of volcano-seismic data acquired by recent monitoring stations is huge (e.g., several years of continuous recordings), thereby making machine learning absolutely necessary for their automatic analysis. The transient nature of the volcano-seismic signatures of interest further enforces the need of automatic detection and classification of such events. In this paper, we present a novel architecture for automatic classification of volcano-seismic events based on a comprehensive signal representation with a large feature set. To the best of our knowledge this is one of the first attempts to automatize the classification task of these signals. The proposed approach relies on supervised machine learning techniques to build a prediction model.},\n  keywords = {geophysical signal processing;learning (artificial intelligence);seismology;signal classification;signal representation;volcanology;volcano-seismic events;classification task;supervised machine;machine learning;automatic classification;volcano-seismic signatures;volcano-seismic data;volcano activity;signal representation;Feature extraction;Predictive models;Support vector machines;Monitoring;Speech;Time-frequency analysis},\n  doi = {10.23919/EUSIPCO.2017.8081639},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347114.pdf},\n}\n\n
\n
\n\n\n
\n The evaluation and prediction of volcano activity and its associated risks is still a timely and open issue. The amount of volcano-seismic data acquired by recent monitoring stations is huge (e.g., several years of continuous recordings), making machine learning absolutely necessary for their automatic analysis. The transient nature of the volcano-seismic signatures of interest further reinforces the need for automatic detection and classification of such events. In this paper, we present a novel architecture for the automatic classification of volcano-seismic events based on a comprehensive signal representation with a large feature set. To the best of our knowledge, this is one of the first attempts to automate the classification of these signals. The proposed approach relies on supervised machine learning techniques to build a prediction model.\n
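A minimal sketch of such a supervised pipeline, with the paper's large feature set reduced to a few generic statistical and spectral descriptors, and with random stand-in waveforms and labels (assuming SciPy and scikit-learn are available):

import numpy as np
from scipy.stats import kurtosis, skew
from sklearn.svm import SVC

def event_features(x, fs):
    # a few generic statistical and spectral descriptors per event
    f = np.fft.rfftfreq(x.size, 1 / fs)
    P = np.abs(np.fft.rfft(x)) ** 2
    centroid = (f * P).sum() / P.sum()
    return np.array([x.std(), kurtosis(x), skew(x), centroid])

rng = np.random.default_rng(6)
fs = 100.0
X = np.array([event_features(rng.standard_normal(1000), fs) for _ in range(60)])
y = rng.integers(0, 4, size=60)  # stand-in labels, e.g. LP / VT / tremor / noise
clf = SVC(kernel="rbf").fit(X, y)
print(clf.predict(X[:5]))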
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Skill robot library: Intelligent path planning framework for object manipulation.\n \n \n \n \n\n\n \n Kyrarini, M.; Naeem, S.; Wang, X.; and Gräser, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2398-2402, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SkillPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081640,\n  author = {M. Kyrarini and S. Naeem and X. Wang and A. Gräser},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Skill robot library: Intelligent path planning framework for object manipulation},\n  year = {2017},\n  pages = {2398-2402},\n  abstract = {Commonly used path planning techniques for object manipulation are computationally expensive and time-consuming. In this paper, a novel framework called Skill Robot Library (SRL), which has competence to store only the keypoints of a path rather than complete, is presented. The path can be computed with path planner or taught by a human using kinesthetic teaching. Additionally, when the environment is static and only the requested new start and goal positions are changed with respect to the start and goal positions of the stored path, the SRL can retrieve and modify the stored path. The SRL forwards the final path to the robot for reproduction. Experimental results achieved with a six degrees of freedom robotic arm are presented together with performance evaluation of the SRL and the path planner is demonstrated via a series of experiments.},\n  keywords = {manipulators;path planning;robot programming;freedom robotic arm;path planner;intelligent path planning framework;object manipulation;skill robot library;SRL;Libraries;Manipulators;Databases;Signal processing algorithms;Path planning;Collision avoidance;object manipulation;intelligent path planning;robot skill framework},\n  doi = {10.23919/EUSIPCO.2017.8081640},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347294.pdf},\n}\n\n
\n
\n\n\n
\n Commonly used path planning techniques for object manipulation are computationally expensive and time-consuming. In this paper, a novel framework called the Skill Robot Library (SRL) is presented, which stores only the keypoints of a path rather than the complete path. The path can be computed with a path planner or taught by a human through kinesthetic teaching. Additionally, when the environment is static and only the requested new start and goal positions differ from those of a stored path, the SRL can retrieve and modify the stored path. The SRL then forwards the final path to the robot for reproduction. Experimental results obtained with a six-degrees-of-freedom robotic arm are presented, and the performance of the SRL and the path planner is evaluated through a series of experiments.\n
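A purely hypothetical data-structure sketch of the idea (all class and method names below are illustrative, not the SRL API): store only the keypoints of each path, and on a new request in a static scene reuse a stored path whose endpoints are close enough, adapting only its start and goal instead of re-planning:

import numpy as np

class SkillRobotLibrary:                       # illustrative name, not the SRL API
    def __init__(self):
        self.paths = []                        # stored keypoint arrays

    def store(self, keypoints):
        self.paths.append(np.asarray(keypoints, dtype=float))

    def retrieve(self, start, goal, tol=0.2):
        for kp in self.paths:                  # match stored endpoints
            if (np.linalg.norm(kp[0] - start) < tol and
                    np.linalg.norm(kp[-1] - goal) < tol):
                mod = kp.copy()
                mod[0], mod[-1] = start, goal  # adapt endpoints only
                return mod
        return None                            # fall back to the path planner

srl = SkillRobotLibrary()
srl.store([[0, 0, 0], [0.3, 0.2, 0.4], [0.6, 0.1, 0.5], [1, 0, 0.2]])
print(srl.retrieve(np.array([0.05, 0.0, 0.0]), np.array([0.95, 0.05, 0.2])))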
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Near field targets localization using bistatic MIMO system with symmetric arrays.\n \n \n \n \n\n\n \n Singh, P. R.; Wang, Y.; and Chargé, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2403-2407, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NearPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081641,\n  author = {P. R. Singh and Y. Wang and P. Chargé},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Near field targets localization using bistatic MIMO system with symmetric arrays},\n  year = {2017},\n  pages = {2403-2407},\n  abstract = {In this paper, we propose a subspace based method to localize multiple targets in the near field region of a bistatic MIMO system with symmetric uniform linear arrays (ULAs). The proposed method uses the symmetry in the transmitting and receiving arrays to estimate the angle of departure (AOD) and angle of arrival (AOA) of each target by using 1D rank reduction estimator (RARE) based method. For each estimated AOA, the range from the center of the transmitting array to the corresponding target is estimated by using 1D multiple signal classification (MUSIC). Finally, the receiver side range of each target is estimated by using the other three estimated location parameters in 2D MUSIC technique which also automatically pairs the location parameters.},\n  keywords = {array signal processing;direction-of-arrival estimation;MIMO radar;radar receivers;radar target recognition;radar transmitters;signal classification;bistatic MIMO system;multiple targets;symmetric uniform linear arrays;1D rank reduction estimator based method;transmitting array;1D multiple signal classification;angle of arrival;near field targets localization;receiving array;angle of departure;2D MUSIC technique;MIMO;Transmitting antennas;Europe;Signal processing;Multiple signal classification;Electronic mail;Two dimensional displays},\n  doi = {10.23919/EUSIPCO.2017.8081641},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341775.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a subspace based method to localize multiple targets in the near field region of a bistatic MIMO system with symmetric uniform linear arrays (ULAs). The proposed method uses the symmetry of the transmitting and receiving arrays to estimate the angle of departure (AOD) and angle of arrival (AOA) of each target by means of a 1D rank reduction estimator (RARE) based method. For each estimated AOA, the range from the center of the transmitting array to the corresponding target is estimated using 1D multiple signal classification (MUSIC). Finally, the receiver side range of each target is estimated using the other three estimated location parameters in a 2D MUSIC technique, which also automatically pairs the location parameters.\n
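For readers unfamiliar with the subspace machinery the method builds on, a generic far-field 1D MUSIC sketch is given below; the paper's RARE step, near-field model, and 2D search with automatic pairing are not reproduced:

import numpy as np
from scipy.signal import find_peaks

rng = np.random.default_rng(7)
M, d, n_snap = 8, 0.5, 200                  # sensors, spacing (wavelengths), snapshots
angles = np.deg2rad([-20.0, 35.0])          # true directions

def steer(theta):
    return np.exp(-2j * np.pi * d * np.arange(M) * np.sin(theta))

A = np.stack([steer(t) for t in angles], axis=1)
S = rng.standard_normal((2, n_snap)) + 1j * rng.standard_normal((2, n_snap))
noise = 0.1 * (rng.standard_normal((M, n_snap)) + 1j * rng.standard_normal((M, n_snap)))
X = A @ S + noise

R = X @ X.conj().T / n_snap                 # sample covariance
w, V = np.linalg.eigh(R)
En = V[:, :M - 2]                           # noise subspace (2 sources assumed)
grid = np.deg2rad(np.linspace(-90, 90, 721))
P = np.array([1.0 / np.linalg.norm(En.conj().T @ steer(t)) ** 2 for t in grid])
pk, _ = find_peaks(P)
print(np.sort(np.rad2deg(grid[pk[np.argsort(P[pk])[-2:]]])))  # near -20 and 35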
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Near field targets localization using bistatic MIMO system with spherical wavefront based model.\n \n \n \n \n\n\n \n Singh, P. R.; Wang, Y.; and Charge, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2408-2412, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"NearPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081642,\n  author = {P. R. Singh and Y. Wang and P. Charge},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Near field targets localization using bistatic MIMO system with spherical wavefront based model},\n  year = {2017},\n  pages = {2408-2412},\n  abstract = {This paper proposes a subspace based near field targets localization method with bistatic MIMO system consisting of uniform linear transmitting and receiving arrays. The proposed method uses the spherical wavefront based exact model to avoid the systematic error introduced by the Fresnel approximation, which is usually made on the wavefront to simplify the signal model for a near field source in the existing literature on near field sources localization. By avoiding this approximation, we have significantly improved the estimation accuracy. Additionally, unlike most of the existing near field sources localization techniques, the proposed method works for the array with interelement spacing greater than a quarter of the carrier wavelength.},\n  keywords = {antenna arrays;array signal processing;linear antenna arrays;MIMO radar;object detection;radar antennas;radar detection;receiving antennas;transmitting antennas;bistatic MIMO system;receiving arrays;Fresnel approximation;near field target localization method;spherical wavefront based model;uniform linear transmitting arrays;field source localization;near field source localization;carrier wavelength;MIMO;Transmitting antennas;Antenna arrays;Receiving antennas;Antenna measurements;Covariance matrices;Eigenvalues and eigenfunctions},\n  doi = {10.23919/EUSIPCO.2017.8081642},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570338054.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a subspace based near field target localization method for a bistatic MIMO system consisting of uniform linear transmitting and receiving arrays. The proposed method uses the exact spherical wavefront model to avoid the systematic error introduced by the Fresnel approximation, which is usually applied to the wavefront in the existing literature on near field source localization in order to simplify the signal model. By avoiding this approximation, we significantly improve the estimation accuracy. Additionally, unlike most existing near field source localization techniques, the proposed method works for arrays with interelement spacing greater than a quarter of the carrier wavelength.\n
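The difference between the two wavefront models is easy to see numerically. For element positions p_m along a ULA, the exact range from a source at (r, θ) to element m is r_m = sqrt(r² + p_m² − 2 r p_m sin θ), whereas the Fresnel model keeps only a second-order Taylor expansion of it; the hedged sketch below measures the resulting steering-phase error for a toy geometry (all values arbitrary):

import numpy as np

lam = 1.0
M = 8
p = (np.arange(M) - (M - 1) / 2) * lam          # element positions along the ULA
r, theta = 6 * lam, np.deg2rad(25.0)            # near-field source (range, angle)

# exact spherical-wavefront range from the source to element m
r_exact = np.sqrt(r**2 + p**2 - 2 * r * p * np.sin(theta))
# Fresnel (second-order) approximation of the same range
r_fresnel = r - p * np.sin(theta) + p**2 * np.cos(theta) ** 2 / (2 * r)

a_exact = np.exp(-2j * np.pi * (r_exact - r) / lam)
a_fresnel = np.exp(-2j * np.pi * (r_fresnel - r) / lam)
print(np.abs(np.angle(a_exact * a_fresnel.conj())).max())  # systematic phase error (rad)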
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sub-antenna sparse processing for coherence loss in underwater source localization.\n \n \n \n \n\n\n \n Lefort, R.; and Drémeau, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2413-2417, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Sub-antennaPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081643,\n  author = {R. Lefort and A. Drémeau},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Sub-antenna sparse processing for coherence loss in underwater source localization},\n  year = {2017},\n  pages = {2413-2417},\n  abstract = {In underwater acoustics, inversion techniques, also called {"}Matched Field{"} processing, remain the best methods for locating acoustic sources. But this is without counting on the environmental parameter fluctuations that lead to injurious coherence loss. We have shown in a previous work that a sub-antenna approach could be well suited to deal with such coherence loss, however at the expense of a lower resolution. In this paper, we propose to solve this drawback by considering sparse priors. In the experimental part of this paper, by using numerical simulations of plane waves subject to coherence loss, we demonstrate that the proposed method not only outperforms a classical beamformer in terms of source localization performance, but also, that it improves the antenna resolution.},\n  keywords = {acoustic signal processing;array signal processing;underwater acoustic communication;sparse priors;antenna resolution;underwater source localization;underwater acoustics;inversion techniques;Matched Field processing;environmental parameter fluctuations;injurious coherence loss;subantenna sparse processing;acoustic sources localization;Coherence;Matching pursuit algorithms;Mathematical model;Antennas;Sensor arrays;Acoustics},\n  doi = {10.23919/EUSIPCO.2017.8081643},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342498.pdf},\n}\n\n
\n
\n\n\n
\n In underwater acoustics, inversion techniques, also called \"Matched Field\" processing, remain the best methods for locating acoustic sources. However, environmental parameter fluctuations lead to damaging coherence loss. We have shown in previous work that a sub-antenna approach is well suited to dealing with such coherence loss, although at the expense of a lower resolution. In this paper, we propose to overcome this drawback by considering sparse priors. In the experimental part of the paper, using numerical simulations of plane waves subject to coherence loss, we demonstrate that the proposed method not only outperforms a classical beamformer in terms of source localization performance, but also improves the antenna resolution.\n
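A toy sketch of the sub-antenna principle alone (the sparse-prior estimation that restores resolution in the paper is not reproduced; geometry, jitter model, and sizes are arbitrary): the array is split into short sub-antennas, each beamformed conventionally, and their output powers combined incoherently so that only short-range coherence is required:

import numpy as np

rng = np.random.default_rng(8)
M, d, sub, n_snap = 32, 0.5, 8, 100
theta0 = np.deg2rad(10.0)
jitter = np.cumsum(0.2 * rng.standard_normal(M))    # coherence degrades along the array
a = np.exp(-2j * np.pi * d * np.arange(M) * np.sin(theta0) + 1j * jitter)
s = rng.standard_normal(n_snap) + 1j * rng.standard_normal(n_snap)
X = np.outer(a, s) + 0.3 * (rng.standard_normal((M, n_snap))
                            + 1j * rng.standard_normal((M, n_snap)))

grid = np.deg2rad(np.linspace(-90, 90, 361))

def sub_power(Xs, offset):
    m = np.arange(Xs.shape[0]) + offset             # absolute element indices
    W = np.exp(-2j * np.pi * d * m[:, None] * np.sin(grid)[None, :])
    return (np.abs(W.conj().T @ Xs) ** 2).mean(axis=1)

P = sum(sub_power(X[k:k + sub], k) for k in range(0, M, sub))
print(np.rad2deg(grid[np.argmax(P)]))               # close to 10 degrees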
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Precision analysis of the imaging pipeline in the square kilometre array.\n \n \n \n \n\n\n \n Griffin, A.; Pradel, N.; Radford, B.; Wilson, D. I.; and Ensor, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2418-2422, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PrecisionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081644,\n  author = {A. Griffin and N. Pradel and B. Radford and D. I. Wilson and A. Ensor},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Precision analysis of the imaging pipeline in the square kilometre array},\n  year = {2017},\n  pages = {2418-2422},\n  abstract = {In this paper we present our end-to-end model of the imaging pipeline in the Square Kilometre Array. Our Sky Generator models the signals that are received by the Central Signal Processor (CSP), our CSP Correlator model then processes those signals to generate visibilities to pass to the Science Data Processor (SDP). Our SDP Imaging model then grids the visibilities and inverse Fourier transforms them to produce a dirty image of the sky. Our modelling allows us to investigate the error that is introduced due to reduced numerical precision, and we then propose techniques to mitigate this error, and thus reduce the required amount of computational hardware.},\n  keywords = {Fourier transforms;image processing;inverse transforms;radiotelescopes;imaging pipeline;square kilometre array;end-to-end model;Sky Generator models;Central Signal Processor;CSP Correlator model;Science Data Processor;SDP Imaging model;Mathematical model;Delays;Antenna arrays;Correlators;Imaging;Computational modeling;Radio astronomy;Square Kilometre Array;numerical precision;signal processing;modelling},\n  doi = {10.23919/EUSIPCO.2017.8081644},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347792.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we present our end-to-end model of the imaging pipeline in the Square Kilometre Array. Our Sky Generator models the signals that are received by the Central Signal Processor (CSP); our CSP Correlator model then processes those signals to generate visibilities to pass to the Science Data Processor (SDP). Our SDP Imaging model grids the visibilities and inverse Fourier transforms them to produce a dirty image of the sky. Our modelling allows us to investigate the error introduced by reduced numerical precision; we then propose techniques to mitigate this error and thus reduce the required amount of computational hardware.\n
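The final stage of the pipeline, and the kind of precision experiment described, can be caricatured in a few lines: grid synthetic visibilities onto a uv grid, inverse-FFT to a dirty image, and compare single- against double-precision results. This is nearest-cell gridding on random data, not the SKA model's convolutional gridding:

import numpy as np

rng = np.random.default_rng(9)
N, n_vis = 256, 20000
u = rng.integers(0, N, n_vis); v = rng.integers(0, N, n_vis)
vis = rng.standard_normal(n_vis) + 1j * rng.standard_normal(n_vis)

def dirty_image(dtype):
    grid = np.zeros((N, N), dtype=dtype)
    np.add.at(grid, (u, v), vis.astype(dtype))   # nearest-cell gridding
    return np.fft.ifft2(grid).real               # dirty image

err = dirty_image(np.complex64) - dirty_image(np.complex128)
print(np.abs(err).max())                         # precision-induced image error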
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint sensor placement and power rating selection in energy harvesting wireless sensor networks.\n \n \n \n \n\n\n \n Bushnaq, O. M.; Al-Naffouri, T. Y.; Chepuri, S. P.; and Leus, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2423-2427, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081645,\n  author = {O. M. Bushnaq and T. Y. Al-Naffouri and S. P. Chepuri and G. Leus},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Joint sensor placement and power rating selection in energy harvesting wireless sensor networks},\n  year = {2017},\n  pages = {2423-2427},\n  abstract = {In this paper, the focus is on optimal sensor placement and power rating selection for parameter estimation in wireless sensor networks (WSNs). We take into account the amount of energy harvested by the sensing nodes, communication link quality, and the observation accuracy at the sensor level. In particular, the aim is to reconstruct the estimation parameter with minimum error at a fusion center under a system budget constraint. To achieve this goal, a subset of sensing locations is selected from a large pool of candidate sensing locations. Furthermore, the type of sensor to be placed at those locations is selected from a given set of sensor types (e.g., sensors with different power ratings). We further investigate whether it is better to install a large number of cheap sensors, a few expensive sensors or a combination of different sensor types at the optimal locations.},\n  keywords = {energy harvesting;parameter estimation;sensor placement;wireless sensor networks;joint sensor placement;power rating selection;energy harvesting wireless sensor networks;optimal sensor placement;parameter estimation;sensing nodes;communication link quality;sensor level;system budget constraint;optimal locations;Sensors;Batteries;Optimization;Covariance matrices;Europe;Signal processing;Wireless sensor networks;Wireless sensor networks;sensor selection;convex optimization;energy harvesting;estimation},\n  doi = {10.23919/EUSIPCO.2017.8081645},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347207.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, the focus is on optimal sensor placement and power rating selection for parameter estimation in wireless sensor networks (WSNs). We take into account the amount of energy harvested by the sensing nodes, communication link quality, and the observation accuracy at the sensor level. In particular, the aim is to reconstruct the estimation parameter with minimum error at a fusion center under a system budget constraint. To achieve this goal, a subset of sensing locations is selected from a large pool of candidate sensing locations. Furthermore, the type of sensor to be placed at those locations is selected from a given set of sensor types (e.g., sensors with different power ratings). We further investigate whether it is better to install a large number of cheap sensors, a few expensive sensors or a combination of different sensor types at the optimal locations.\n
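A greedy stand-in for the joint selection problem (the paper uses a convex relaxation; everything below, including the two sensor types and the linear measurement model, is illustrative): repeatedly add the affordable (location, type) pair that most increases the log-determinant of the Fisher information, where higher-rated sensors cost more but have lower noise variance:

import numpy as np

rng = np.random.default_rng(10)
n_loc, n_par = 30, 4
H = rng.standard_normal((n_loc, n_par))   # per-location regressors
types = [(1.0, 1.0), (3.0, 0.2)]          # (cost, noise variance) per sensor type
budget, chosen = 12.0, []
F = 1e-6 * np.eye(n_par)                  # Fisher information accumulator

while True:
    best = None
    for i in range(n_loc):
        if any(c[0] == i for c in chosen):
            continue
        for k, (cost, var) in enumerate(types):
            if cost > budget:
                continue
            gain = np.linalg.slogdet(F + np.outer(H[i], H[i]) / var)[1]
            if best is None or gain > best[0]:
                best = (gain, i, k, cost, var)
    if best is None:
        break
    _, i, k, cost, var = best
    chosen.append((i, k)); budget -= cost
    F += np.outer(H[i], H[i]) / var
print(chosen)                             # selected (location, type) pairs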
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Separation of vibration-derived sound signals based on fusion processing of vibration sensors and microphones.\n \n \n \n \n\n\n \n Takashima, R.; Kawaguchi, Y.; and Togami, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2428-2432, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"SeparationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081646,\n  author = {R. Takashima and Y. Kawaguchi and M. Togami},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Separation of vibration-derived sound signals based on fusion processing of vibration sensors and microphones},\n  year = {2017},\n  pages = {2428-2432},\n  abstract = {This paper proposes a sound source separation method for vibration-derived sound signals such as sounds derived from mechanical vibrations by using vibration sensors. The proposed method is based on two assumptions. First, a vibration signal and the sound derived from the vibration are assumed to have a linear correlation. This assumption enables us to model the vibration-derived sound as a linear convolution of a transfer function and a vibration signal recorded by a vibration sensor. Second, un-vibration-derived sound signals such that the sound source is not connected to vibration sensors via a solid medium are barely recorded by vibration sensors. This assumption leads to a constraint of the transfer function from the un-vibration-derived sound sources to the vibration sensors. The proposed framework is the same as a microphone-array-based blind source separation framework, except that the proposed method constructs arrays with microphones and vibration sensors, and the separation parameters are constrained by the prior knowledge gained from the above second assumption. Experimental results indicate that the separation performance of the proposed method is superior to that of a conventional microphone-array-based source separation method.},\n  keywords = {blind source separation;microphones;sensor fusion;vibrations;vibration sensor;sound signals;sound source separation method;mechanical vibrations;vibration signal;Vibrations;Sensors;Microphones;Correlation;Transfer functions;Solids;Source separation;blind source separation;vibration-derived sound;vibration sensor;microphone;local Gaussian model},\n  doi = {10.23919/EUSIPCO.2017.8081646},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346812.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a sound source separation method for vibration-derived sound signals, such as sounds produced by mechanical vibrations, using vibration sensors. The proposed method is based on two assumptions. First, a vibration signal and the sound derived from that vibration are assumed to be linearly correlated. This assumption enables us to model the vibration-derived sound as a linear convolution of a transfer function with the vibration signal recorded by a vibration sensor. Second, sound signals that are not vibration-derived, i.e., those whose source is not connected to the vibration sensors via a solid medium, are barely recorded by the vibration sensors. This assumption leads to a constraint on the transfer function from the non-vibration-derived sound sources to the vibration sensors. The proposed framework is the same as a microphone-array-based blind source separation framework, except that the proposed method constructs arrays from both microphones and vibration sensors, and the separation parameters are constrained by the prior knowledge gained from the second assumption. Experimental results indicate that the separation performance of the proposed method is superior to that of a conventional microphone-array-based source separation method.\n
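The first assumption on its own already suggests a simple baseline, sketched below: estimate the vibration-to-microphone transfer function as a least-squares FIR filter and subtract the predicted vibration-derived component. The paper's multichannel local-Gaussian-model separation is not reproduced, and the signals are synthetic:

import numpy as np

rng = np.random.default_rng(11)
n, L = 4000, 8
vib = rng.standard_normal(n)                       # vibration-sensor signal
h = 0.5 * rng.standard_normal(L)                   # unknown transfer function
mic = np.convolve(vib, h)[:n] + rng.standard_normal(n)  # vibration-derived + other sound

V = np.stack([np.roll(vib, k) for k in range(L)], axis=1)  # delayed copies of vib
V[:L] = 0                                          # discard wrapped-around samples
h_hat, *_ = np.linalg.lstsq(V, mic, rcond=None)    # least-squares FIR estimate
residual = mic - V @ h_hat                         # non-vibration-derived estimate
print(np.round(h_hat - h, 2))                      # close to zero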
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance improvement for wideband beamforming with white noise reduction based on sparse arrays.\n \n \n \n \n\n\n \n Anbiyaei, M. R.; Liu, W.; and McLernon, D. C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2433-2437, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081647,\n  author = {M. R. Anbiyaei and W. Liu and D. C. McLernon},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Performance improvement for wideband beamforming with white noise reduction based on sparse arrays},\n  year = {2017},\n  pages = {2433-2437},\n  abstract = {A method is proposed for reducing the effect of white noise in wideband sparse arrays via a combination of a judiciously designed transformation followed by highpass filters. The reduced noise level leads to a higher signal to noise ratio for the system, which can have a significant effect on the performance of various beamforming methods. As a representative example, the reference signal based (RSB) and the Linearly Constrained Minimum Variance (LCMV) beamformers are employed here to demonstrate the improved beamforming performance, as confirmed by simulation results.},\n  keywords = {array signal processing;signal denoising;white noise;highpass filters;reduced noise level;Linearly Constrained Minimum Variance beamformers;wideband beamforming;white noise reduction;wideband sparse arrays;reference signal based beamformers;Sensor arrays;Array signal processing;Sparse matrices;White noise;Wideband;Signal to noise ratio},\n  doi = {10.23919/EUSIPCO.2017.8081647},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346517.pdf},\n}\n\n
@InProceedings{8081648,
  author = {K. Nakamura and T. Mizumoto},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Blind spatial sound source clustering and activity detection using uncalibrated microphone array},
  year = {2017},
  pages = {2438-2442},
  abstract = {This paper presents a method for estimating the number, as well as the activity periods, of spatially distributed sound sources using an uncalibrated microphone array. This methodology is applied for the purposes of speaker diarization. In general, speaker diarization has difficulty with: 1) estimating the number of sound sources (speakers), and 2) activity detection of multiple sound sources including overlap of utterances. Several microphone array based techniques have already tackled these challenges. However, existing methods mainly assume that the steering vectors for the microphone array are calibrated in advance to identify sound sources, which is difficult to satisfy when ad-hoc or flexible microphone arrays are used. Thus, our approach estimates the number of sound sources blindly in two steps. First, the Time Delay of Arrival (TDOA) of the observed signal is clustered. Second, the sound source activity is detected by clustering the long-term spatial spectrum using the TDOA based steering vector for each cluster. The validity of the algorithm is confirmed by both synthesized signals and a real-world flexible microphone array application.},
  keywords = {array signal processing;blind source separation;microphone arrays;pattern clustering;speaker recognition;activity detection;uncalibrated microphone array;spatially distributed sound sources;speaker diarization;microphone array based techniques;flexible microphone arrays;sound source activity;TDOA based steering vector;real-world flexible microphone array application;blind spatial sound source clustering;Microphone arrays;Estimation;Robots;Histograms;Reverberation;Robustness},
  doi = {10.23919/EUSIPCO.2017.8081648},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345746.pdf},
}

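A standard building block for the first (TDOA clustering) step is a pairwise time-delay estimate such as GCC-PHAT. The sketch below (Python/NumPy) is a generic textbook version, not the authors' pipeline:

    import numpy as np

    def gcc_phat(x1, x2, fs):
        """Estimate the delay of x1 relative to x2, in seconds, via phase-transform GCC."""
        n = len(x1) + len(x2)
        X1, X2 = np.fft.rfft(x1, n), np.fft.rfft(x2, n)
        S = X1 * np.conj(X2)
        cc = np.fft.irfft(S / (np.abs(S) + 1e-12), n)           # PHAT weighting: keep phase only
        cc = np.concatenate((cc[-(n // 2):], cc[:n // 2 + 1]))  # reorder so zero lag is centered
        return (np.argmax(np.abs(cc)) - n // 2) / fs            # positive value: x1 lags x2

Clustering such TDOA estimates over time yields one cluster per quasi-stationary source, from which the cluster-wise steering vectors of the second step can be built.
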
@InProceedings{8081649,
  author = {D. Egea-Roca and G. Seco-Granados and J. A. López-Salcedo and S. Kim},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Space-time CUSUM for distributed quickest detection using randomly spaced sensors along a path},
  year = {2017},
  pages = {2443-2447},
  abstract = {This work investigates the distributed quickest detection problem, where a set of sensors receive independent observations and send messages to a fusion center, which makes a final decision. We are interested in detecting an event as soon as possible even though the set of affected sensors is unknown. We consider a scenario where the sensors are randomly spaced along a path, and the affected sensors are then assumed to be consecutive. Based on this assumption, we propose a solution based on the detection of a transient change in the spatial domain (i.e. across different sensors). This is done by applying a double CUSUM to detect both the appearance and disappearance of the change in the space samples. Numerical results are presented showing the superior performance of our proposed solution, for different scenarios, with respect to other methods in the literature.},
  keywords = {sensor fusion;signal detection;space-time CUSUM;randomly spaced sensors;distributed quickest detection problem;fusion center;Sensor fusion;Transient analysis;Time measurement;Europe;Signal processing},
  doi = {10.23919/EUSIPCO.2017.8081649},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345501.pdf},
}

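For reference, the classical scalar CUSUM recursion that the space-time (double) CUSUM builds on looks as follows. This is a generic sketch with hypothetical pre-/post-change means under Gaussian noise, not the paper's spatial-transient statistic:

    import numpy as np

    def cusum(x, mu0, mu1, sigma2, h):
        """Return the first index at which the CUSUM statistic crosses threshold h."""
        llr = (mu1 - mu0) / sigma2 * (x - 0.5 * (mu0 + mu1))  # per-sample log-likelihood ratio
        s = 0.0
        for n, l in enumerate(llr):
            s = max(0.0, s + l)   # CUSUM recursion: accumulate evidence, reset at zero
            if s > h:
                return n          # declare a change
        return None

    rng = np.random.default_rng(0)
    x = np.concatenate([rng.normal(0, 1, 200), rng.normal(1, 1, 200)])  # change at n = 200
    print(cusum(x, mu0=0.0, mu1=1.0, sigma2=1.0, h=10.0))

The double-CUSUM idea applies such recursions along the spatial sample index as well: once to detect where the affected run of sensors begins and once to detect where it ends.
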
@InProceedings{8081650,
  author = {W. Coventry and C. Clemente and J. Soraghan},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Enhancing polynomial MUSIC algorithm for coherent broadband sources through spatial smoothing},
  year = {2017},
  pages = {2448-2452},
  abstract = {Direction of arrival algorithms which exploit the eigenstructure of the spatial covariance matrix (such as MUSIC) encounter difficulties in the presence of strongly correlated sources. Since the broadband polynomial MUSIC is an extension of the narrowband version, it is unsurprising that the same issues arise. In this paper, we extend the spatial smoothing technique to broadband scenarios via spatially averaging polynomial spacetime covariance matrices. This is shown to restore the rank of the polynomial source covariance matrix. In the application of the polynomial MUSIC algorithm, the spatially smoothed spacetime covariance matrix greatly enhances the direction of arrival estimate in the presence of strongly correlated sources. Simulation results are presented that show the performance improvement gained using the new approach compared to the conventional non-smoothed method.},
  keywords = {covariance matrices;direction-of-arrival estimation;eigenvalues and eigenfunctions;polynomials;signal classification;smoothing methods;enhancing polynomial MUSIC algorithm;coherent broadband sources;spatial covariance matrix;strongly correlated sources;broadband polynomial MUSIC;narrowband version;spatial smoothing technique;spatially averaging polynomial spacetime covariance matrices;polynomial source covariance matrix;direction of arrival algorithms;direction of arrival estimate;spatially smoothed spacetime covariance matrix;Covariance matrices;Signal processing algorithms;Multiple signal classification;Smoothing methods;Narrowband;Broadband communication;Direction-of-arrival estimation},
  doi = {10.23919/EUSIPCO.2017.8081650},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347038.pdf},
}

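The rank-restoration step is easiest to see in the classical narrowband setting that the paper generalizes to polynomial space-time covariance matrices. A minimal sketch (Python/NumPy; noise-free, two fully coherent sources on an 8-element ULA; all sizes are hypothetical):

    import numpy as np

    def spatial_smoothing(R, L):
        """Average the covariance over all L-element subarrays of an M-element ULA."""
        M = R.shape[0]
        K = M - L + 1                       # number of overlapping subarrays
        Rs = np.zeros((L, L), dtype=complex)
        for k in range(K):
            Rs += R[k:k + L, k:k + L]
        return Rs / K

    M, L = 8, 5
    angles = np.deg2rad([10.0, 25.0])
    A = np.exp(1j * np.pi * np.outer(np.arange(M), np.sin(angles)))  # ULA steering matrix
    s = np.ones(2)                                    # fully coherent sources (identical waveforms)
    R = A @ np.outer(s, s.conj()) @ A.conj().T        # rank-1 covariance: MUSIC fails here
    print(np.linalg.matrix_rank(R), np.linalg.matrix_rank(spatial_smoothing(R, L)))  # 1, then 2

Averaging over the K = M - L + 1 subarrays decorrelates the coherent sources and restores rank two, which is the effect the paper carries over to the polynomial (broadband) case.
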
@InProceedings{8081651,
  author = {L. N. Ribeiro and S. Schwarz and M. Rupp and A. L. F. {de Almeida} and J. C. M. Mota},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A low-complexity equalizer for massive MIMO systems based on array separability},
  year = {2017},
  pages = {2453-2457},
  abstract = {In this paper, we propose a low-complexity equalizer for multiple-input multiple-output systems with large receive antenna arrays. The computational complexity reduction is achieved by exploiting array separability on a geometric channel model. This property suggests a two-stage receive processing, consisting of (i) sub-array beamforming and (ii) low-dimension minimum mean square error (MMSE) equalization. Simulations indicate that the proposed method outperforms the classical MMSE filter in terms of complexity, provided that the number of channel scatterers and the sub-array dimensions are not excessively large.},
  keywords = {antenna arrays;array signal processing;computational complexity;least mean squares methods;low-complexity equalizer;massive MIMO systems;array separability;antenna arrays;computational complexity reduction;geometric channel model;sub-array beamforming;square error equalization;sub-arrays dimensions;low-dimension minimum mean square error equalization;Tensile stress;Array signal processing;MIMO;Transmission line matrix methods;Equalizers;Receivers;Mathematical model},
  doi = {10.23919/EUSIPCO.2017.8081651},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346687.pdf},
}

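The second stage is a standard low-dimension linear MMSE equalizer; a generic sketch is given below (Python/NumPy). The paper's complexity saving comes from the sub-array beamforming stage that shrinks the effective channel H before this step, which is omitted here:

    import numpy as np

    def mmse_equalizer(H, sigma2):
        """W = (H^H H + sigma2 I)^{-1} H^H for y = H x + n (unit-power symbols assumed)."""
        Nt = H.shape[1]
        return np.linalg.solve(H.conj().T @ H + sigma2 * np.eye(Nt), H.conj().T)

    # Usage sketch (hypothetical sizes): x_hat = mmse_equalizer(H, sigma2) @ y

Because the beamforming stage reduces the row dimension of H, the matrix inverted here stays small even when the physical receive array is large.
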
@InProceedings{8081652,
  author = {R. Corvaja and A. G. Armada and M. Á. Vázquez and A. Pérez-Neira},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Design of pre-coding and combining in hybrid analog-digital massive MIMO with phase noise},
  year = {2017},
  pages = {2458-2462},
  abstract = {The design of massive MIMO, especially at millimeter waves, requires a trade-off between cost and power consumption, balancing the complexity and the performance in terms of achievable rate. A recent trend in the design is to split the pre-coding at the transmitter and the combining at the receiver into a digital and an analog part, with hybrid analog-digital schemes. In this paper, the effect of phase noise is considered in the design of different hybrid analog-digital alternatives to implement massive MIMO, in particular at very high frequencies. Its dependence on the number of RF chains, oscillators, and groups of antennas is analyzed, providing some insights for the system design. In order to limit the penalty introduced by the phase noise to values below 6 dB, with a number of antennas around 64, the phase-noise increment variance should be limited below 0.005. This limit is slightly lower in a simplified architecture with more blocks driven by independent oscillators.},
  keywords = {analogue-digital conversion;MIMO communication;phase noise;precoding;power consumption;hybrid analog-digital alternatives;hybrid analog-digital massive MIMO;pre-coding;phase noise increment variance;system design;hybrid analog-digital schemes;achievable rate;noise figure 6.0 dB;Radio frequency;Receivers;Phase noise;Transmitting antennas;MIMO;Massive MIMO;hybrid pre-coding and combining MIMO;phase noise},
  doi = {10.23919/EUSIPCO.2017.8081652},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341780.pdf},
}

@InProceedings{8081653,
  author = {R. Mourya and A. Ferrari and R. Flamary and P. Bianchi and C. Richard},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Distributed approach for deblurring large images with shift-variant blur},
  year = {2017},
  pages = {2463-2470},
  abstract = {Image deblurring techniques are effective tools to obtain a high-quality image from an acquired image degraded by blur and noise. In applications such as astronomy and satellite imaging, the size of acquired images can be extremely large (up to gigapixels), covering a wide field-of-view suffering from shift-variant blur. Most of the existing deblurring techniques are designed to be cost effective on a centralized computing system having a shared memory and possibly a multicore processor. The largest image they can handle is then limited by the memory capacity of the system. In this paper, we propose a distributed shift-variant image deblurring algorithm in which several connected processing units (each with reasonable computational resources) can simultaneously deblur different portions of a large image while maintaining a certain coherency among them to finally obtain a single crisp image. The proposed algorithm is based on a distributed Douglas-Rachford splitting algorithm with a specific structure of the penalty parameters used in the proximity operator. Numerical experiments show that the proposed algorithm produces images of similar quality to those of existing centralized techniques while being distributed and cost effective for extremely large images.},
  keywords = {image restoration;multiprocessing systems;shift-variant blur;image deblurring techniques;high quality image;astronomy;satellite imaging;centralized computing system;shared memory;distributed shift-variant image deblurring algorithm;distributed Douglas-Rachford splitting algorithm;multicore processor;penalty parameters;proximity operator;Image restoration;Interpolation;Signal processing algorithms;Optimization;Two dimensional displays;Distributed computing;Europe;Distributed optimization;proximal projection;shift-variant blur;inverse problems;image deblurring},
  doi = {10.23919/EUSIPCO.2017.8081653},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342870.pdf},
}

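For context, the serial Douglas-Rachford iteration underlying the distributed algorithm can be sketched as follows (Python/NumPy). The toy proximity operators and the penalty lam are hypothetical, and the paper's specific penalty-parameter structure and distribution across processing units are not shown:

    import numpy as np

    def douglas_rachford(prox_f, prox_g, z0, gamma=1.0, n_iter=100):
        """Minimize f(x) + g(x) given the proximity operators of f and g."""
        z = z0.copy()
        for _ in range(n_iter):
            x = prox_g(z, gamma)
            y = prox_f(2 * x - z, gamma)   # reflected proximal step
            z = z + y - x
        return prox_g(z, gamma)

    # Tiny example: soft-thresholded least squares (LASSO-like), hypothetical sizes.
    A = np.array([[1.0, 0.5], [0.2, 1.0]]); b = np.array([1.0, 0.5]); lam = 0.1
    prox_f = lambda v, g: np.linalg.solve(A.T @ A * g + np.eye(2), g * A.T @ b + v)
    prox_g = lambda v, g: np.sign(v) * np.maximum(np.abs(v) - lam * g, 0.0)
    print(douglas_rachford(prox_f, prox_g, np.zeros(2)))

In the deblurring setting, f would gather the data-fit terms of the image tiles and g the regularizer plus the coherency constraints along tile borders.
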
@InProceedings{8081654,
  author = {R. Flamary},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Astronomical image reconstruction with convolutional neural networks},
  year = {2017},
  pages = {2468-2472},
  abstract = {State of the art methods in astronomical image reconstruction rely on the resolution of a regularized or constrained optimization problem. Solving this problem can be computationally intensive and usually leads to a quadratic or at least superlinear complexity w.r.t. the number of pixels in the image. We investigate in this work the use of convolutional neural networks for image reconstruction in astronomy. With neural networks, the computationally intensive task is the training step, but the prediction step has a fixed complexity per pixel, i.e. a linear complexity. Numerical experiments show that our approach is both computationally efficient and competitive with other state of the art methods, in addition to being interpretable.},
  keywords = {astronomical image processing;astronomical techniques;image reconstruction;neural nets;optimisation;astronomical image reconstruction;convolutional neural networks;regularized constrained optimization problem;computationally intensive tasks;superlinear complexity;Image reconstruction;Neural networks;Convolution;Complexity theory;Training;Optimization},
  doi = {10.23919/EUSIPCO.2017.8081654},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340787.pdf},
}

@InProceedings{8081655,
  author = {A. Mukhopadhyay and P. S. Rajput and S. Srirangarajan},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A smartphone-based indoor localisation system using FM and Wi-Fi signals},
  year = {2017},
  pages = {2473-2477},
  abstract = {Indoor localisation has the potential to revolutionise the way people navigate indoors, similar to the tremendous impact that GPS has had on outdoor navigation. A number of solutions have been proposed for indoor localisation, but most rely on specialised hardware or on the presence of a strong (access point) infrastructure. Many places do not have such infrastructure, thus limiting the use of these indoor localisation technologies. We propose a smartphone-based solution using FM and Wi-Fi signals that uses commercial off-the-shelf hardware, which can be connected as and when required, and thus addresses some of the potential privacy concerns. We show through our experiments that the proposed system can be used even in areas with low FM and Wi-Fi signal coverage. Our system achieves a mean localisation error of 2.84 m with a 90th percentile error of 4.03 m. In addition, we show the robustness of our system in a realistic and challenging environment by using a 4-month-old training database.},
  keywords = {Global Positioning System;indoor navigation;indoor radio;sensor placement;smart phones;telecommunication security;wireless LAN;smartphone-based indoor localisation system;Wi-Fi signals;outdoor navigation;potential privacy concerns;Wi-Fi signal coverage;mean localisation error;commercial off-the-shelf hardware;GPS;access point infrastructure;time 4.0 month;Frequency modulation;Wireless fidelity;Training;Hardware;Databases;Smart phones},
  doi = {10.23919/EUSIPCO.2017.8081655},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347761.pdf},
}

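Fingerprinting systems of this kind typically reduce to a nearest-neighbour search in received-signal-strength (RSS) space. The following k-NN sketch (Python/NumPy) is illustrative only; the variable names and the averaging rule are assumptions, not the authors' exact matching scheme:

    import numpy as np

    def knn_localize(fingerprints, locations, rss, k=3):
        """Average the positions of the k training fingerprints closest in RSS space.

        fingerprints: (n_points, n_channels) surveyed FM/Wi-Fi RSS vectors (hypothetical layout)
        locations:    (n_points, 2) coordinates of the surveyed points
        rss:          (n_channels,) RSS vector measured at the unknown position
        """
        d = np.linalg.norm(fingerprints - rss, axis=1)
        idx = np.argsort(d)[:k]
        return locations[idx].mean(axis=0)

Combining FM and Wi-Fi channels simply means concatenating both RSS vectors into one fingerprint, which is what lets the system degrade gracefully when either signal has low coverage.
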
@InProceedings{8081656,
  author = {T. Schäck and M. Muma and A. M. Zoubir},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Computationally efficient heart rate estimation during physical exercise using photoplethysmographic signals},
  year = {2017},
  pages = {2478-2481},
  abstract = {Wearable devices that acquire photoplethysmographic (PPG) signals are becoming increasingly popular to monitor the heart rate during physical exercise. However, high accuracy and low computational complexity are conflicting requirements. We propose a method that provides highly accurate heart rate estimates at a very low computational cost in order to be implementable on wearables. To achieve the lowest possible complexity, only basic signal processing operations are used, i.e., correlation-based fundamental frequency estimation and spectral combination, harmonic noise damping, and frequency-domain tracking. The proposed approach outperforms state-of-the-art methods on current benchmark data considerably in terms of computation time, while achieving a similar accuracy.},
  keywords = {body sensor networks;electrocardiography;frequency estimation;medical signal processing;patient monitoring;photoplethysmography;signal denoising;fundamental frequency estimation;harmonic noise damping;frequency domain tracking;physical exercise;wearable devices;PPG;wearables;heart rate estimation;photoplethysmographic signals;computational complexity;signal processing operations;heart rate;correlation-based fundamental frequency estimation;spectral combination;Heart rate;Motion artifacts;Estimation;Biomedical monitoring;Acceleration;Computational complexity;Photoplethysmography (PPG);Heart Rate Estimation;Motion Artifacts (MA)},
  doi = {10.23919/EUSIPCO.2017.8081656},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346964.pdf},
}

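As an illustration of the correlation-based fundamental-frequency stage only (spectral combination, harmonic damping, and tracking are omitted), a minimal sketch in Python/NumPy with hypothetical heart-rate bounds:

    import numpy as np

    def hr_from_autocorr(ppg, fs, hr_min=40, hr_max=240):
        """Estimate heart rate (bpm) from the dominant autocorrelation lag of a PPG window."""
        x = ppg - ppg.mean()
        r = np.correlate(x, x, mode='full')[len(x) - 1:]   # autocorrelation, lags >= 0
        lo, hi = int(fs * 60 / hr_max), int(fs * 60 / hr_min)
        lag = lo + np.argmax(r[lo:hi])                     # strongest periodicity in range
        return 60.0 * fs / lag

Here ppg is a windowed segment a few seconds long; restricting the lag search to physiologically plausible rates is part of what keeps the estimator both robust and cheap.
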
@InProceedings{8081657,
  author = {D. R. Pipa and G. A. Guarneri and H. L. {de Moura}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Adaptive time corrected gain for ultrasound through time-varying Wiener deconvolution},
  year = {2017},
  pages = {2482-2485},
  abstract = {Ultrasound testing techniques, either nondestructive (NDT) or medical, suffer from spatial signal attenuation, where equivalent scatterers at different distances from the transducer will display different signal amplitudes. If not corrected, these differences may lead to erroneous interpretation. In NDT, Time Corrected Gain (TCG) compensates for spatial attenuation by increasing input gain according to the expected attenuation. Because TCG does not consider noise, signals coming from far scatterers are highly amplified, and so is noise. The Wiener filter, on the other hand, deals with noise optimally but does not compensate for spatial attenuation. In this paper, we propose an Adaptive TCG that combines the amplitude correction of the TCG with the optimality of the Wiener filter. We present simulations to evaluate the robustness of the proposed technique, as well as real-world results showing that the proposed method is superior to both the Wiener filter and classical TCG independently.},
  keywords = {deconvolution;Wiener filters;equivalent scatterers;NDT;spatial attenuation;Wiener filter;amplitude correction;classical TCG;time-varying Wiener deconvolution;ultrasound testing techniques;spatial signal attenuation;signal amplitudes;adaptive time corrected gain;adaptive TCG;Attenuation;Ultrasonic imaging;Transducers;Acoustics;Signal to noise ratio;Adaptation models;Europe},
  doi = {10.23919/EUSIPCO.2017.8081657},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347289.pdf},
}

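For context, a plain time-invariant frequency-domain Wiener deconvolution looks as follows (Python/NumPy). The proposed method replaces the fixed a-priori SNR with a time-varying, attenuation-aware one; that adaptation is not shown in this sketch:

    import numpy as np

    def wiener_deconvolve(y, h, snr):
        """Deconvolve y = h * x + n given pulse h and a scalar a-priori SNR estimate."""
        Y = np.fft.fft(y)
        H = np.fft.fft(h, n=len(y))
        G = np.conj(H) / (np.abs(H) ** 2 + 1.0 / snr)  # Wiener filter: inverse where SNR is high
        return np.real(np.fft.ifft(G * Y))

Intuitively, letting snr shrink with depth (to mirror the expected attenuation) yields amplitude correction like TCG while still suppressing noise where the signal is weak.
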
@InProceedings{8081658,
  author = {E. W. M. Antelo and D. R. Pipa},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Joint subsample time delay and echo template estimations for ultrasound signals},
  year = {2017},
  pages = {2486-2490},
  abstract = {In ultrasound applications, the signal obtained from a real data acquisition system is corrupted by noise and the echoes may have subsample time delays, which, in some cases, compromises scatterer localization. Most time delay estimation (TDE) techniques require a precise signal template, otherwise localization deteriorates. In this paper, we propose an alternative scheme that jointly estimates an echo template and time delays for several echoes from noisy measurements. Reinterpreting existing methods from a probabilistic perspective, we extend their functionalities through a joint application of a maximum likelihood estimator (MLE) and a maximum a posteriori (MAP) estimator. Finally, we present simulated results to demonstrate the superiority of the proposed method over traditional ones.},
  keywords = {delay estimation;maximum likelihood estimation;signal processing;scatterer localization;maximum a posteriori estimator;maximum likelihood estimator;echo template;precise signal template;time delay estimation techniques;subsample time delays;echoes;data acquisition system;ultrasound applications;ultrasound signals;template estimations;joint subsample time delay;Delay effects;Splines (mathematics);Smoothing methods;Maximum likelihood estimation;Interpolation;Ultrasonic imaging},
  doi = {10.23919/EUSIPCO.2017.8081658},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347334.pdf},
}

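A much-simplified alternating scheme conveys the joint-estimation idea: estimate (integer) delays against the current template, then re-estimate the template by aligning and averaging the echoes. Subsample refinement and the MLE/MAP machinery of the paper are omitted, and all names are hypothetical:

    import numpy as np

    def align_and_average(echoes, template, n_iter=5):
        """Alternate correlation-based delay estimation and template re-estimation."""
        T = len(template)
        delays = []
        for _ in range(n_iter):
            delays = [int(np.argmax(np.correlate(e, template, mode='valid')))
                      for e in echoes]                       # integer delay per echo
            template = np.mean([e[d:d + T] for e, d in zip(echoes, delays)], axis=0)
        return template, delays

Averaging across aligned echoes suppresses noise in the template, which in turn sharpens the next round of delay estimates; the paper makes this coupling rigorous via MLE (delays) and MAP (template) steps.
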
@InProceedings{8081659,
  author = {X. Tang and X. Zhang and J. Shi and S. Wei and L. Yu},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {SAR deception jamming target recognition based on the shadow feature},
  year = {2017},
  pages = {2491-2495},
  abstract = {SAR deception jamming is one of the most important jamming techniques; it overlays a group of fake targets onto the SAR images, which can greatly reduce the accuracy of SAR image interpretation. On the other hand, as a kind of active remote sensing technique, a SAR system has less diffuse scattering, and the shadow characteristic is more significant than in an optical system. In this paper, the shadow characteristics of the true and false targets are discussed via a simulation experiment, and a convolutional neural network (CNN) is applied for SAR deception jamming target recognition based on the shadow feature. Numerical experiments have shown that the CNN method can effectively distinguish the true and false targets through the shadow feature.},
  keywords = {jamming;neural nets;radar imaging;radar target recognition;synthetic aperture radar;true targets;false targets;SAR deception jamming target recognition;shadow feature;fake targets;SAR image interpretation;active remote sensing technique;shadow characteristics;diffuse scattering;convolutional neural network;Jamming;Convolution;Synthetic aperture radar;Target recognition;Rockets;Training;Europe;Deception targets;SAR;CNN;Shadow Feature},
  doi = {10.23919/EUSIPCO.2017.8081659},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345036.pdf},
}

@InProceedings{8081660,
  author = {G. A. Guarneri and D. R. Pipa and F. Neves and L. V. R. {de Arruda}},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A blind deconvolution approach to discontinuity location and characterization in ultrasonic nondestructive testing},
  year = {2017},
  pages = {2496-2500},
  abstract = {This paper presents a new algorithm for discontinuity location and characterization using A-scan signals from an ultrasonic inspection system. The algorithm is based on solving an inverse problem in which the observation model is different from that traditionally used. In this model, the input vector represents the location of the geometrical center of the discontinuity, and the scattering amplitude of the discontinuity is embedded in the impulse response of the ultrasonic inspection system. First, we jointly estimate the locations and the scattering amplitudes of the discontinuities from the acquired signals. Then, the geometrical parameters of the discontinuities are calculated from the estimated scattering amplitude. The method is tested to characterize side-drilled holes using both synthetic and real data. The results demonstrate the effectiveness of the algorithm.},
  keywords = {deconvolution;inspection;inverse problems;nondestructive testing;transient response;ultrasonic materials testing;ultrasonic scattering;impulse response;side-drilled holes;scattering amplitude;A-scan signals;ultrasonic nondestructive testing;discontinuity location;blind deconvolution approach;ultrasonic inspection system;geometrical center;observation model;inverse problem;Signal processing algorithms;Synchronous digital hierarchy;Scattering;Transducers;Acoustics;Approximation algorithms;Europe},
  doi = {10.23919/EUSIPCO.2017.8081660},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342793.pdf},
}

@InProceedings{8081661,
  author = {M. Klasson and S. I. Adalbjörnsson and J. Swärd and S. V. Andersen},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Conjugate-prior-regularized multinomial pLSA for collaborative filtering},
  year = {2017},
  pages = {2501-2505},
  abstract = {We consider the over-fitting problem for multinomial probabilistic Latent Semantic Analysis (pLSA) in collaborative filtering, using a regularization approach. For big data applications, the computational complexity is at a premium, and we therefore consider a maximum a posteriori approach based on conjugate priors, which ensures that the complexity of each step remains the same as that of the un-regularized method. In the numerical section, we show that the proposed regularization method and training scheme yield an improvement on commonly used data sets, as compared to previously proposed heuristics.},
  keywords = {Big Data;collaborative filtering;computational complexity;maximum likelihood estimation;probability;conjugate-prior-regularized multinomial pLSA;data sets;training scheme;regularization method;conjugate priors;maximum a posteriori approach;computational complexity;big data applications;regularization approach;multinomial probabilistic Latent Semantic Analysis;over-fitting problem;collaborative filtering;Data models;Training;Signal processing algorithms;Europe;Signal processing;Collaboration;Predictive models;Recommender systems;collaborative filtering;conjugate prior regularization;probabilistic latent semantic analysis},
  doi = {10.23919/EUSIPCO.2017.8081661},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342149.pdf},
}

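The key point is that the Dirichlet distribution is conjugate to the multinomial, so the MAP M-step stays a normalized (pseudo-)count and the per-iteration complexity is unchanged. A one-line sketch (Python/NumPy; alpha is a hypothetical hyperparameter, and this is not the full pLSA EM loop):

    import numpy as np

    def map_multinomial(counts, alpha=1.5):
        """MAP estimate under a symmetric Dirichlet(alpha) prior, alpha > 1:
        theta_k is proportional to counts_k + alpha - 1."""
        theta = counts + alpha - 1.0
        return theta / theta.sum()

    print(map_multinomial(np.array([5.0, 0.0, 1.0])))  # the zero count gets nonzero probability

Compared with the unregularized maximum-likelihood update (counts / counts.sum()), the only change is the additive pseudo-count, which is what counteracts over-fitting on sparse rating data at no extra cost.
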
@InProceedings{8081662,
  author = {A. Muravev and E. C. Ozan and A. Iosifidis and M. Gabbouj},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Pyramid encoding for fast additive quantization},
  year = {2017},
  pages = {2506-2510},
  abstract = {The problem of approximate nearest neighbor (ANN) search in Big Data has been tackled with a variety of recent methods. Vector quantization based solutions have been maintaining the dominant position, as they operate in the original data space, better preserving inter-point distances. Additive quantization (AQ) in particular has pushed the state-of-the-art in search accuracy, but high computational costs of encoding discourage the practical application of the method. This paper proposes pyramid encoding, a novel technique, which can replace the original beam search to provide a significant complexity reduction at the cost of a slight decrease in retrieval performance. AQ with pyramid encoding is experimentally shown to obtain results comparable with the baseline method in accuracy, while offering significant computational benefits.},
  keywords = {Big Data;computational complexity;search problems;pyramid encoding;fast additive quantization;approximate nearest neighbor search;Big Data;vector quantization based solutions;inter-point distances;AQ;original beam search;ANN;Encoding;Quantization (signal);Additives;Computational efficiency;Complexity theory;Europe;compact encoding;image retrieval;nearest neighbor search;vector quantization},
  doi = {10.23919/EUSIPCO.2017.8081662},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570339946.pdf},
}

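In additive quantization a vector is approximated as a sum of one codeword per codebook, x ≈ Σ_m C_m[k_m], and encoding means choosing that tuple of indices. The greedy residual sketch below (Python/NumPy) only illustrates the additive model; it is simpler and weaker than both the original beam search and the proposed pyramid scheme:

    import numpy as np

    def greedy_encode(x, codebooks):
        """Pick one codeword per codebook by greedily minimizing the residual norm.

        codebooks: list of (K, d) arrays (sizes hypothetical)."""
        residual = x.copy()
        codes = []
        for C in codebooks:
            k = int(np.argmin(((residual - C) ** 2).sum(axis=1)))
            codes.append(k)
            residual = residual - C[k]
        return codes

Beam search improves on this by keeping several candidate tuples alive at each level; pyramid encoding, per the abstract, restructures that search to cut its cost with only a slight accuracy loss.
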
@InProceedings{8081663,
  author = {A. Tsantekidis and N. Passalis and A. Tefas and J. Kanniainen and M. Gabbouj and A. Iosifidis},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Using deep learning to detect price change indications in financial markets},
  year = {2017},
  pages = {2511-2515},
  abstract = {Forecasting financial time-series has long been among the most challenging problems in financial market analysis. In order to recognize the correct circumstances to enter or exit the markets, investors usually employ statistical models (or even simple qualitative methods). However, the inherently noisy and stochastic nature of markets severely limits the forecasting accuracy of the used models. The introduction of electronic trading and the availability of large amounts of data allow for developing novel machine learning techniques that address some of the difficulties faced by the aforementioned methods. In this work we propose a deep learning methodology, based on recurrent neural networks, that can be used for predicting future price movements from large-scale high-frequency time-series data on Limit Order Books. The proposed method is evaluated using a large-scale dataset of limit order book events.},
  keywords = {economic forecasting;electronic trading;learning (artificial intelligence);pricing;recurrent neural nets;statistical analysis;stock markets;time series;price change indications;financial markets;financial time-series;financial market analysis;statistical models;electronic trading;deep learning methodology;recurrent neural networks;future price movements;limit order book events;machine learning;Mathematical model;Machine learning;Signal processing;Predictive models;Recurrent neural networks;Europe},
  doi = {10.23919/EUSIPCO.2017.8081663},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346870.pdf},
}

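A common way to cast such data as a supervised problem is to label each time step by the direction of the future mid-price move. A sketch with a hypothetical horizon and threshold (not necessarily the labelling used in the paper):

    import numpy as np

    def direction_labels(mid, horizon=10, thresh=1e-4):
        """Label each step +1 (up), -1 (down) or 0 (stable) from the relative
        mid-price change `horizon` events ahead; both parameters are hypothetical."""
        future = np.roll(mid, -horizon)
        rel = (future - mid) / mid
        labels = np.where(rel > thresh, 1, np.where(rel < -thresh, -1, 0))
        return labels[:-horizon]   # drop the wrapped-around tail

A recurrent network is then trained on windows of limit-order-book features to predict these labels, turning price-movement detection into sequence classification.
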
@InProceedings{8081664,
  author = {M. Kawamoto},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Sound-environment monitoring technique based on computational auditory scene analysis},
  year = {2017},
  pages = {2516-2520},
  abstract = {Monitoring techniques are a key technology for examining conditions in various scenarios (e.g., structural and weather conditions and disasters). The appropriate extraction of the features of these scenarios from observation data is important in understanding them. This study proposes a monitoring technique that allows sound environments to be expressed as a sound pattern. To this end, the concept of synesthesia is exploited. The keys, tones, and pitches of the monitored sound are expressed using the three elements of color, namely hue, saturation, and brightness, respectively. The hue, saturation, and brightness are assumed herein to be detected from the chromagram, sonogram, and sound spectrogram, respectively, based on a previous synesthesia experiment. The sound pattern can then be drawn using color, yielding a {"}painted sound map.{"} The usefulness of the proposed monitoring technique is verified using environmental sound data observed at a galleria.},
  keywords = {acoustic signal processing;audio signal processing;feature extraction;hearing;painted sound map;environmental sound data;computational auditory scene analysis;structural weather conditions;sound spectrogram;sound-environment monitoring technique;Monitoring;Histograms;Image color analysis;Spectrogram;Sonogram;Feature extraction;Brightness;Sound environment visualization;Environmental sounds;Monitoring;Painted sound patterns;Synesthesia},
  doi = {10.23919/EUSIPCO.2017.8081664},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342715.pdf},
}

@InProceedings{8081665,
  author = {A. Ladaycia and K. Abed-Meraim and A. Bader and M. S. Alouini},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {CFO and channel estimation for MISO-OFDM systems},
  year = {2017},
  pages = {2521-2525},
  abstract = {This study deals with the joint channel and carrier frequency offset (CFO) estimation in a Multiple Input Single Output (MISO) communications system. This problem arises in OFDM (Orthogonal Frequency Division Multiplexing) based multi-relay transmission protocols, such as the geo-routing protocol proposed by A. Bader et al. in 2012. Indeed, the outstanding performance of this multi-hop relaying scheme relies heavily on the channel and CFO estimation quality at the PHY layer. In this work, two approaches are considered: the first is based on estimating the overall channel (including the CFO) as a time-varying one, using an adaptive scheme under the assumption of small or moderate CFOs, while the second one performs the channel and CFO parameter estimation separately, based on the considered data model. The two solutions are analyzed and compared in terms of performance, cost, and convergence rate.},
  keywords = {channel estimation;OFDM modulation;routing protocols;wireless channels;geo-routing;multihop relaying scheme;PHY layer;adaptive scheme;convergence rate;channel estimation;MISO-OFDM systems;CFO parameter estimation;joint channel and carrier frequency offset estimation;OFDM based multirelay transmission protocols;multiple input single output communications system;orthogonal frequency division multiplexing;CFO estimation;OFDM;Channel estimation;Maximum likelihood estimation;Signal to noise ratio;Protocols;Europe},
  doi = {10.23919/EUSIPCO.2017.8081665},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347516.pdf},
}

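For orientation, the classical repeated-pilot (Moose-style) CFO estimator is sketched below (Python/NumPy); the paper's adaptive and separate-estimation schemes are more elaborate than this generic baseline:

    import numpy as np

    def cfo_estimate(y, N):
        """Estimate the normalized CFO (cycles per sample) from a received pilot y
        whose transmitted version consists of two identical half-blocks of length N,
        so any phase progression between the halves is due to the CFO."""
        corr = np.vdot(y[:N], y[N:2 * N])        # sum of conj(y1) * y2
        return np.angle(corr) / (2 * np.pi * N)

Because the angle of corr wraps at pi, the estimate is unambiguous only for offsets below 1/(2N) cycles per sample, which is one reason joint or adaptive channel/CFO schemes are attractive at larger offsets.
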
\n
\n\n\n
\n                    This study deals with the joint channel and carrier frequency offset (CFO) estimation in a Multiple Input Single Output (MISO) communications system. This problem arises in OFDM (Orthogonal Frequency Division Multiplexing) based multi-relay transmission protocols such as the geo-routing protocol proposed by A. Bader et al. in 2012. Indeed, the outstanding performance of this multi-hop relaying scheme relies heavily on the channel and CFO estimation quality at the PHY layer. In this work, two approaches are considered: the first is based on estimating the overall channel (including the CFO) as a time-varying one using an adaptive scheme, under the assumption of small or moderate CFOs, while the second estimates the channel and CFO parameters separately, based on the considered data model. The two solutions are analyzed and compared in terms of performance, cost and convergence rate.\n
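The contrast between the two approaches can be illustrated with a toy flat-fading model (a sketch under assumed parameters, not the paper's algorithm): the first folds the CFO into a time-varying channel coefficient tracked by LMS; the second then reads the CFO off separately, here from the average phase increment of the tracked coefficient.

```python
import numpy as np

rng = np.random.default_rng(1)
N, eps = 64, 0.02                  # FFT size and normalised CFO (assumed values)
h = (rng.standard_normal() + 1j * rng.standard_normal()) / np.sqrt(2)
mu, h_hat, hist = 0.05, 0j, []

# Approach 1: treat channel + CFO as one time-varying coefficient, track with LMS.
for n in range(4000):
    s = 2 * rng.integers(0, 2) - 1                      # BPSK pilot
    y = h * np.exp(2j * np.pi * eps * n / N) * s \
        + 0.01 * (rng.standard_normal() + 1j * rng.standard_normal())
    h_hat += mu * (y - h_hat * s) * np.conj(s)
    hist.append(h_hat)

# Approach 2: estimate the CFO separately, from the steady-state phase drift.
hist = np.array(hist[200:])
eps_hat = np.angle(np.mean(hist[1:] * np.conj(hist[:-1]))) * N / (2 * np.pi)
print(f"true CFO: {eps}, estimated: {eps_hat:.4f}")
```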
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rapid prototyping and FPGA-in-the-Loop verification of a DFrFT-based OFDM system.\n \n \n \n \n\n\n \n Kumar, A.; Magarini, M.; and Olivieri, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2526-2530, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RapidPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081666,\n  author = {A. Kumar and M. Magarini and S. Olivieri},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Rapid prototyping and FPGA-in-the-Loop verification of a DFrFT-based OFDM system},\n  year = {2017},\n  pages = {2526-2530},\n  abstract = {Orthogonal frequency division multiplexing (OFDM) based on the use of discrete fractional Fourier transform (DFrFT) has recently gained interest due to its lower sensitivity to synchronization errors in comparison with conventional OFDM based on the use of the discrete Fourier transform (DFT). Although this higher robustness to synchronization errors is a well-recognized fact, only few works are available in the literature that concern with DFrFT hardware implementation. In this work, we consider its implementation in a Field Programmable Gate Array (FPGA). To verify the design of the DFrFT-based OFDM system, we use FPGA-in-the-Loop (FIL) co-simulation method to evaluate bit error rate (BER) in presence of carrier frequency offset (CFO) when transmission takes place over a frequency selective Rayleigh fading channel.},\n  keywords = {discrete Fourier transforms;error statistics;field programmable gate arrays;OFDM modulation;Rayleigh channels;rapid prototyping;OFDM system;lower sensitivity;synchronization errors;DFrFT hardware implementation;bit error rate;frequency selective Rayleigh fading channel;CFO;DFrFT-based OFDM system;BER;orthogonal frequency division multiplexing;FPGA-in-the-loop verification;carrier frequency offset;field programmable gate array;discrete fractional Fourier transform;OFDM;Kernel;Receivers;Quantization (signal);Field programmable gate arrays;Software packages;Discrete Fourier transforms;OFDM;FPGA-in-the-Loop co-simulation;Rapid prototyping;carrier frequency offset (CFO);discrete fractional Fourier transform (DFrFT)},\n  doi = {10.23919/EUSIPCO.2017.8081666},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347458.pdf},\n}\n\n
\n
\n\n\n
\n                    Orthogonal frequency division multiplexing (OFDM) based on the discrete fractional Fourier transform (DFrFT) has recently gained interest due to its lower sensitivity to synchronization errors in comparison with conventional OFDM based on the discrete Fourier transform (DFT). Although this higher robustness to synchronization errors is a well-recognized fact, only a few works in the literature are concerned with the DFrFT hardware implementation. In this work, we consider its implementation in a Field Programmable Gate Array (FPGA). To verify the design of the DFrFT-based OFDM system, we use the FPGA-in-the-Loop (FIL) co-simulation method to evaluate the bit error rate (BER) in the presence of carrier frequency offset (CFO) when transmission takes place over a frequency-selective Rayleigh fading channel.\n
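In essence, an FIL flow compares a bit-exact fixed-point model of the design against its floating-point reference. The sketch below mimics that comparison in software for a toy OFDM link over AWGN: the receiver's transform input is quantized to a signed fixed-point grid and the resulting BER is compared with the unquantized receiver. Word length, modulation, and channel are assumptions for illustration, and a plain DFT stands in for the DFrFT kernel.

```python
import numpy as np

rng = np.random.default_rng(2)
N, n_sym, snr_db, frac_bits = 64, 500, 8, 6   # assumed parameters

def quantize(x, frac_bits):
    """Round to a signed fixed-point grid with `frac_bits` fractional bits,
    mimicking the word length of a hardware implementation."""
    scale = 2.0 ** frac_bits
    return np.round(x.real * scale) / scale + 1j * np.round(x.imag * scale) / scale

bits = rng.integers(0, 2, (n_sym, N))
tx = np.fft.ifft(2 * bits - 1, axis=1) * np.sqrt(N)     # unit-power BPSK OFDM symbols
noise_std = 10 ** (-snr_db / 20) / np.sqrt(2)
rx = tx + noise_std * (rng.standard_normal(tx.shape) + 1j * rng.standard_normal(tx.shape))

for label, r in [("float      ", rx), ("fixed-point", quantize(rx, frac_bits))]:
    demod = np.fft.fft(r, axis=1) / np.sqrt(N)          # receiver transform
    print(label, "BER:", np.mean((demod.real > 0) != bits))
```

In an actual FIL run, the quantized branch executes on the FPGA while the host feeds stimuli and counts errors.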
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal decentralized coded caching for heterogeneous files.\n \n \n \n \n\n\n \n Cheng, H.; Li, C.; Xiong, H.; and Frossard, P.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2531-2535, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OptimalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081667,\n  author = {H. Cheng and C. Li and H. Xiong and P. Frossard},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Optimal decentralized coded caching for heterogeneous files},\n  year = {2017},\n  pages = {2531-2535},\n  abstract = {Caching is a technique to reduce the peak network load by pre-fetching some popular contents at the local caches of end users. Coded caching can facilitate and exploit the coded-multicasting opportunities for users with different demands, resulting in an additional and significant reduction of the peak traffic. However, most existing researches on coded caching are limited by the assumption that all files to be delivered have the same size. We show in this paper that current schemes can only achieve suboptimal performance when the files have different sizes. To address this, we propose a novel optimization strategy for coded caching that minimizes the worst-case transmission rate of multicasting the coded content upon users requests, subject to the storage constraint at the local caches, by the optimal allocation of the caching proportion among heterogeneous files. In order to efficiently solve this problem, we develop a practical algorithm by using the Lagrange multiplier method and the sequential quadratic programming (SQP) approach. Experiment results show that the worst-case transmission rate can be reduced by the proposed scheme compared to state-of-the-art coded caching schemes. It certainly offers an important advantage in the deployment of data delivery systems.},\n  keywords = {cache storage;data handling;encoding;file organisation;optimisation;quadratic programming;telecommunication congestion control;heterogeneous files;local caches;worst-case transmission rate;coded multicasting;decentralized coded caching;Optimal coded caching;Lagrange multiplier method;sequential quadratic programming;data delivery system;Optimization;Resource management;Servers;Signal processing;Signal processing algorithms;Europe;Caching;coded caching;heterogeneous content distribution},\n  doi = {10.23919/EUSIPCO.2017.8081667},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347028.pdf},\n}\n\n
\n
\n\n\n
\n                    Caching is a technique to reduce the peak network load by pre-fetching popular contents at the local caches of end users. Coded caching can facilitate and exploit coded-multicasting opportunities for users with different demands, resulting in an additional and significant reduction of the peak traffic. However, most existing research on coded caching is limited by the assumption that all files to be delivered have the same size. We show in this paper that current schemes can only achieve suboptimal performance when the files have different sizes. To address this, we propose a novel optimization strategy for coded caching that minimizes the worst-case transmission rate of multicasting the coded content upon users' requests, subject to the storage constraint at the local caches, by optimally allocating the caching proportion among heterogeneous files. In order to solve this problem efficiently, we develop a practical algorithm using the Lagrange multiplier method and the sequential quadratic programming (SQP) approach. Experimental results show that the proposed scheme reduces the worst-case transmission rate compared to state-of-the-art coded caching schemes, which offers an important advantage in the deployment of data delivery systems.\n
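The SQP mechanics can be shown on a deliberately simplified stand-in for the paper's problem: minimize a toy worst-case delivery rate max_i F_i(1 - q_i) over per-file caching fractions q_i, under a linear cache budget, via an epigraph variable and SLSQP. The file sizes, budget, and rate expression are assumptions; the paper's actual rate function is different.

```python
import numpy as np
from scipy.optimize import minimize

F = np.array([8.0, 4.0, 2.0, 1.0])   # heterogeneous file sizes (assumed)
M = 5.0                               # cache budget (assumed)
n = len(F)

# Variables: caching fractions q (one per file) plus the epigraph variable t,
# so "minimize the worst case" becomes "minimize t s.t. F_i(1-q_i) <= t".
x0 = np.r_[np.full(n, M / F.sum()), F.max()]
cons = [
    {"type": "ineq", "fun": lambda x: M - F @ x[:n]},            # cache budget
    {"type": "ineq", "fun": lambda x: x[n] - F * (1 - x[:n])},   # epigraph constraints
]
res = minimize(lambda x: x[n], x0, method="SLSQP", constraints=cons,
               bounds=[(0, 1)] * n + [(0, None)])
print("caching fractions:", res.x[:n].round(3), " worst-case rate:", res.x[n].round(3))
```

As expected, the solver caches proportionally more of the larger files until the worst-case terms equalize or the budget binds.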
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Interference MAC: Impact of improper Gaussian signaling on the rate region Pareto boundary.\n \n \n \n \n\n\n \n Kariminezhad, A.; Chaaban, A.; and Sezgin, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2536-2540, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"InterferencePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081668,\n  author = {A. Kariminezhad and A. Chaaban and A. Sezgin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Interference MAC: Impact of improper Gaussian signaling on the rate region Pareto boundary},\n  year = {2017},\n  pages = {2536-2540},\n  abstract = {Meeting the challenges of 5G demands better exploitation of the available spectrum by allowing multiple parties to share resources. For instance, a secondary unlicensed system can share resources with the cellular uplink of a primary licensed system for an improved spectral efficiency. This induces interference which has to be taken into account when designing such a system. A simple yet robust strategy is treating interference as noise (TIN), which is widely adapted in practice. It is thus important to study the capabilities and limitations of TIN in such scenarios. In this paper, we study this scenario modelled as multiple access channel (MAC) interfered by a Point-to-Point (P2P) channel, where we focus on the characterization of the rate region. We use improper Gaussian signaling (instead of proper) at the transmitters to increase the design flexibility, which offers the freedom of optimizing the transmit signal pseudo-variance in addition to its variance. We formulate the weighted max-min problem as a semidefinite program, and use semidefinite relaxation (SDR) to obtain a near-optimal solution. Numerical optimizations show that, by improper Gaussian signaling the achievable rates can be improved upto three times when compared to proper Gaussian signaling.},\n  keywords = {cellular radio;Gaussian processes;interference (signal);multi-access systems;Pareto optimisation;radio spectrum management;radiofrequency interference;telecommunication signalling;wireless channels;secondary unlicensed system;cellular uplink;primary licensed system;improved spectral efficiency;scenario modelled;multiple access channel;improper Gaussian signaling;transmit signal pseudovariance;interference MAC;rate region pareto boundary;point-to-point channel;5G communication;multiple access channel interference;Interference;Tin;Receivers;Transmitters;Integrated circuits;Device-to-device communication;Europe;Improper Gaussian signaling;rate maximization;partial interference multiple access channel;Pareto boundary;augmented covariance matrix},\n  doi = {10.23919/EUSIPCO.2017.8081668},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346395.pdf},\n}\n\n
\n
\n\n\n
\n                    Meeting the challenges of 5G demands better exploitation of the available spectrum by allowing multiple parties to share resources. For instance, a secondary unlicensed system can share resources with the cellular uplink of a primary licensed system for improved spectral efficiency. This induces interference, which has to be taken into account when designing such a system. A simple yet robust strategy is treating interference as noise (TIN), which is widely adopted in practice. It is thus important to study the capabilities and limitations of TIN in such scenarios. In this paper, we study this scenario modelled as a multiple access channel (MAC) interfered with by a point-to-point (P2P) channel, where we focus on the characterization of the rate region. We use improper Gaussian signaling (instead of proper) at the transmitters to increase the design flexibility, which offers the freedom of optimizing the transmit signal pseudo-variance in addition to its variance. We formulate the weighted max-min problem as a semidefinite program, and use semidefinite relaxation (SDR) to obtain a near-optimal solution. Numerical optimizations show that improper Gaussian signaling can improve the achievable rates by up to three times compared to proper Gaussian signaling.\n
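The extra degree of freedom is the pseudo-variance E[x^2], which is zero for proper signals. A minimal sketch of generating an improper Gaussian signal with a prescribed variance and (real, for simplicity) pseudo-variance, by giving the real and imaginary parts unequal powers:

```python
import numpy as np

rng = np.random.default_rng(3)

def improper_gaussian(n, sigma2=1.0, tau=0.6):
    """Zero-mean complex Gaussian with variance sigma2 = E[|x|^2] and real
    pseudo-variance tau = E[x^2]; tau = 0 recovers the proper case.
    Requires |tau| <= sigma2. A complex tau would additionally correlate
    the real and imaginary parts."""
    var_re = (sigma2 + tau) / 2
    var_im = (sigma2 - tau) / 2
    return (np.sqrt(var_re) * rng.standard_normal(n)
            + 1j * np.sqrt(var_im) * rng.standard_normal(n))

x = improper_gaussian(200_000, sigma2=1.0, tau=0.6)
print("variance       :", np.mean(np.abs(x) ** 2).round(3))   # ~1.0
print("pseudo-variance:", np.mean(x ** 2).round(3))           # ~0.6
```

It is exactly this pseudo-variance (equivalently, the augmented covariance matrix) that the semidefinite program optimizes alongside the ordinary variance.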
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Constant modulus beamforming for large-scale MISOME wiretap channel.\n \n \n \n \n\n\n \n Li, Q.; Li, C.; and Lin, J.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2541-2545, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ConstantPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081669,\n  author = {Q. Li and C. Li and J. Lin},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Constant modulus beamforming for large-scale MISOME wiretap channel},\n  year = {2017},\n  pages = {2541-2545},\n  abstract = {The multi-input single-output multi-eavesdropper (MISOME) wiretap channel is one of the generic wiretap channels in physical layer security. In Khisti and Wornell's classical work [1], the optimal secure beamformer for MISOME has been derived under the total power constraint. In this work, we revisit the MISOME wiretap channel and focus on the large-scale transmit antenna regime and the constant modulus beamformer design. The former is motivated by the significant spectral efficiency gains provided by massive antennas, and the latter is due to the consideration of cheap hardware implementation of constant modulus beamforming. However, from an optimization point of view, the secrecy beamforming with constant modulus constraints is challenging, more specifically, NP-hard. In light of this, we propose two methods to tackle it, namely the semidefinite relaxation (SDR) method and the ADMM-Dinkelbach method. Simulation results demonstrate that the ADMM-Dinkelbach method outperforms the SDR method, and can attain nearly optimal secrecy performance for the large-scale antenna scenario.},\n  keywords = {array signal processing;mathematical programming;MIMO communication;telecommunication security;transmitting antennas;wireless channels;MISOME wiretap channel;constant modulus beamformer design;ADMM-Dinkelbach method;physical layer security;transmit antenna;spectral efficiency gains;Wornells classical work;secure beamformer;multiinput single-output multieavesdropper wiretap channel;NP-hard;semidefinite relaxation method;Array signal processing;Signal processing algorithms;Transmitting antennas;Europe;Antenna arrays},\n  doi = {10.23919/EUSIPCO.2017.8081669},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570342239.pdf},\n}\n\n
\n
\n\n\n
\n                    The multi-input single-output multi-eavesdropper (MISOME) wiretap channel is one of the generic wiretap channels in physical layer security. In Khisti and Wornell's classical work [1], the optimal secure beamformer for MISOME was derived under a total power constraint. In this work, we revisit the MISOME wiretap channel and focus on the large-scale transmit antenna regime and the constant modulus beamformer design. The former is motivated by the significant spectral efficiency gains provided by massive antennas, and the latter by the cheap hardware implementation of constant modulus beamforming. However, from an optimization point of view, secrecy beamforming with constant modulus constraints is challenging; more specifically, it is NP-hard. In light of this, we propose two methods to tackle it, namely the semidefinite relaxation (SDR) method and the ADMM-Dinkelbach method. Simulation results demonstrate that the ADMM-Dinkelbach method outperforms the SDR method and can attain nearly optimal secrecy performance in the large-scale antenna scenario.\n
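The constant modulus set is non-convex, which is what makes the problem NP-hard. The basic building block inside ADMM-style designs is nevertheless simple: the projection onto that set keeps each element's phase and fixes its magnitude. The sketch below shows only that projection applied to a matched-filter beamformer (ignoring the eavesdroppers and the Dinkelbach outer loop entirely; array size and channel are assumptions):

```python
import numpy as np

rng = np.random.default_rng(4)
N = 64                                    # large-scale transmit array (assumed)
h = (rng.standard_normal(N) + 1j * rng.standard_normal(N)) / np.sqrt(2)

# Unconstrained matched-filter (MRT) beamformer under a total power constraint.
w_mrt = h / np.linalg.norm(h)

# Per-element projection onto the constant modulus set |w_i| = 1/sqrt(N):
# keep the phase, fix the magnitude. This is the sub-step such ADMM methods
# iterate; it is not the paper's full ADMM-Dinkelbach algorithm.
w_cm = np.exp(1j * np.angle(h)) / np.sqrt(N)

gain = lambda w: abs(np.vdot(w, h)) ** 2          # |w^H h|^2
print("MRT gain:", gain(w_mrt).round(2), " constant-modulus gain:", gain(w_cm).round(2))
```

For i.i.d. channels the phase-only beamformer loses only a small fraction of the array gain (asymptotically a factor of pi/4), which is why constant modulus designs remain attractive for cheap hardware.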
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust precoding scheme for multi-user MIMO visible light communication system.\n \n \n \n \n\n\n \n Zeng, Z.; and Du, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2546-2550, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081670,\n  author = {Z. Zeng and H. Du},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Robust precoding scheme for multi-user MIMO visible light communication system},\n  year = {2017},\n  pages = {2546-2550},\n  abstract = {This paper considers a multi-user multiple-input multiple-output (MU-MIMO) visible light communication (VLC) interference channel. The multi-user interference (MUI) can be successfully eliminated with the perfect knowledge of channel state information (CSI). However, the perfect information may not be available at transmitter, which will lead to severe interference and consequently degrade the system performance. Robust precoding design with the assist of block diagonalization (BD) scheme is proposed to minimize the mean square error (MMSE), which not only completely suppresses the MUI but also maximizes the sum rate of the MU VLC system. Simulation results are presented to validate the effectiveness of the proposed algorithm.},\n  keywords = {channel coding;free-space optical communication;interference suppression;least mean squares methods;light interference;MIMO communication;multiuser channels;precoding;robust precoding scheme;multiuser MIMO visible light communication system;channel state information;block diagonalization scheme;multiuser interference suppression;MUI suppression;system performance degradation;MU-MIMO VLC system;transmitter;mean square error minimization;MMSE;BD scheme;multiuser multiple input multiple-output visible light communication interference channel;Light emitting diodes;Robustness;Precoding;MIMO;Optical transmitters;Signal processing algorithms;Correlation},\n  doi = {10.23919/EUSIPCO.2017.8081670},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347493.pdf},\n}\n\n
\n
\n\n\n
\n                    This paper considers a multi-user multiple-input multiple-output (MU-MIMO) visible light communication (VLC) interference channel. The multi-user interference (MUI) can be successfully eliminated with perfect knowledge of the channel state information (CSI). However, perfect CSI may not be available at the transmitter, which leads to severe interference and consequently degrades the system performance. A robust precoding design aided by a block diagonalization (BD) scheme is proposed under the minimum mean square error (MMSE) criterion, which not only completely suppresses the MUI but also maximizes the sum rate of the MU VLC system. Simulation results are presented to validate the effectiveness of the proposed algorithm.\n
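The BD ingredient on its own is standard and worth seeing: user k's precoder is constrained to the null space of all other users' stacked channels, so the multi-user interference vanishes by construction. A minimal sketch with assumed dimensions and random real-valued channels (the paper adds robustness to imperfect CSI on top of this):

```python
import numpy as np

rng = np.random.default_rng(5)
Nt, Nr, K = 8, 2, 3                       # LEDs, photodiodes per user, users (assumed)
H = [rng.standard_normal((Nr, Nt)) for _ in range(K)]

def bd_precoders(H, Nr):
    """Block diagonalization: user k's precoder lies in the null space of the
    other users' stacked channels, cancelling multi-user interference."""
    precoders = []
    for k in range(len(H)):
        H_others = np.vstack([Hj for j, Hj in enumerate(H) if j != k])
        _, _, Vt = np.linalg.svd(H_others)
        null = Vt[H_others.shape[0]:].T   # basis of the interference null space
        precoders.append(null[:, :Nr])    # keep Nr streams for user k
    return precoders

W = bd_precoders(H, Nr)
# Interference leakage H_j @ W_k should be ~0 for j != k:
print(max(np.linalg.norm(H[j] @ W[k]) for j in range(K) for k in range(K) if j != k))
```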
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A continuous cost function for the reconstruction of wired networks from reflection measurements.\n \n \n \n \n\n\n \n Blum, S.; Ulrich, M.; and Yang, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2551-2555, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081671,\n  author = {S. Blum and M. Ulrich and B. Yang},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A continuous cost function for the reconstruction of wired networks from reflection measurements},\n  year = {2017},\n  pages = {2551-2555},\n  abstract = {We present in this work a novel approach for the reconstruction of wired network topologies from reflection measurements. Existing approaches state the network reconstruction as discrete optimization problem, which is difficult to solve. The (discrete) topology is optimized while the cable lengths are a secondary result. The contribution of this paper is the formulation of the topology reconstruction as a continuous problem. The idea is to rather optimize the (continuous) cable lengths and automatically obtain the topology as a secondary result. Further we present a heuristic algorithm to solve the optimization approximately. Using simulated reflectometry data, we demonstrate the performance of our approach.},\n  keywords = {approximation theory;network topology;optimisation;wires (electric);continuous problem;topology reconstruction;cable lengths;discrete optimization problem;network reconstruction;wired network topologies;reflection measurements;wired networks;continuous cost function;Topology;Network topology;Cost function;Frequency measurement;Wires;Power cables},\n  doi = {10.23919/EUSIPCO.2017.8081671},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347044.pdf},\n}\n\n
\n
\n\n\n
\n                    We present in this work a novel approach for the reconstruction of wired network topologies from reflection measurements. Existing approaches state the network reconstruction as a discrete optimization problem, which is difficult to solve. The (discrete) topology is optimized while the cable lengths are a secondary result. The contribution of this paper is the formulation of the topology reconstruction as a continuous problem. The idea is to instead optimize the (continuous) cable lengths and obtain the topology automatically as a secondary result. Furthermore, we present a heuristic algorithm to solve the optimization approximately. Using simulated reflectometry data, we demonstrate the performance of our approach.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A new SA-PNC scheme for uplink HetNets.\n \n \n \n \n\n\n \n Ali, S. S.; Castanheira, D.; Silva, A.; and Gameiro, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2556-2560, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081672,\n  author = {S. S. Ali and D. Castanheira and A. Silva and A. Gameiro},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A new SA-PNC scheme for uplink HetNets},\n  year = {2017},\n  pages = {2556-2560},\n  abstract = {Mobile traffic in cellular based networks is increasing exponentially, mainly due to the use of data intensive services like video. Network operators are urged to explore new technologies in order to enhance the capacity, data rates and maximizing the utilization of available spectrum resources. One effective way to cope with these demands is to reduce the cell-size by deploying small-cells along the coverage area of the current macro-cell system. In this paper, we consider the uplink of heterogeneous network with a number of small-cells coexist with a macro-cell under the same frequency band. To deal with intertier/system interference, we combine signal alignment (SA) based precoding at the small-cell transmitters in conjunction with physical network coding (PNC) at the macro-receiver. The joint design of SA and PNC provides higher system degrees of freedom (DoF), than the case where only PNC or interference alignment (IA) is employed individually. The results show that the proposed scheme is robust to inter-tier/system interference while allowing to increase the overall data rate, by serving more users, as compared with the IA based methods.},\n  keywords = {cellular radio;network coding;precoding;radio spectrum management;radiofrequency interference;telecommunication traffic;network operators;data rate;available spectrum resources;cell-size;coverage area;current macro-cell system;heterogeneous network;frequency band;intertier/system interference;signal alignment;small-cell transmitters;physical network;macro-receiver;interference alignment;IA based methods;SA-PNC scheme;mobile traffic;cellular based networks;data intensive services;uplink HetNets;Interference;Precoding;MIMO;Uplink;Signal processing;Network coding;Information exchange;Signal Alignment;Physical Network Coding;Heterogeneous Networks;Small/Macro-cell system;Uplink},\n  doi = {10.23919/EUSIPCO.2017.8081672},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340466.pdf},\n}\n\n
\n
\n\n\n
\n                    Mobile traffic in cellular networks is increasing exponentially, mainly due to the use of data-intensive services like video. Network operators are urged to explore new technologies in order to enhance capacity and data rates and to maximize the utilization of the available spectrum resources. One effective way to cope with these demands is to reduce the cell size by deploying small cells across the coverage area of the current macro-cell system. In this paper, we consider the uplink of a heterogeneous network in which a number of small cells coexist with a macro-cell in the same frequency band. To deal with inter-tier/system interference, we combine signal alignment (SA) based precoding at the small-cell transmitters with physical network coding (PNC) at the macro-receiver. The joint design of SA and PNC provides higher system degrees of freedom (DoF) than the case where only PNC or interference alignment (IA) is employed individually. The results show that the proposed scheme is robust to inter-tier/system interference while increasing the overall data rate, by serving more users, compared with the IA based methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A robust signal quantization system based on error correcting codes.\n \n \n \n \n\n\n \n Jili, F. E.; Mahé, G.; and Mboup, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2561-2565, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081673,\n  author = {F. E. Jili and G. Mahé and M. Mboup},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A robust signal quantization system based on error correcting codes},\n  year = {2017},\n  pages = {2561-2565},\n  abstract = {In this paper we propose a robust representation of a digital signal based on error correction codes. For each frame of the signal (N successive samples) a binary decomposition, as a (successive power 2) weighted sum of binary vectors, is first considered. Then, each binary vector is projected into the set of codewords of a corresponding block code. The codes are designed so that their correction powers increases inversely to the weight of the binary vectors since the binary vectors with high weight are less sensitive to disturbance. The corresponding representation (decoding) thus appears as a form of signal quantization that can provide an interesting protection against noise and/or channel distorsion. Some applications showing the utility of the proposed representation are given.},\n  keywords = {binary codes;block codes;error correction codes;quantisation (signal);binary vector weighted sum;error correction codes;error correcting codes;robust signal quantization system;correction powers;corresponding block code;binary decomposition;N;Signal to noise ratio;Decoding;Channel coding;Error correction codes;Quantization (signal)},\n  doi = {10.23919/EUSIPCO.2017.8081673},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347657.pdf},\n}\n\n
\n
\n\n\n
\n                    In this paper we propose a robust representation of a digital signal based on error correction codes. For each frame of the signal (N successive samples), a binary decomposition as a weighted sum of binary vectors (the weights being successive powers of 2) is first considered. Then, each binary vector is projected onto the set of codewords of a corresponding block code. The codes are designed so that their correction power increases inversely with the weight of the binary vectors, since the binary vectors with high weight are less sensitive to disturbance. The corresponding representation (decoding) thus appears as a form of signal quantization that can provide an interesting protection against noise and/or channel distortion. Some applications showing the utility of the proposed representation are given.\n
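A toy version of the construction, assuming 8-bit samples and repetition codes as the block codes (the paper's codes and code-strength schedule are certainly more refined): each bit plane is projected onto the nearest codeword, with longer (stronger) repetition blocks on the low-weight planes.

```python
import numpy as np

rng = np.random.default_rng(6)
x = rng.integers(0, 256, 64).astype(np.uint8)       # one frame of 8-bit samples

def project_plane(bits, block):
    """Project a binary vector onto the nearest codeword of a length-`block`
    repetition code (majority vote per block) - a stand-in for the paper's
    block codes."""
    if block == 1:
        return bits
    pad = (-len(bits)) % block
    b = np.r_[bits, np.zeros(pad, np.uint8)].reshape(-1, block)
    return np.repeat(b.mean(1) >= 0.5, block)[:len(bits)].astype(np.uint8)

# Stronger correction (longer blocks) for low-weight planes, mirroring
# "correction power increases inversely with the weight" (schedule assumed).
blocks = [8, 8, 4, 4, 2, 2, 1, 1]                   # plane 0 (LSB) ... plane 7 (MSB)
planes = [(x >> b) & 1 for b in range(8)]
x_q = sum(project_plane(p, blocks[b]).astype(int) << b for b, p in enumerate(planes))
print("max abs quantization error:", np.max(np.abs(x.astype(int) - x_q)))
```

The projection is exactly the "decoding as quantization" step: it distorts mainly the low-weight planes, which is where disturbances would land anyway.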
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-TOA based position estimation for IR-UWB.\n \n \n \n \n\n\n \n Floriach, G.; Nájar, M.; and Navarro, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2566-2570, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-TOAPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081674,\n  author = {G. Floriach and M. Nájar and M. Navarro},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multi-TOA based position estimation for IR-UWB},\n  year = {2017},\n  pages = {2566-2570},\n  abstract = {The problem of localizing an IR-UWB transmitter from the signals received at several anchors is considered. The positioning problem is typically solved in a two-step approach where in the first step the Time of Arrival (TOA) is estimated independently at each anchor, and the position estimate is found in a second step. However, this approach can be improved, especially in challenging scenarios, if the positioning problem is treat as a whole, that is, the target position is estimated directly from the signals received on each anchor (Direct Position estimation DPE). In this paper, we present a different approach that sits halfway between these two approaches. The algorithm is based on a soft two-steps approach, where several possible TOA estimators are selected in the first step, and then the best estimators are used to find the position. The performance of the method is assessed under the framework of the IEEE 802.15.4a channel models.},\n  keywords = {radio transmitters;time-of-arrival estimation;ultra wideband communication;wireless channels;Zigbee;IEEE 802.15.4a channel models;DPE;direct position estimation;two-steps approach;different approach;target position;two-step approach;positioning problem;IR-UWB transmitter;multiTOA based position estimation;Estimation;Delays;Signal processing algorithms;Frequency-domain analysis;Cost function;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081674},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347550.pdf},\n}\n\n
\n
\n\n\n
\n                    The problem of localizing an IR-UWB transmitter from the signals received at several anchors is considered. The positioning problem is typically solved in a two-step approach, where in the first step the Time of Arrival (TOA) is estimated independently at each anchor, and the position estimate is found in a second step. However, this approach can be improved, especially in challenging scenarios, if the positioning problem is treated as a whole, that is, the target position is estimated directly from the signals received at each anchor (Direct Position Estimation, DPE). In this paper, we present a different approach that sits halfway between these two. The algorithm is based on a soft two-step approach, where several possible TOA estimates are selected in the first step, and the best estimates are then used to find the position. The performance of the method is assessed under the framework of the IEEE 802.15.4a channel models.\n
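A sketch of the soft two-step idea (assumed geometry and candidate model, not the paper's estimator): each anchor keeps two TOA candidates, a near-true one and a spurious multipath-like one, and the second step picks the candidate combination whose linearised least-squares fix has the smallest geometric residual.

```python
import numpy as np
from itertools import product

rng = np.random.default_rng(7)
anchors = np.array([[0, 0], [10, 0], [0, 10], [10, 10]], float)
p_true = np.array([3.0, 7.0])

def solve_ls(ranges):
    """Linearised multilateration (subtract the first anchor's equation),
    returning the position fix and its range residual."""
    a0, r0 = anchors[0], ranges[0]
    A = 2 * (anchors[1:] - a0)
    b = r0**2 - ranges[1:]**2 + np.sum(anchors[1:]**2, 1) - np.sum(a0**2)
    p, *_ = np.linalg.lstsq(A, b, rcond=None)
    res = np.sum((np.linalg.norm(anchors - p, axis=1) - ranges) ** 2)
    return p, res

# Soft first step: two range candidates per anchor (true + 2 m multipath bias);
# second step: keep the combination with the smallest residual.
true_r = np.linalg.norm(anchors - p_true, axis=1)
cands = [(r + 0.05 * rng.standard_normal(), r + 2.0) for r in true_r]
best = min((solve_ls(np.array(c)) for c in product(*cands)), key=lambda t: t[1])
print("estimated position:", best[0].round(2))
```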
\n\n\n
\n\n\n
\n  \n\n \n \n \n \n \n \n Compressive multispectral model for spectrum sensing in cognitive radio networks.\n \n \n \n \n\n\n \n Marín, A. J.; Martinez, T. J. I.; Betancur, L.; and Arguello, H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2571-2575, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CompressivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081675,\n  author = {A. J. Marín and T. J. I. Martinez and L. Betancur and H. Arguello},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Compressive multispectral model for spectrum sensing in cognitive radio networks},\n  year = {2017},\n  pages = {2571-2575},\n  abstract = {Cognitive Radio (CR) is one of the most promising techniques for optimizing the spectrum usage. However, the large amount of data of spectral information that must be processed to identify and assign spectral resources increases the channel assignment times, therefore worsening the quality of service for the devices using the spectrum. Compressive Sensing (CS) is a digital processing technique that allows the reconstruction of sparse or compressible signals using fewer samples than those required traditionally. This paper presents a model that addresses the Spectral Sensing problem in Cognitive Radio using Compressive Sensing as an effective way of decreasing the number of samples required in the sensing process. This model is based on Compressive Spectral Imaging (CSI) architectures where a centralized spectrum manager selects what power data must be delivered by the different wireless devices using binary patterns, and builds a multispectral data cube image with the geographical and spectral data power information. The results show that this multispectral data cube can be built with only a 50% of the samples generated by the devices and, therefore reducing the data traffic dramatically.},\n  keywords = {channel allocation;cognitive radio;compressed sensing;quality of service;radio spectrum management;signal detection;signal reconstruction;Compressive multispectral model;spectrum sensing;cognitive radio networks;spectrum usage;spectral information;spectral resources;channel assignment times;quality of service;Compressive Sensing;digital processing technique;sparse signals;compressible signals;Spectral Sensing problem;Compressive Spectral Imaging architectures;centralized spectrum manager;power data;multispectral data cube image;geographical data power information;spectral data power information;data traffic;wireless devices;binary patterns;Sensors;Mathematical model;Data models;Image coding;Signal processing;Optimization;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081675},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346685.pdf},\n}\n\n
\n
\n\n\n
\n                    Cognitive Radio (CR) is one of the most promising techniques for optimizing spectrum usage. However, the large amount of spectral information that must be processed to identify and assign spectral resources increases the channel assignment times, thereby worsening the quality of service for the devices using the spectrum. Compressive Sensing (CS) is a digital processing technique that allows the reconstruction of sparse or compressible signals using fewer samples than traditionally required. This paper presents a model that addresses the spectrum sensing problem in Cognitive Radio using Compressive Sensing as an effective way of decreasing the number of samples required in the sensing process. This model is based on Compressive Spectral Imaging (CSI) architectures, where a centralized spectrum manager selects which power data must be delivered by the different wireless devices using binary patterns, and builds a multispectral data cube with the geographical and spectral power information. The results show that this multispectral data cube can be built with only 50% of the samples generated by the devices, thereby reducing the data traffic dramatically.\n
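The core CS step can be sketched in a few lines (assumed sizes and +/-1 binary patterns; the paper's CSI architecture and reconstruction are more elaborate): a sparse spectrum occupancy vector is measured through binary patterns at a 50% rate and recovered greedily with orthogonal matching pursuit.

```python
import numpy as np

rng = np.random.default_rng(8)
n, m, k = 128, 64, 5                 # spectrum bins, measurements (50%), busy bands
x = np.zeros(n)
x[rng.choice(n, k, replace=False)] = rng.uniform(1, 3, k)
Phi = (2 * rng.integers(0, 2, (m, n)) - 1) / np.sqrt(m)   # +/-1 binary patterns
y = Phi @ x                          # compressive power measurements

def omp(Phi, y, k):
    """Orthogonal matching pursuit: greedily pick the column most correlated
    with the residual, then re-fit the selected support by least squares."""
    r, idx = y.copy(), []
    for _ in range(k):
        idx.append(int(np.argmax(np.abs(Phi.T @ r))))
        coef, *_ = np.linalg.lstsq(Phi[:, idx], y, rcond=None)
        r = y - Phi[:, idx] @ coef
    x_hat = np.zeros(Phi.shape[1])
    x_hat[idx] = coef
    return x_hat

print("reconstruction error:", np.linalg.norm(omp(Phi, y, k) - x).round(6))
```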
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-binary turbo-coded OFDM-PLC system in the presence of impulsive noise.\n \n \n \n \n\n\n \n Abd-Alaziz, W.; Mei, Z.; Johnston, M.; and Le Goff, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2576-2580, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Non-binaryPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081676,\n  author = {W. Abd-Alaziz and Z. Mei and M. Johnston and S. {Le Goff}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Non-binary turbo-coded OFDM-PLC system in the presence of impulsive noise},\n  year = {2017},\n  pages = {2576-2580},\n  abstract = {The power-line communication (PLC) channel causes information-bearing signals to be affected by impulsive noise and the effects of the multipath fading. To mitigate these effects, we propose the employment of non-binary turbo codes, since non-binary error-correcting codes generally promise an enhanced performance in such harsh environments. In this paper, we investigate the performance of non-binary turbo-codes on PLC channels that exhibit frequency selectivity with additive Middleton Class A noise and compare with a comparable binary turbo-coded PLC system. In order to reduce the effect of multipath and impulsive noise, orthogonal frequency-division multiplexing (OFDM) with non-linear receivers (blanking and clipping) have been employed. The system is examined on extremely impulsive channels where the value of the impulsive index (A) is 0.01 and the noise ratio (Γ) is 0.01. The results show that non-binary turbo codes are very robust and achieve a large gain over binary turbo codes on PLC channels.},\n  keywords = {carrier transmission on power lines;error correction codes;impulse noise;multipath channels;OFDM modulation;turbo codes;OFDM-PLC system;comparable binary turbo;additive Middleton Class A noise;nonbinary turbo-codes;nonbinary error-correcting codes;power-line communication channel;binary turbo codes;nonbinary turbo codes;extremely impulsive channels;impulsive noise;OFDM;Turbo codes;Probability density function;Attenuation;Convolutional codes;Convolution;Europe;non-binary turbo codes;power-line communication;impulsive noise;Middleton Class A noise;OFDM},\n  doi = {10.23919/EUSIPCO.2017.8081676},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347006.pdf},\n}\n\n
\n
\n\n\n
\n                    The power-line communication (PLC) channel causes information-bearing signals to be affected by impulsive noise and multipath fading. To mitigate these effects, we propose the employment of non-binary turbo codes, since non-binary error-correcting codes generally promise enhanced performance in such harsh environments. In this paper, we investigate the performance of non-binary turbo codes on PLC channels that exhibit frequency selectivity with additive Middleton Class A noise, and compare it with a comparable binary turbo-coded PLC system. In order to reduce the effects of multipath and impulsive noise, orthogonal frequency-division multiplexing (OFDM) with non-linear receivers (blanking and clipping) has been employed. The system is examined on extremely impulsive channels where the impulsive index (A) is 0.01 and the noise ratio (Γ) is 0.01. The results show that non-binary turbo codes are very robust and achieve a large gain over binary turbo codes on PLC channels.\n
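Two of the ingredients are easy to reproduce: Middleton Class A noise in its standard normalised form (Poisson impulse count per sample, conditional Gaussian variance growing with the count) and the blanking/clipping front-end non-linearities. The threshold below is an assumption; only A and Γ come from the abstract.

```python
import numpy as np

rng = np.random.default_rng(9)
n, A, Gamma = 100_000, 0.01, 0.01     # impulsive index A and noise ratio from the paper

# Middleton Class A noise: each sample sees a Poisson(A) number m of impulses,
# with conditional variance (m/A + Gamma)/(1 + Gamma), normalised to unit power.
m = rng.poisson(A, n)
var = (m / A + Gamma) / (1 + Gamma)
w = np.sqrt(var / 2) * (rng.standard_normal(n) + 1j * rng.standard_normal(n))

# Non-linear pre-processing before the receiver FFT (threshold assumed).
T = 3.0
mag = np.abs(w) + 1e-12
blanked = np.where(mag > T, 0, w)               # blanking non-linearity
clipped = np.where(mag > T, T * w / mag, w)     # clipping non-linearity
print("raw power   :", np.mean(np.abs(w) ** 2).round(2))
print("after blank :", np.mean(np.abs(blanked) ** 2).round(3))
print("after clip  :", np.mean(np.abs(clipped) ** 2).round(3))
```

With A = Γ = 0.01 roughly one sample in a hundred carries an impulse about 40 dB above the background noise, which is what makes the non-linear front-end worthwhile.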
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n RAKE: A simple and efficient lossless compression algorithm for the Internet of Things.\n \n \n \n \n\n\n \n Campobello, G.; Segreto, A.; Zanafi, S.; and Serrano, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2581-2585, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RAKE:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081677,\n  author = {G. Campobello and A. Segreto and S. Zanafi and S. Serrano},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {RAKE: A simple and efficient lossless compression algorithm for the Internet of Things},\n  year = {2017},\n  pages = {2581-2585},\n  abstract = {In this paper we propose a new lossless compression algorithm suitable for Internet of Things (IoT). The proposed algorithm, named RAKE, is based only on elementary counting operations and has low memory requirements, and therefore it can be easily implemented in low-cost and low-speed micro-controllers as those used in IoT devices. Despite its simplicity, simulation results show that, in the case of sparse sequences, the proposed algorithm outperforms well-known lossless compression algorithms such as rar, gzip and bzip2. Moreover, in the case of real-world data, RAKE achieves higher compression ratios as even compared to IoT-specific lossless compression algorithms.},\n  keywords = {data compression;image coding;Internet of Things;elementary counting operations;IoT devices;IoT-specific lossless compression algorithms;lossless compression algorithm;memory requirements;compression ratio;RAKE algorithm;Compression algorithms;Signal processing algorithms;Dictionaries;Prediction algorithms;Encoding;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081677},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570344021.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose a new lossless compression algorithm suitable for Internet of Things (IoT). The proposed algorithm, named RAKE, is based only on elementary counting operations and has low memory requirements, and therefore it can be easily implemented in low-cost and low-speed micro-controllers as those used in IoT devices. Despite its simplicity, simulation results show that, in the case of sparse sequences, the proposed algorithm outperforms well-known lossless compression algorithms such as rar, gzip and bzip2. Moreover, in the case of real-world data, RAKE achieves higher compression ratios as even compared to IoT-specific lossless compression algorithms.\n
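To illustrate what "only elementary counting operations" buys on sparse data, here is a toy gap coder in the same spirit; it is emphatically not the RAKE algorithm itself, just a counting-only scheme that compresses sparse bit sequences with fixed-width words and an escape code for long zero runs.

```python
def encode(bits, w=4):
    """Toy gap coder for sparse bit sequences (illustrative, NOT RAKE):
    emit the zero-gap before each set bit as a w-bit word, with the all-ones
    word reserved as an 'advance 2^w - 1 positions' escape."""
    out, gap, esc = [], 0, (1 << w) - 1
    for b in bits:
        if b:
            while gap >= esc:            # escape words for long zero runs
                out.append(esc); gap -= esc
            out.append(gap); gap = 0
        else:
            gap += 1
    return out

def decode(words, w=4):
    bits, esc, gap = [], (1 << w) - 1, 0
    for v in words:
        if v == esc:
            gap += esc
        else:
            bits.extend([0] * (gap + v) + [1]); gap = 0
    return bits

data = [0] * 37 + [1] + [0] * 5 + [1] + [0] * 20
code = encode(data)
assert decode(code) == data[:len(decode(code))]   # trailing zeros are implicit
print(f"{len(data)} bits -> {len(code)} x 4-bit words")
```

Everything here is increments, comparisons and subtractions, which is the property that makes such schemes fit on low-end micro-controllers.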
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Collision and packet loss analysis in a LoRaWAN network.\n \n \n \n \n\n\n \n Ferre, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2586-2590, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CollisionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081678,\n  author = {G. Ferre},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Collision and packet loss analysis in a LoRaWAN network},\n  year = {2017},\n  pages = {2586-2590},\n  abstract = {Internet of things (IoT) is considered as the next technological revolution. Therefore, many solutions are developed either in free, i.e. ISM bands or in non free bands with the ultimate aim of affording connectivity over several kilometers. Based on this feature, in urban environment the density of IoT devices will be extremely high. In this paper we propose to analyze the collision and packet loss when LoRaWAN is considered. Based on the LoRaWAN features, we develop closed-form expressions of collision and packet loss probabilities. Simulation results confirm our theoretical developments. We also show that our theoretical expressions are more accurate than the Poisson distributed process to describe the collisions.},\n  keywords = {Internet of Things;Poisson distribution;probability;wide area networks;theoretical expressions;theoretical developments;packet loss probabilities;closed-form expressions;IoT devices;urban environment the density;nonfree bands;ISM bands;technological revolution;LoRaWAN network;packet loss analysis;collisions;Poisson distributed process;Internet of things;Chirp;Logic gates;Packet loss;Uplink;Atmospheric modeling;Modulation},\n  doi = {10.23919/EUSIPCO.2017.8081678},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347606.pdf},\n}\n\n
\n
\n\n\n
\n                    The Internet of things (IoT) is considered the next technological revolution. Therefore, many solutions are being developed, either in free (i.e., ISM) bands or in non-free bands, with the ultimate aim of affording connectivity over several kilometers. Based on this feature, the density of IoT devices in urban environments will be extremely high. In this paper we propose to analyze collisions and packet loss when LoRaWAN is considered. Based on the LoRaWAN features, we develop closed-form expressions for the collision and packet loss probabilities. Simulation results confirm our theoretical developments. We also show that our theoretical expressions describe the collisions more accurately than a Poisson distributed process.\n
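The Poisson baseline the paper improves upon is the pure-ALOHA collision model, which is easy to reproduce (parameters assumed; same spreading factor and channel, no capture effect): a packet is lost whenever another packet starts within one airtime of its own start, giving the vulnerability-window formula 1 - exp(-2*lambda*T).

```python
import numpy as np

rng = np.random.default_rng(10)
lam, T, sim_time = 0.05, 1.0, 200_000.0   # packets/s, airtime (s) - assumed values

# Poisson arrivals over the simulation horizon; a packet collides if its
# start falls within +/- T of any other packet's start.
starts = np.sort(rng.uniform(0, sim_time, rng.poisson(lam * sim_time)))
gap_prev = np.diff(starts, prepend=-np.inf)
gap_next = np.diff(starts, append=np.inf)
collided = (gap_prev < T) | (gap_next < T)

print("simulated collision prob.     :", collided.mean().round(4))
print("closed form 1 - exp(-2*lam*T) :", (1 - np.exp(-2 * lam * T)).round(4))
```

The paper's point is that real LoRaWAN traffic (duty-cycle limits, retransmissions) deviates from this Poisson model, and its refined closed-form expressions track the simulations more closely.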
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Trust-based strategies for wireless networks under partial monitoring.\n \n \n \n \n\n\n \n Ntemos, K.; Kalouptsidis, N.; and Kolokotronis, N.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2591-2595, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Trust-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081679,\n  author = {K. Ntemos and N. Kalouptsidis and N. Kolokotronis},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Trust-based strategies for wireless networks under partial monitoring},\n  year = {2017},\n  pages = {2591-2595},\n  abstract = {In modern wireless networks autonomous agents may exhibit selfish or malicious behavior which can compromise the performance of the network. For this reason, Intrusion Detection Systems (IDS) have been proposed to monitor the agents' behavior, along with the deployment of Trust/reputation Management Systems (TMS) to enforce cooperation among the agents. IDS may not continuously monitor agents' behavior to avoid excessive deployment costs. In this work we consider agents that exhibit both selfish and malicious behavior and study their pairwise interactions when they participate in a packet-forwarding task, in the scenario of partial monitoring of their actions by the IDS. We investigate the decision-making process of the agents and derive conditions that if satisfied, the trust-based strategy proposed by the TMS constitutes an optimal strategy for the agents.},\n  keywords = {decision making;game theory;multi-agent systems;radio networks;telecommunication security;partial monitoring;modern wireless networks autonomous agents;selfish behavior;malicious behavior;Intrusion Detection Systems;IDS;Trust/reputation Management Systems;TMS;excessive deployment costs;packet-forwarding task;decision-making process;trust-based strategy;Monitoring;History;Wireless networks;Europe;Signal processing;Informatics;Trust;Game Theory;Monitoring;Cooperation},\n  doi = {10.23919/EUSIPCO.2017.8081679},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347810.pdf},\n}\n\n
\n
\n\n\n
\n                    In modern wireless networks, autonomous agents may exhibit selfish or malicious behavior which can compromise the performance of the network. For this reason, Intrusion Detection Systems (IDS) have been proposed to monitor the agents' behavior, along with the deployment of Trust/reputation Management Systems (TMS) to enforce cooperation among the agents. An IDS may not monitor the agents' behavior continuously, in order to avoid excessive deployment costs. In this work we consider agents that exhibit both selfish and malicious behavior and study their pairwise interactions when they participate in a packet-forwarding task, in the scenario of partial monitoring of their actions by the IDS. We investigate the decision-making process of the agents and derive conditions under which the trust-based strategy proposed by the TMS constitutes an optimal strategy for the agents.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modeling and estimation of massive MIMO channel non-reciprocity: Sparsity-aided approach.\n \n \n \n \n\n\n \n Raeesi, O.; Gokceoglu, A.; Sofotasios, P. C.; Renfors, M.; and Valkama, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2596-2600, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ModelingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081680,\n  author = {O. Raeesi and A. Gokceoglu and P. C. Sofotasios and M. Renfors and M. Valkama},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Modeling and estimation of massive MIMO channel non-reciprocity: Sparsity-aided approach},\n  year = {2017},\n  pages = {2596-2600},\n  abstract = {In this paper, we study the estimation of channel non-reciprocity in precoded time division duplexing based massive multiple-input multiple-output (MIMO) systems. The considered channel non-reciprocity model covers both the frequency-response and the mutual coupling mismatches between the transmitter and the receiver chains of a massive MIMO basestation (BS). Based on the assumed non-reciprocity model, it is shown that the effective downlink channel can be decomposed as a product of the uplink channel and another sparse matrix, referred to as the BS transceiver non-reciprocity matrix. Stemming from such modeling, we then propose an efficient estimator of the BS transceiver non-reciprocity matrix exploiting its sparse nature, combined with an appropriate formulation of the associated over-the-air pilot-signaling. The mean-squared error performance of the overall corresponding estimation method is finally evaluated using extensive computer simulations which indicate that the non-reciprocity characteristics can be estimated efficiently and accurately, thus potentially facilitating large system-level performance gains in multi-user massive MIMO systems.},\n  keywords = {channel coding;channel estimation;frequency response;mean square error methods;MIMO communication;precoding;radio transceivers;sparse matrices;time division multiplexing;wireless channels;sparsity-aided approach;precoded time division duplexing;massive multiple-input multiple-output systems;effective downlink channel;uplink channel;nonreciprocity characteristics;multiuser massive MIMO systems;massive MIMO base station;estimation method;massive MIMO channel nonreciprocity estimation;massive MIMO channel nonreciprocity modelling;frequency-response;transmitter chains;receiver chains;sparse matrix decomposition;BS transceiver nonreciprocity matrix estimator;over-the-air pilot-signaling;mean-squared error performance;extensive computer simulations;system-level performance gains;MIMO;Transceivers;Estimation;Channel estimation;Mutual coupling;Antenna arrays;Channel non-reciprocity;frequency-response mismatch;massive MIMO;mutual coupling;sparsity;time division duplexing (TDD)},\n  doi = {10.23919/EUSIPCO.2017.8081680},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346999.pdf},\n}\n\n
\n
\n\n\n
\n                    In this paper, we study the estimation of channel non-reciprocity in precoded time division duplexing based massive multiple-input multiple-output (MIMO) systems. The considered channel non-reciprocity model covers both the frequency-response and the mutual coupling mismatches between the transmitter and the receiver chains of a massive MIMO base station (BS). Based on the assumed non-reciprocity model, it is shown that the effective downlink channel can be decomposed as a product of the uplink channel and another sparse matrix, referred to as the BS transceiver non-reciprocity matrix. Stemming from such modeling, we then propose an efficient estimator of the BS transceiver non-reciprocity matrix that exploits its sparse nature, combined with an appropriate formulation of the associated over-the-air pilot signaling. The mean-squared error performance of the overall estimation method is evaluated using extensive computer simulations, which indicate that the non-reciprocity characteristics can be estimated efficiently and accurately, thus potentially facilitating large system-level performance gains in multi-user massive MIMO systems.\n
\n\n\n
\n\n\n
\n  \n\n \n \n \n \n \n \n Performance evaluation of adaptive filters for sparse wireless channel estimation.\n \n \n \n \n\n\n \n Lima, M. V. S.; Ferreira, T. N.; Martins, W. A.; Mendonça, M. O. K.; and Diniz, P. S. R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2601-2605, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"PerformancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081681,\n  author = {M. V. S. Lima and T. N. Ferreira and W. A. Martins and M. O. K. Mendonça and P. S. R. Diniz},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Performance evaluation of adaptive filters for sparse wireless channel estimation},\n  year = {2017},\n  pages = {2601-2605},\n  abstract = {The progressive increase of data rates in wireless communication systems has induced channel models with sampled impulse responses which are mostly sparse. This paper presents a unified derivation of adaptive filters exploiting sparsity in the complex domain, and compares the performance of classic and state-of-the-art adaptive algorithms for estimating sparse wireless channels as well as their tracking ability in this inherently time-varying environment. Simulation results confirm the efficiency of the sparsity-aware algorithms.},\n  keywords = {adaptive filters;channel estimation;compressed sensing;wireless channels;adaptive filters;sparse wireless channel estimation;wireless communication systems;sparsity-aware algorithms;impulse responses;adaptive algorithms;Signal processing algorithms;Wireless communication;Channel estimation;Channel models;Adaptation models;Doppler effect;adaptive filtering;sparsity;set-membership;wireless channel estimation;NLMS},\n  doi = {10.23919/EUSIPCO.2017.8081681},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347269.pdf},\n}\n\n
\n
\n\n\n
\n                    The progressive increase of data rates in wireless communication systems has induced channel models whose sampled impulse responses are mostly sparse. This paper presents a unified derivation of adaptive filters exploiting sparsity in the complex domain, and compares the performance of classic and state-of-the-art adaptive algorithms for estimating sparse wireless channels, as well as their tracking ability in this inherently time-varying environment. Simulation results confirm the efficiency of the sparsity-aware algorithms.\n
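A representative member of the compared family is the zero-attracting (l1-regularized) NLMS update, sketched below against plain NLMS on an assumed sparse channel; the paper's unified complex-domain derivation covers this and several more elaborate variants.

```python
import numpy as np

rng = np.random.default_rng(11)
L, n_iter = 64, 20_000
h = np.zeros(L)
h[[3, 17, 40]] = [1.0, -0.5, 0.3]          # sparse channel (assumed)
x = rng.standard_normal(n_iter + L)

def run(rho):
    """NLMS with an l1 zero-attractor of strength rho (rho = 0 gives plain
    NLMS): the extra -rho*sign(w) term pulls inactive taps towards zero."""
    w = np.zeros(L)
    for n in range(n_iter):
        u = x[n:n + L][::-1]                       # regressor (newest first)
        d = h @ u + 0.01 * rng.standard_normal()   # noisy channel output
        e = d - w @ u
        w += 0.5 * e * u / (u @ u + 1e-8) - rho * np.sign(w)
    return np.linalg.norm(w - h) ** 2              # misalignment (MSD)

print("NLMS MSD   :", run(0.0))
print("ZA-NLMS MSD:", run(2e-5))
```

On sparse channels the zero-attractor typically lowers the steady-state misalignment, which is the effect the paper's comparison quantifies.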
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A fast matrix completion method for index coding.\n \n \n \n \n\n\n \n Asadi, E.; Aziznejad, S.; Amerimehr, M. H.; and Amini, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2606-2610, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081682,\n  author = {E. Asadi and S. Aziznejad and M. H. Amerimehr and A. Amini},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A fast matrix completion method for index coding},\n  year = {2017},\n  pages = {2606-2610},\n  abstract = {We investigate the problem of index coding, where a sender transmits distinct packets over a shared link to multiple users with side information. The aim is to find an encoding scheme (linear combinations) to minimize the number of transmitted packets, while providing each user with sufficient amount of data for the recovery of the desired parts. It has been shown that finding the optimal linear index code is equivalent to a matrix completion problem, where the observed elements of the matrix indicate the side information available for the users. This modeling results in an incomplete square matrix with all ones on the main diagonal (and some other parts), which needs to be completed with minimum rank. Unfortunately, this is a case in which conventional matrix completion techniques based on nuclear-norm minimization are proved to fail [Huang, Rouayheb 2015]. Instead, an alternating projection (the AP algorithm) method is proposed in [Huang, Rouayheb 2015]. In this paper, in addition to proving the convergence of the AP algorithm under certain conditions, we introduce a modification which considerably improves the run time of the method.},\n  keywords = {combinatorial mathematics;linear codes;matrix algebra;minimisation;conventional matrix completion techniques;nuclear-norm minimization;AP algorithm;fast matrix completion method;index coding;distinct packets;multiple users;encoding scheme;linear combinations;transmitted packets;optimal linear index code;incomplete square matrix;Indexes;Encoding;Signal processing algorithms;Algorithm design and analysis;Convergence;Receivers;Europe;Alternating projection;coded caching;index coding;matrix completion;network caching;rank minimization},\n  doi = {10.23919/EUSIPCO.2017.8081682},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347355.pdf},\n}\n\n
\n
\n\n\n
\n We investigate the problem of index coding, where a sender transmits distinct packets over a shared link to multiple users with side information. The aim is to find an encoding scheme (linear combinations) to minimize the number of transmitted packets, while providing each user with a sufficient amount of data for the recovery of the desired parts. It has been shown that finding the optimal linear index code is equivalent to a matrix completion problem, where the observed elements of the matrix indicate the side information available for the users. This modeling results in an incomplete square matrix with all ones on the main diagonal (and some other parts), which needs to be completed with minimum rank. Unfortunately, this is a case in which conventional matrix completion techniques based on nuclear-norm minimization have been proven to fail [Huang, Rouayheb 2015]. Instead, an alternating projection (the AP algorithm) method is proposed in [Huang, Rouayheb 2015]. In this paper, in addition to proving the convergence of the AP algorithm under certain conditions, we introduce a modification which considerably improves the run time of the method.\n
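A minimal sketch of the basic alternating-projection iteration referenced above: project onto the rank-r matrices via a truncated SVD, then re-impose the observed entries. The sizes, mask density, and target rank are made up, and the paper's contributions (the convergence proof and the run-time modification) are not reproduced here.

```python
# Basic AP iteration for low-rank completion in the index-coding setting:
# the diagonal is fixed to ones (each user knows its own packet) and the
# observed off-diagonal entries encode side information. Illustrative only.
import numpy as np

rng = np.random.default_rng(1)
n, r = 12, 3
diag = np.eye(n, dtype=bool)
mask = diag | (rng.random((n, n)) < 0.4)       # observed entries
vals = np.where(diag, 1.0, rng.standard_normal((n, n)))

X = np.where(mask, vals, 0.0)
for _ in range(500):
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    X = (U[:, :r] * s[:r]) @ Vt[:r]            # project onto rank-r matrices
    X[mask] = vals[mask]                       # re-impose the observed data

U, s, Vt = np.linalg.svd(X, full_matrices=False)
low_rank = (U[:, :r] * s[:r]) @ Vt[:r]
print("max mismatch on observed entries:", np.abs(low_rank - vals)[mask].max())
```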
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Compressed sensing technique for synchronization and channel estimation in OFDMA uplink transmissions.\n \n \n \n \n\n\n \n Şenyuva, R. V.; Kurt, G. K.; and Anarim, E.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2611-2615, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"CompressedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081683,\n  author = {R. V. Şenyuva and G. K. Kurt and E. Anarim},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Compressed sensing technique for synchronization and channel estimation in OFDMA uplink transmissions},\n  year = {2017},\n  pages = {2611-2615},\n  abstract = {In this study, joint estimation of the symbol timing offset (STO) and channel impulse response (CIR) of each active user under the presence of the carrier frequency offset (CFO) is considered for the uplink of an orthogonal frequency-division multiple access (OFDMA) system. A new method based on the compressed sensing (CS) framework using pilot symbols is proposed for the joint estimation of the STO and the CIR of each active user. Sparsity is achieved through incorporating the STO, the cyclic prefix (CP) samples, and the CIR coefficients into a new signal model. The proposed method does not require CIR coefficients to be sparse. Numerical results of the performance of the proposed method using the orthogonal matching pursuit (OMP) algorithm are presented. The presence of the CFO is also considered as a perturbation to the CS dictionary of pilot symbols.},\n  keywords = {channel estimation;compressed sensing;frequency division multiple access;OFDM modulation;synchronisation;transient response;pilot symbols;compressed sensing technique;synchronization;channel estimation;OFDMA uplink transmissions;STO;channel impulse response;active user;CFO;orthogonal frequency-division multiple access system;cyclic prefix samples;CIR coefficients;orthogonal matching pursuit algorithm;symbol timing offset;carrier frequency offset;CP;OFDM;Uplink;Delays;Discrete Fourier transforms;Channel estimation;Estimation},\n  doi = {10.23919/EUSIPCO.2017.8081683},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347523.pdf},\n}\n\n
\n
\n\n\n
\n In this study, joint estimation of the symbol timing offset (STO) and channel impulse response (CIR) of each active user under the presence of the carrier frequency offset (CFO) is considered for the uplink of an orthogonal frequency-division multiple access (OFDMA) system. A new method based on the compressed sensing (CS) framework using pilot symbols is proposed for the joint estimation of the STO and the CIR of each active user. Sparsity is achieved through incorporating the STO, the cyclic prefix (CP) samples, and the CIR coefficients into a new signal model. The proposed method does not require CIR coefficients to be sparse. Numerical results of the performance of the proposed method using the orthogonal matching pursuit (OMP) algorithm are presented. The presence of the CFO is also considered as a perturbation to the CS dictionary of pilot symbols.\n
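For reference, a generic orthogonal matching pursuit routine of the kind the paper applies to its pilot-symbol dictionary; the dictionary construction from the STO/CP/CIR signal model is paper-specific and not reproduced, and the sizes below are arbitrary.

```python
# Generic OMP: greedy recovery of a k-sparse x with y ≈ A @ x.
import numpy as np

def omp(A, y, k):
    residual, support = y.astype(complex), []
    x_s = np.zeros(0)
    for _ in range(k):
        j = int(np.argmax(np.abs(A.conj().T @ residual)))  # best-matching atom
        if j not in support:
            support.append(j)
        x_s, *_ = np.linalg.lstsq(A[:, support], y, rcond=None)
        residual = y - A[:, support] @ x_s                 # refit, new residual
    x = np.zeros(A.shape[1], dtype=complex)
    x[support] = x_s
    return x

rng = np.random.default_rng(2)
A = rng.standard_normal((64, 256)) + 1j * rng.standard_normal((64, 256))
A /= np.linalg.norm(A, axis=0)                             # unit-norm atoms
x_true = np.zeros(256, dtype=complex)
x_true[[5, 90, 200]] = [1.0, -0.5j, 0.3]
x_hat = omp(A, A @ x_true, k=3)
print("recovered support:", np.flatnonzero(np.abs(x_hat) > 1e-6))
```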
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparsity-based direction of arrival estimation in the presence of gain/phase uncertainty.\n \n \n \n \n\n\n \n Afkhaminia, F.; and Azghani, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2616-2619, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Sparsity-basedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081684,\n  author = {F. Afkhaminia and M. Azghani},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Sparsity-based direction of arrival estimation in the presence of gain/phase uncertainty},\n  year = {2017},\n  pages = {2616-2619},\n  abstract = {Estimating the direction of arrival (DOA) in sensor arrays is a crucial task in array signal processing systems. This task becomes more difficult when the sensors have gain/phase uncertainty. We have addressed this issue by modeling the problem as a combination of two sparse components, the DOA vector and the gain/phase uncertainty vector. Therefore, a sparse decomposition technique is suggested to jointly recover the DOAs and the sensors with gain/phase uncertainty. The simulation results confirm that the suggested method offers very good performance in different scenarios and is superior to its counterparts.},\n  keywords = {array signal processing;direction-of-arrival estimation;sensor arrays;vectors;DOA vector;gain/phase uncertainty vector;sensor arrays;array signal processing;sparse decomposition;sparsity-based direction of arrival estimation;Direction-of-arrival estimation;Uncertainty;Estimation;Signal to noise ratio;Sensor arrays;Array signal processing;sparsity;DOA estimation;gain/phase uncertainty},\n  doi = {10.23919/EUSIPCO.2017.8081684},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347176.pdf},\n}\n\n
\n
\n\n\n
\n Estimating the direction of arrival (DOA) in sensor arrays is a crucial task in array signal processing systems. This task becomes more difficult when the sensors have gain/phase uncertainty. We have addressed this issue by modeling the problem as a combination of two sparse components, the DOA vector and the gain/phase uncertainty vector. Therefore, a sparse decomposition technique is suggested to jointly recover the DOAs and the sensors with gain/phase uncertainty. The simulation results confirm that the suggested method offers very good performance in different scenarios and is superior to its counterparts.\n
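A hedged sketch of the on-grid sparse-recovery view of DOA estimation for a uniform linear array: a steering-vector dictionary over a dense angle grid, solved greedily. The paper's joint model carries a second sparse component for the gain/phase-uncertain sensors, which this illustration omits; array size and source angles are made up.

```python
# On-grid sparse DOA sketch: y = A s + n over a steering-vector dictionary.
import numpy as np

rng = np.random.default_rng(3)
M, d = 16, 0.5                                  # sensors, spacing (wavelengths)
grid = np.deg2rad(np.arange(-90.0, 91.0, 1.0))
A = np.exp(-2j * np.pi * d * np.outer(np.arange(M), np.sin(grid)))

doas = np.deg2rad([-20.0, 35.0])                # true directions (synthetic)
S = np.exp(-2j * np.pi * d * np.outer(np.arange(M), np.sin(doas)))
y = S @ np.array([1.0, 0.7]) \
    + 0.05 * (rng.standard_normal(M) + 1j * rng.standard_normal(M))

support, resid = [], y.copy()
for _ in range(2):                              # two sources assumed known here
    support.append(int(np.argmax(np.abs(A.conj().T @ resid))))
    s_hat, *_ = np.linalg.lstsq(A[:, support], y, rcond=None)
    resid = y - A[:, support] @ s_hat
print("estimated DOAs (deg):", sorted(np.rad2deg(grid[support])))
```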
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An NLMS algorithm for the identification of bilinear forms.\n \n \n \n \n\n\n \n Paleologu, C.; Benesty, J.; and Ciochină, S.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2620-2624, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081685,\n  author = {C. Paleologu and J. Benesty and S. Ciochină},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {An NLMS algorithm for the identification of bilinear forms},\n  year = {2017},\n  pages = {2620-2624},\n  abstract = {In a recent work, we addressed the identification problem of bilinear forms with the Wiener filter. In this context, a different approach was introduced, by defining the bilinear term with respect to the impulse responses of a spatiotemporal model, which resembles a multiple-input/single-output (MISO) system. However, in practice, the Wiener filter may not always be very efficient or convenient to use. Consequently, in this paper, we further develop a normalized least-mean-square (NLMS) adaptive filter tailored for bilinear forms. Many simulations, which are performed from a system identification perspective, indicate the good performance of the proposed algorithm.},\n  keywords = {adaptive filters;filtering theory;identification;least mean squares methods;transient response;Wiener filters;Wiener filter;bilinear term;system identification perspective;NLMS algorithm;multiple-input-single-output system;bilinear form identification problem;impulse response;spatiotemporal model;normalized least-mean-square adaptive filter;Spatiotemporal phenomena;Europe;Signal processing;MISO;Signal processing algorithms;Adaptation models},\n  doi = {10.23919/EUSIPCO.2017.8081685},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570344781.pdf},\n}\n\n
\n
\n\n\n
\n In a recent work, we addressed the identification problem of bilinear forms with the Wiener filter. In this context, a different approach was introduced, by defining the bilinear term with respect to the impulse responses of a spatiotemporal model, which resembles a multiple-input/single-output (MISO) system. However, in practice, the Wiener filter may not always be very efficient or convenient to use. Consequently, in this paper, we further develop a normalized least-mean-square (NLMS) adaptive filter tailored for bilinear forms. Many simulations, which are performed from a system identification perspective, indicate the good performance of the proposed algorithm.\n
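A hedged sketch of identifying a bilinear form d = hᵀXg (temporal filter h, spatial filter g over M channels) with alternating NLMS-type updates. The paper derives a dedicated bilinear NLMS; this alternation is an illustrative stand-in, not its exact recursion, and h and g are only identifiable up to an exchanged scale factor.

```python
# Alternating NLMS on a bilinear (spatiotemporal, MISO-like) model.
import numpy as np

rng = np.random.default_rng(4)
L, M = 32, 4
h_true = rng.standard_normal(L); h_true /= np.linalg.norm(h_true)
g_true = rng.standard_normal(M); g_true /= np.linalg.norm(g_true)

h, g = np.zeros(L), np.ones(M) / M              # g must not start at zero
mu, eps = 0.5, 1e-6
for n in range(20000):
    X = rng.standard_normal((L, M))             # current spatiotemporal input
    d = h_true @ X @ g_true + 1e-3 * rng.standard_normal()
    u = X @ g                                   # regressor for the h-update
    e = d - h @ u
    h += mu * e * u / (eps + u @ u)
    v = X.T @ h                                 # regressor for the g-update
    e = d - v @ g
    g += mu * e * v / (eps + v @ v)

print("temporal alignment (up to scale):", abs(h @ h_true) / np.linalg.norm(h))
```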
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quaternion adaptive line enhancer.\n \n \n \n \n\n\n \n Sanei, S.; Took, C. C.; Enshaeifar, S.; and Lee, T. K. M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2625-2629, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"QuaternionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081686,\n  author = {S. Sanei and C. C. Took and S. Enshaeifar and T. K. M. Lee},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Quaternion adaptive line enhancer},\n  year = {2017},\n  pages = {2625-2629},\n  abstract = {The recovery of periodic signals from their noisy single channel mixtures has made wide use of the adaptive line enhancer (ALE). The ALE, however, is not designed for detection of two-(2-D) or three-dimensional (3-D) periodic signals such as tremor in an unconstrained hand motion. An ALE which can perform restoration of 3-D periodic signals is therefore required for such purposes. These signals may not exhibit periodicity in a single dimension. To address and solve this problem a quaternion adaptive line enhancer (QALE) is introduced in this paper for the first time which exploits the quaternion least mean square (QLMS) algorithm for the detection of 3-D (extendable to 4-D) periodic signals.},\n  keywords = {least mean squares methods;signal detection;quaternion adaptive line enhancer;noisy single channel mixtures;ALE;unconstrained hand motion;single dimension;quaternion least mean square algorithm;QALE;QLMS algorithm;3D periodic signal detection;Quaternions;Noise measurement;Signal processing algorithms;Trajectory;Line enhancers;Correlation;Europe;ALE;quaternion adaptive line enhancer;QLMS;quaternion shift},\n  doi = {10.23919/EUSIPCO.2017.8081686},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346010.pdf},\n}\n\n
\n
\n\n\n
\n The recovery of periodic signals from their noisy single channel mixtures has made wide use of the adaptive line enhancer (ALE). The ALE, however, is not designed for detection of two-(2-D) or three-dimensional (3-D) periodic signals such as tremor in an unconstrained hand motion. An ALE which can perform restoration of 3-D periodic signals is therefore required for such purposes. These signals may not exhibit periodicity in a single dimension. To address and solve this problem a quaternion adaptive line enhancer (QALE) is introduced in this paper for the first time which exploits the quaternion least mean square (QLMS) algorithm for the detection of 3-D (extendable to 4-D) periodic signals.\n
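For orientation, a classic real-valued ALE: a delayed copy of the input is LMS-filtered to predict the periodic component and separate it from broadband noise. The paper's QALE replaces the real samples and weights with quaternions (via QLMS) so 3-D periodic trajectories can be enhanced; the quaternion algebra is omitted from this scalar sketch, and the signal and parameters are synthetic.

```python
# Scalar adaptive line enhancer (ALE) with an LMS-trained predictor.
import numpy as np

rng = np.random.default_rng(5)
n = np.arange(20000)
x = np.sin(2 * np.pi * 0.01 * n) + 0.5 * rng.standard_normal(n.size)

L, delta, mu = 32, 8, 0.005           # taps, decorrelation delay, step size
w = np.zeros(L)
y = np.zeros(n.size)
for k in range(L + delta, n.size):
    u = x[k - delta - L + 1:k - delta + 1][::-1]   # delayed regressor
    y[k] = w @ u                                    # enhanced (periodic) output
    w += mu * (x[k] - y[k]) * u                     # LMS on the prediction error

print("output/input power ratio (sinusoid retained, noise rejected):",
      np.var(y[-5000:]) / np.var(x[-5000:]))
```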
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low-complexity approximation to the Kalman filter using convex combinations of adaptive filters from different families.\n \n \n \n \n\n\n \n Claser, R.; and Nascimento, V. H.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2630-2633, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Low-complexityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081687,\n  author = {R. Claser and V. H. Nascimento},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Low-complexity approximation to the Kalman filter using convex combinations of adaptive filters from different families},\n  year = {2017},\n  pages = {2630-2633},\n  abstract = {It is known that combinations of the least mean square (LMS) and recursive least squares (RLS) algorithms may achieve a performance in tracking better than what is possible to obtain with either kind of filter individually. In this paper, we consider combinations of LMS and RLS filters and compare their performance under a nonstationary condition with the optimal solution obtained via Kalman filter. We show that combination schemes may have a tracking performance close to that of a Kalman filter, but with lower computational complexity (linear in the filter length instead of quadratic - in the case of the example shown here - or cubic, for general Kalman models).},\n  keywords = {adaptive filters;computational complexity;Kalman filters;least mean squares methods;least squares approximations;recursive estimation;convex combinations;adaptive filters;Kalman filter;general Kalman models;low-complexity approximation;computational complexity;Kalman filters;Covariance matrices;Signal processing algorithms;Adaptation models;Mathematical model;Computational complexity;Computational modeling},\n  doi = {10.23919/EUSIPCO.2017.8081687},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347694.pdf},\n}\n\n
\n
\n\n\n
\n It is known that combinations of the least mean square (LMS) and recursive least squares (RLS) algorithms may achieve a performance in tracking better than what is possible to obtain with either kind of filter individually. In this paper, we consider combinations of LMS and RLS filters and compare their performance under a nonstationary condition with the optimal solution obtained via Kalman filter. We show that combination schemes may have a tracking performance close to that of a Kalman filter, but with lower computational complexity (linear in the filter length instead of quadratic - in the case of the example shown here - or cubic, for general Kalman models).\n
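A sketch of the combination scheme the abstract describes: an LMS and an RLS filter adapt in parallel, and a sigmoid-constrained mixing parameter (the standard gradient rule for convex adaptive-filter combinations) weights their outputs. All parameter values are placeholders; the comparison against the Kalman optimum is not reproduced.

```python
# Convex combination of an LMS branch and an RLS branch.
import numpy as np

rng = np.random.default_rng(6)
N = 16
w_true = rng.standard_normal(N)
w_lms, w_rls = np.zeros(N), np.zeros(N)
P = 100.0 * np.eye(N)                        # RLS inverse-correlation estimate
mu, lam, mu_a, a = 0.01, 0.995, 50.0, 0.0

for n in range(5000):
    u = rng.standard_normal(N)
    d = w_true @ u + 0.01 * rng.standard_normal()

    y1 = w_lms @ u                           # LMS branch
    w_lms += mu * (d - y1) * u

    y2 = w_rls @ u                           # RLS branch
    k = P @ u / (lam + u @ P @ u)
    w_rls += k * (d - y2)
    P = (P - np.outer(k, u @ P)) / lam

    lmix = 1.0 / (1.0 + np.exp(-a))          # mixing weight kept in (0, 1)
    e = d - (lmix * y2 + (1 - lmix) * y1)
    a += mu_a * e * (y2 - y1) * lmix * (1 - lmix)   # gradient step on the logit
```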
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A design methodology for the Gaussian KLMS algorithm.\n \n \n \n \n\n\n \n Pedrosa, P.; Bermudez, J. C. M.; and Richard, C.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2634-2638, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081688,\n  author = {P. Pedrosa and J. C. M. Bermudez and C. Richard},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {A design methodology for the Gaussian KLMS algorithm},\n  year = {2017},\n  pages = {2634-2638},\n  abstract = {The Gaussian kernel least-mean-square (Gaussian KLMS) algorithm has been studied under different implementation conditions. Though analytical models that predict its behavior are available, methodologies for determining the algorithm parameter values to satisfy given design criteria is still missing from the literature. In this paper we propose, test, and validate a methodology for the design of the Gaussian KLMS algorithm. Designing the algorithm consists in selecting adequate values for its free parameters from available theoretical performance models. These parameters comprise the filter length, the adaptive step-size, and the kernel bandwidth. The objective is to achieve specific design objectives, e.g., fast convergence time, good steady-state performance and/or reduced computational load. These goals are quantified in terms of different performance measures. Particularly, the time to convergence, the residual mean-squared-error (MSE), and the filter order.},\n  keywords = {adaptive filters;Gaussian processes;least mean squares methods;Gaussian KLMS algorithm;Gaussian kernel least-mean-square algorithm;filter length;adaptive step-size;kernel bandwidth;residual mean-squared-error;filter order;Kernel;Dictionaries;Algorithm design and analysis;Signal processing algorithms;Bandwidth;Convergence;Design methodology},\n  doi = {10.23919/EUSIPCO.2017.8081688},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343290.pdf},\n}\n\n
\n
\n\n\n
\n The Gaussian kernel least-mean-square (Gaussian KLMS) algorithm has been studied under different implementation conditions. Though analytical models that predict its behavior are available, methodologies for determining the algorithm parameter values to satisfy given design criteria is still missing from the literature. In this paper we propose, test, and validate a methodology for the design of the Gaussian KLMS algorithm. Designing the algorithm consists in selecting adequate values for its free parameters from available theoretical performance models. These parameters comprise the filter length, the adaptive step-size, and the kernel bandwidth. The objective is to achieve specific design objectives, e.g., fast convergence time, good steady-state performance and/or reduced computational load. These goals are quantified in terms of different performance measures. Particularly, the time to convergence, the residual mean-squared-error (MSE), and the filter order.\n
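A minimal Gaussian KLMS with a growing kernel expansion, shown so the three design parameters the paper's methodology selects (step size mu, kernel bandwidth sigma, and the filter/dictionary length, here simply the number of stored centers) appear explicitly. The values used are arbitrary placeholders, not the output of that design procedure.

```python
# Minimal Gaussian KLMS: one kernel unit per sample, LMS in the RKHS.
import numpy as np

rng = np.random.default_rng(7)
mu, sigma = 0.2, 0.3
centers, alphas = [], []                        # dictionary and coefficients

def predict(u):
    if not centers:
        return 0.0
    C = np.array(centers)
    k = np.exp(-np.sum((C - u) ** 2, axis=1) / (2 * sigma ** 2))
    return float(np.array(alphas) @ k)

for n in range(500):                            # learn y = sin(3x) online
    u = rng.uniform(-1.0, 1.0, size=1)
    e = np.sin(3 * u[0]) + 0.05 * rng.standard_normal() - predict(u)
    centers.append(u.copy())
    alphas.append(mu * e)

test = np.linspace(-1, 1, 101)
print("test MSE:", np.mean([(np.sin(3 * t) - predict(np.array([t]))) ** 2
                            for t in test]))
```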
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quasi-Newton least-mean fourth adaptive algorithm.\n \n \n \n \n\n\n \n bin Mansoor, U.; Mayyala, Q.; Moinuddin, M.; and Zerguine, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2639-2643, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Quasi-NewtonPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081689,\n  author = {U. {bin Mansoor} and Q. Mayyala and M. Moinuddin and A. Zerguine},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Quasi-Newton least-mean fourth adaptive algorithm},\n  year = {2017},\n  pages = {2639-2643},\n  abstract = {This paper proposes a new Newton-based adaptive filtering algorithm, namely the Quasi-Newton Least-Mean Fourth (QNLMF) algorithm. The main goal is to have a higher order adaptive filter that usually fits non-Gaussian signals with an improved performance behavior, which is achieved using the Newton numerical method. Both the convergence analysis and the steady-state performance analysis are derived. More importantly, unlike other stochastic-based algorithms, the step size parameter that controls the convergence of the QNLMF is independent of the statistics of the input signal, and consequently, the analytical assessments show that the proposed algorithm enjoys performance that is independent of the input signal eigenvalue spread. Finally, a number of simulation experiments are carried out to corroborate the theoretical findings.},\n  keywords = {adaptive filters;convergence;convergence of numerical methods;eigenvalues and eigenfunctions;filtering theory;least mean squares methods;Newton method;least-mean fourth adaptive algorithm;adaptive filtering algorithm;QNLMF;higher order adaptive filter;nonGaussian signals;improved performance behavior;Newton numerical method;convergence analysis;steady-state performance analysis;stochastic based algorithms;step size parameter;input signal;independent performance;QuasiNewton Least-Mean Fourth algorithm;Signal processing algorithms;Algorithm design and analysis;Convergence;Steady-state;Eigenvalues and eigenfunctions;Europe;Signal processing;Newton Method;LMF;Adaptive filtering},\n  doi = {10.23919/EUSIPCO.2017.8081689},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347374.pdf},\n}\n\n
\n
\n\n\n
\n This paper proposes a new Newton-based adaptive filtering algorithm, namely the Quasi-Newton Least-Mean Fourth (QNLMF) algorithm. The main goal is to have a higher order adaptive filter that usually fits non-Gaussian signals with an improved performance behavior, which is achieved using the Newton numerical method. Both the convergence analysis and the steady-state performance analysis are derived. More importantly, unlike other stochastic-based algorithms, the step size parameter that controls the convergence of the QNLMF is independent of the statistics of the input signal, and consequently, the analytical assessments show that the proposed algorithm enjoys performance that is independent of the input signal eigenvalue spread. Finally, a number of simulation experiments are carried out to corroborate the theoretical findings.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The impact of diverse preprocessing pipelines on brain functional connectivity.\n \n \n \n \n\n\n \n Výtvarová, E.; Fousek, J.; Bartoň, M.; Mareček, R.; Gajdoš, M.; Lamoš, M.; Nováková, M.; Slavíček, T.; Peterlik, I.; and Mikl, M.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2644-2648, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081690,\n  author = {E. Výtvarová and J. Fousek and M. Bartoň and R. Mareček and M. Gajdoš and M. Lamoš and M. Nováková and T. Slavíček and I. Peterlik and M. Mikl},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {The impact of diverse preprocessing pipelines on brain functional connectivity},\n  year = {2017},\n  pages = {2644-2648},\n  abstract = {Brain functional connectivity measured by functional magnetic resonance imaging was shown to be influenced by preprocessing procedures. We aim to describe this influence separately for different preprocessing factors and across 20 of the most commonly used preprocessing pipelines. We evaluate the effects of slice-timing correction and physiological noise filtering by RETROICOR, diverse levels of motion correction, and white matter, cerebrospinal fluid, and global signal filtering. Using three datasets, we show the impact on global metrics of resting-state functional brain networks and their reliability. We show a negative effect of RETROICOR on the reliability of metrics and a disrupting effect of global signal regression on network topology. We do not support the use of slice-timing correction because it does not significantly influence any of the measured features. We also show that the selected types of preprocessing may affect averaged node strength, normalized clustering coefficient, normalized characteristic path length and modularity.},\n  keywords = {biomedical MRI;brain;image filtering;medical image processing;neural nets;neurophysiology;slice-timing correction;motion correction;global signal filtering;global metrics;resting-state functional brain networks;disrupting effect;global signal regression;diverse preprocessing pipelines;brain functional connectivity;functional magnetic resonance imaging;physiological noise filtering;white matter;cerebrospinal fluid;network topology;averaged node strength;normalized clustering coefficient;normalized characteristic path length;Measurement;Pipelines;Reliability;Correlation;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081690},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570337914.pdf},\n}\n\n
\n
\n\n\n
\n Brain functional connectivity measured by functional magnetic resonance imaging was shown to be influenced by preprocessing procedures. We aim to describe this influence separately for different preprocessing factors and across 20 of the most commonly used preprocessing pipelines. We evaluate the effects of slice-timing correction and physiological noise filtering by RETROICOR, diverse levels of motion correction, and white matter, cerebrospinal fluid, and global signal filtering. Using three datasets, we show the impact on global metrics of resting-state functional brain networks and their reliability. We show a negative effect of RETROICOR on the reliability of metrics and a disrupting effect of global signal regression on network topology. We do not support the use of slice-timing correction because it does not significantly influence any of the measured features. We also show that the selected types of preprocessing may affect averaged node strength, normalized clustering coefficient, normalized characteristic path length and modularity.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fine-scale patterns driving dynamic functional connectivity provide meaningful brain parcellations.\n \n \n \n \n\n\n \n Preti, M. G.; and Van De Ville, D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2649-2653, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Fine-scalePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081691,\n  author = {M. G. Preti and D. {Van De Ville}},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Fine-scale patterns driving dynamic functional connectivity provide meaningful brain parcellations},\n  year = {2017},\n  pages = {2649-2653},\n  abstract = {Dynamic functional connectivity (dFC) derived from resting-state functional magnetic resonance imaging (fMRI) allows identifying large-scale functional brain networks based on spontaneous activity and their temporal reconfigurations. Due to limited memory and computational resources, these pairwise measures are typically computed for a set of brain regions from a pre-defined brain atlas, whose choice is non-trivial and might influence results. Here, we first leverage the availability of dynamic information and new computational methods to retrieve dFC at the finest voxel level in terms of dominant patterns of fluctuations, and, second, we demonstrate that this new representation is informative to derive meaningful brain parcellations that capture both long-range interactions and fine-scale local organization. We analyzed resting-state fMRI of 54 healthy participants from the Human Connectome Project. For each position of a temporal window, we determined eigenvector centrality of the windowed fMRI data at the voxel level. These were then concatenated across time and subjects and clustered into the most representative dominant patterns (RDPs). Each voxel was then labeled according to a binary code indicating positive or negative contribution to each of the RDPs. We obtained a 36-label parcellation displaying long-range interactions with remarkable hemispherical symmetry. By separating contiguous regions, a finer-scale parcellation of 448 areas was also retrieved, showing consistency with known connectivity of cortical/subcortical structures including thalamus. Our contribution bridges the gap between voxel-based approaches and graph theoretical analysis.},\n  keywords = {binary codes;biomedical MRI;brain;graph theory;medical image processing;neural nets;neurophysiology;dynamic functional connectivity;resting-state functional magnetic resonance imaging;large-scale functional brain networks;spontaneous activity;temporal reconfigurations;brain regions;fine-scale local organization;windowed fMRI data;representative dominant patterns;fine-scale patterns;voxel level;brain parcellations;fluctuation pattern;human connectome project;cortical-subcortical structures;binary code;graph theoretical analysis;Europe;Signal processing;Biomedical measurement;functional MRI;dynamic functional connectivity;brain networks;fine-scale brain parcellation},\n  doi = {10.23919/EUSIPCO.2017.8081691},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346902.pdf},\n}\n\n
\n
\n\n\n
\n Dynamic functional connectivity (dFC) derived from resting-state functional magnetic resonance imaging (fMRI) allows identifying large-scale functional brain networks based on spontaneous activity and their temporal reconfigurations. Due to limited memory and computational resources, these pairwise measures are typically computed for a set of brain regions from a pre-defined brain atlas, whose choice is non-trivial and might influence results. Here, we first leverage the availability of dynamic information and new computational methods to retrieve dFC at the finest voxel level in terms of dominant patterns of fluctuations, and, second, we demonstrate that this new representation is informative to derive meaningful brain parcellations that capture both long-range interactions and fine-scale local organization. We analyzed resting-state fMRI of 54 healthy participants from the Human Connectome Project. For each position of a temporal window, we determined eigenvector centrality of the windowed fMRI data at the voxel level. These were then concatenated across time and subjects and clustered into the most representative dominant patterns (RDPs). Each voxel was then labeled according to a binary code indicating positive or negative contribution to each of the RDPs. We obtained a 36-label parcellation displaying long-range interactions with remarkable hemispherical symmetry. By separating contiguous regions, a finer-scale parcellation of 448 areas was also retrieved, showing consistency with known connectivity of cortical/subcortical structures including thalamus. Our contribution bridges the gap between voxel-based approaches and graph theoretical analysis.\n
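A sketch of the windowed computation described above: sliding-window correlation between region/voxel time courses, then the leading eigenvector (eigenvector centrality) per window. Synthetic data stand in for fMRI, and the clustering of the concatenated eigenvectors into dominant patterns is left out.

```python
# Windowed eigenvector centrality on synthetic multivariate time series.
import numpy as np

rng = np.random.default_rng(8)
T, V, W, step = 300, 50, 60, 10        # timepoints, regions, window, stride
ts = rng.standard_normal((T, V))
ts[:, :10] += 2.0 * rng.standard_normal((T, 1))   # one correlated subnetwork

centralities = []
for t0 in range(0, T - W + 1, step):
    C = np.abs(np.corrcoef(ts[t0:t0 + W].T))      # nonnegative connectivity
    _, vecs = np.linalg.eigh(C)
    v = vecs[:, -1]                               # leading eigenvector
    centralities.append(v * np.sign(v.sum()))     # fix the sign convention
E = np.vstack(centralities)                       # windows x regions
print("centrality matrix:", E.shape)
```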
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lattice Boltzmann method for modelling of biological phenomena.\n \n \n \n \n\n\n \n Noël, R.; Ge, F.; Zhang, Y.; Navarro, L.; and Courbebaisse, G.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2654-2658, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"LatticePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081692,\n  author = {R. Noël and F. Ge and Y. Zhang and L. Navarro and G. Courbebaisse},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Lattice Boltzmann method for modelling of biological phenomena},\n  year = {2017},\n  pages = {2654-2658},\n  abstract = {This paper suggests a new methodology based on the Lattice Boltzmann Method for the modelling of complex biomechanical systems. The LBM can be applied for different operations due to the matching of the pixels of medical images with the nodes of the lattice used by the Lattice Boltzmann method. This allows the optimisation and reduction of the computation time when solving multiphysics complex phenomena. To demonstrate the efficiency of the chosen approach, the modelling of the thrombosis phenomenon within the cavity of a giant cerebral aneurysm has been implemented. The underlying strategy is to implement the Lattice Boltzmann Method for different operations such as extracting the geometry of a considered aneurysm associated to its parent vessel, solving fluid dynamics governing the blood flow and modelling the thrombus growth.},\n  keywords = {biomechanics;blood vessels;brain;diseases;flow simulation;haemodynamics;lattice Boltzmann methods;medical image processing;physiological models;Lattice Boltzmann Method;biological phenomena;complex biomechanical systems;medical images;multiphysics complex phenomena;thrombosis phenomenon;optimisation;giant cerebral aneurysm;parent vessel;fluid dynamics;blood flow;thrombus growth;Mathematical model;Aneurysm;Lattice Boltzmann methods;Biological system modeling;Computational fluid dynamics},\n  doi = {10.23919/EUSIPCO.2017.8081692},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347091.pdf},\n}\n\n
\n
\n\n\n
\n This paper suggests a new methodology based on the Lattice Boltzmann Method for the modelling of complex biomechanical systems. The LBM can be applied for different operations due to the matching of the pixels of medical images with the nodes of the lattice used by the Lattice Boltzmann method. This allows the optimisation and reduction of the computation time when solving multiphysics complex phenomena. To demonstrate the efficiency of the chosen approach, the modelling of the thrombosis phenomenon within the cavity of a giant cerebral aneurysm has been implemented. The underlying strategy is to implement the Lattice Boltzmann Method for different operations such as extracting the geometry of a considered aneurysm associated to its parent vessel, solving fluid dynamics governing the blood flow and modelling the thrombus growth.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated analysis of collagen networks via microscopy.\n \n \n \n \n\n\n \n Suñe-Auñon, A.; Gonzalez-Arjona, M.; Vidal, A.; Lanillos, J.; and Muñoz-Barrutia, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2659-2663, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081693,\n  author = {A. Suñe-Auñon and M. Gonzalez-Arjona and A. Vidal and J. Lanillos and A. Muñoz-Barrutia},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Automated analysis of collagen networks via microscopy},\n  year = {2017},\n  pages = {2659-2663},\n  abstract = {A full understanding of the interactions between cells and their surrounding environment is needed to characterize the implication of cellular dynamics on physiology and pathology. The knowledge about the composition, the geometry and the mechanical properties of the extracellular matrix is essential for this purpose. In this manuscript, we use an established method for the characterization of 3D collagen networks at fiber resolution in confocal reflection microscopy images. Firstly, a binary mask of the entire network is obtained using steerable filtering and local Otsu thresholding. Secondly, individual collagen fibers are reconstructed by tracking maximum ridges in the Euclidean distance map of the binary mask. The approach was applied to quantify the 3D network geometry of hydrogels polymerized with different collagen concentrations in two in vitro platforms: an eight-well culture plate and a microfluidic device. Our results show similar fiber lengths, fiber persistence lengths and cross-link densities for the fibers of the collagen hydrogels polymerized in different platforms for the same concentration, while the differences in pore size are large, reflecting the anisotropy of the network polymerized in the microfluidic device.},\n  keywords = {biological tissues;biomechanics;biomedical materials;biomedical optical imaging;cellular biophysics;hydrogels;image segmentation;medical image processing;microfluidics;molecular biophysics;optical microscopy;proteins;automated analysis;collagen networks;surrounding environment;cellular dynamics;physiology;pathology;mechanical properties;extracellular matrix;fiber resolution;confocal reflection microscopy images;steerable filtering;individual collagen fibers;Euclidean distance map;microfluidic device;similar fiber lengths;fiber persistence lengths;collagen concentrations;local-Otsu thresholding;hydrogel polymerized 3D network geometry;Optical fiber networks;Optical fiber devices;Microscopy;Polymers;Microfluidics;Geometry;Reflection;Collagen networks;confocal reflection microscopy;fiber reconstruction algorithm;microfluidic devices},\n  doi = {10.23919/EUSIPCO.2017.8081693},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347282.pdf},\n}\n\n
\n
\n\n\n
\n A full understanding of the interactions between cells and their surrounding environment is needed to characterize the implication of cellular dynamics on physiology and pathology. The knowledge about the composition, the geometry and the mechanical properties of the extracellular matrix is essential for this purpose. In this manuscript, we use an established method for the characterization of 3D collagen networks at fiber resolution in confocal reflection microscopy images. Firstly, a binary mask of the entire network is obtained using steerable filtering and local Otsu thresholding. Secondly, individual collagen fibers are reconstructed by tracking maximum ridges in the Euclidean distance map of the binary mask. The approach was applied to quantify the 3D network geometry of hydrogels polymerized with different collagen concentrations in two in vitro platforms: an eight-well culture plate and a microfluidic device. Our results show similar fiber lengths, fiber persistence lengths and cross-link densities for the fibers of the collagen hydrogels polymerized in different platforms for the same concentration, while the differences in pore size are large, reflecting the anisotropy of the network polymerized in the microfluidic device.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the value of graph-based segmentation for the analysis of structural networks in life sciences.\n \n \n \n \n\n\n \n Bujoreanu, D.; Rasti, P.; and Rousseau, D.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2664-2668, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081694,\n  author = {D. Bujoreanu and P. Rasti and D. Rousseau},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {On the value of graph-based segmentation for the analysis of structural networks in life sciences},\n  year = {2017},\n  pages = {2664-2668},\n  abstract = {We propose, under the form of a short overview, to stress the interest of graph to encode the {"}topological{"} structure of networks hidden in images especially when applied in life sciences. We point toward existing computer science tools to extract such structural graph from images. We then illustrate different applications, such as segmentation, denoising, and simulation on practical examples of various bioimaging domains including vascular networks observed with fluorescent microscopy in 2D imaging, macroscopic root systems observed in 2D optical intensity imaging, and 3D porosity networks of seed observed in absorption X-ray microtomography.},\n  keywords = {biomedical optical imaging;blood vessels;feature extraction;fluorescence;image denoising;image segmentation;medical image processing;optical microscopy;porosity;structural graph;vascular networks;2D optical intensity imaging;structural networks;life sciences;computer science tools;graph-based segmentation;topological structure;image segmentation;image denoising;bioimaging domains;fluorescent microscopy;macroscopic root systems;3D porosity networks;absorption X-ray microtomography;Image segmentation;Imaging;Muscles;Topology;Measurement;Skeleton;Three-dimensional displays},\n  doi = {10.23919/EUSIPCO.2017.8081694},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343632.pdf},\n}\n\n
\n
\n\n\n
\n We propose, under the form of a short overview, to stress the interest of graph to encode the \"topological\" structure of networks hidden in images especially when applied in life sciences. We point toward existing computer science tools to extract such structural graph from images. We then illustrate different applications, such as segmentation, denoising, and simulation on practical examples of various bioimaging domains including vascular networks observed with fluorescent microscopy in 2D imaging, macroscopic root systems observed in 2D optical intensity imaging, and 3D porosity networks of seed observed in absorption X-ray microtomography.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalized CMAC adaptive ensembles for concept-drifting data streams.\n \n \n \n \n\n\n \n González-Serrano, F. J.; and Figueiras-Vidal, A. R.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2669-2673, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"GeneralizedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081695,\n  author = {F. J. González-Serrano and A. R. Figueiras-Vidal},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Generalized CMAC adaptive ensembles for concept-drifting data streams},\n  year = {2017},\n  pages = {2669-2673},\n  abstract = {In this paper we propose to use an adaptive ensemble learning framework with different levels of diversity to handle streams of data in non-stationary scenarios in which concept drifts are present. Our adaptive system consists of two ensembles, each one with a different level of diversity (from high to low), and, therefore, with different and complementary capabilities, that are adaptively combined to obtain an overall system of improved performance. In our approach, the ensemble members are generalized CMACs, a linear-in-the-parameters network. The ensemble of CMACs provides a reasonable trade-off between expressive power, simplicity, and fast learning speed. At the end of the paper, we provide a performance analysis of the proposed learning framework on benchmark datasets with concept drifts of different levels of severity and speed.},\n  keywords = {cerebellar model arithmetic computers;data handling;learning (artificial intelligence);adaptive system;ensemble members;concept drifts;generalized CMAC adaptive ensembles;concept-drifting data streams;adaptive ensemble learning framework;nonstationary scenarios;data stream handling;learning speed;Adaptation models;Diversity reception;Europe;Training;Electronic mail;Real-time systems},\n  doi = {10.23919/EUSIPCO.2017.8081695},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570340671.pdf},\n}\n\n
\n
\n\n\n
\n In this paper we propose to use an adaptive ensemble learning framework with different levels of diversity to handle streams of data in non-stationary scenarios in which concept drifts are present. Our adaptive system consists of two ensembles, each one with a different level of diversity (from high to low), and, therefore, with different and complementary capabilities, that are adaptively combined to obtain an overall system of improved performance. In our approach, the ensemble members are generalized CMACs, a linear-in-the-parameters network. The ensemble of CMACs provides a reasonable trade-off between expressive power, simplicity, and fast learning speed. At the end of the paper, we provide a performance analysis of the proposed learning framework on benchmark datasets with concept drifts of different levels of severity and speed.\n
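For concreteness, a minimal 1-D CMAC (tile coding) trained by LMS, the kind of linear-in-the-parameters network that serves as ensemble member here: several overlapping, offset tilings map the input to a few active weights, so learning is local and fast. The ensemble construction and drift handling of the paper are not reproduced, and all sizes are placeholders.

```python
# Minimal 1-D CMAC (tile coding) with normalized LMS-style updates.
import numpy as np

rng = np.random.default_rng(9)

class CMAC1D:
    def __init__(self, n_tilings=8, n_tiles=16, lo=-1.0, hi=1.0):
        self.T, self.K, self.lo, self.hi = n_tilings, n_tiles, lo, hi
        self.w = np.zeros((n_tilings, n_tiles + 1))
        self.offsets = np.linspace(0.0, 1.0, n_tilings, endpoint=False)

    def active(self, x):
        z = (x - self.lo) / (self.hi - self.lo) * self.K   # position in tiles
        return np.clip((z + self.offsets).astype(int), 0, self.K)

    def __call__(self, x):
        return self.w[np.arange(self.T), self.active(x)].sum()

    def update(self, x, e, mu=0.1):
        self.w[np.arange(self.T), self.active(x)] += mu * e / self.T

net = CMAC1D()
for n in range(3000):                   # learn a static nonlinearity online
    x = rng.uniform(-1, 1)
    d = np.sin(3 * x) + 0.05 * rng.standard_normal()
    net.update(x, d - net(x))
print("test MSE:", np.mean([(np.sin(3 * t) - net(t)) ** 2
                            for t in np.linspace(-1, 1, 101)]))
```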
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Recursive multikernel filters exploiting nonlinear temporal structure.\n \n \n \n \n\n\n \n Van Vaerenbergh, S.; Scardapane, S.; and Santamaria, I.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2674-2678, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"RecursivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081696,\n  author = {S. {Van Vaerenbergh} and S. Scardapane and I. Santamaria},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Recursive multikernel filters exploiting nonlinear temporal structure},\n  year = {2017},\n  pages = {2674-2678},\n  abstract = {In kernel methods, temporal information on the data is commonly included by using time-delayed embeddings as inputs. Recently, an alternative formulation was proposed by defining a γ-filter explicitly in a reproducing kernel Hilbert space, giving rise to a complex model where multiple kernels operate on different temporal combinations of the input signal. In the original formulation, the kernels are then simply combined to obtain a single kernel matrix (for instance by averaging), which provides computational benefits but discards important information on the temporal structure of the signal. Inspired by works on multiple kernel learning, we overcome this drawback by considering the different kernels separately. We propose an efficient strategy to adaptively combine and select these kernels during the training phase. The resulting batch and online algorithms automatically learn to process highly nonlinear temporal information extracted from the input signal, which is implicitly encoded in the kernel values. We evaluate our proposal on several artificial and real tasks, showing that it can outperform classical approaches both in batch and online settings.},\n  keywords = {Hilbert spaces;learning (artificial intelligence);matrix algebra;recursive filters;recursive multikernel filters;nonlinear temporal structure;single kernel matrix;multiple kernel learning;highly nonlinear temporal information;kernel Hilbert space;time-delayed embeddings;γ-filter;Kernel;Computational modeling;Signal processing algorithms;Hilbert space;Support vector machines;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081696},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343448.pdf},\n}\n\n
\n
\n\n\n
\n In kernel methods, temporal information on the data is commonly included by using time-delayed embeddings as inputs. Recently, an alternative formulation was proposed by defining a γ-filter explicitly in a reproducing kernel Hilbert space, giving rise to a complex model where multiple kernels operate on different temporal combinations of the input signal. In the original formulation, the kernels are then simply combined to obtain a single kernel matrix (for instance by averaging), which provides computational benefits but discards important information on the temporal structure of the signal. Inspired by works on multiple kernel learning, we overcome this drawback by considering the different kernels separately. We propose an efficient strategy to adaptively combine and select these kernels during the training phase. The resulting batch and online algorithms automatically learn to process highly nonlinear temporal information extracted from the input signal, which is implicitly encoded in the kernel values. We evaluate our proposal on several artificial and real tasks, showing that it can outperform classical approaches both in batch and online settings.\n
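A loose sketch of the "keep the kernels separate" idea: two Gaussian kernels of different widths maintain separate coefficient stacks on a shared dictionary, and a softmax-weighted combination of their outputs is adapted jointly with the coefficients. This is my own minimal construction mirroring the combination idea; the paper's recursive gamma-filter structure and selection strategy are considerably richer.

```python
# Online multikernel filter with an adaptively weighted kernel combination.
import numpy as np

rng = np.random.default_rng(10)
sigmas = [0.2, 1.0]                                # candidate kernel widths
mu, mu_a = 0.3, 0.5
centers = []
alphas = [[] for _ in sigmas]
a = np.zeros(len(sigmas))                          # combination logits

def outputs(u):
    if not centers:
        return np.zeros(len(sigmas))
    d2 = np.sum((np.array(centers) - u) ** 2, axis=1)
    return np.array([np.array(al) @ np.exp(-d2 / (2 * s ** 2))
                     for al, s in zip(alphas, sigmas)])

for n in range(800):
    u = rng.uniform(-1.0, 1.0, size=1)
    d = np.sin(4 * u[0]) + 0.05 * rng.standard_normal()
    o = outputs(u)
    wts = np.exp(a) / np.exp(a).sum()
    y = wts @ o
    e = d - y
    a += mu_a * e * wts * (o - y)                  # adapt the combination
    for al, wi in zip(alphas, wts):                # grow each kernel's expansion
        al.append(mu * e * wi)
    centers.append(u.copy())
```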
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multivariance nonlinear system identification using Wiener basis functions and perfect sequences.\n \n \n \n \n\n\n \n Orcioni, S.; Cecchi, S.; and Carini, A.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2679-2683, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"MultivariancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081697,\n  author = {S. Orcioni and S. Cecchi and A. Carini},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Multivariance nonlinear system identification using Wiener basis functions and perfect sequences},\n  year = {2017},\n  pages = {2679-2683},\n  abstract = {Multivariance identification methods exploit input signals with multiple variances for estimating the Volterra kernels of nonlinear systems. They overcome the problem of the locality of the solution, i.e., the fact that the estimated model well approximates the system only at the same input signal variance of the measurement. The estimation of a kernel for a certain input signal variance requires recomputing all lower order kernels. In this paper, a novel multivariance identification method based on Wiener basis functions is proposed to avoid recomputing the lower order kernels with computational saving. Formulas are provided for evaluating the Volterra kernels from the Wiener multivariance kernels. In order to further improve the nonlinear filter estimation, perfect periodic sequences that guarantee the orthogonality of the Wiener basis functions are used for Wiener kernel identification. Simulations and real measurements show that the proposed approach can accurately model nonlinear devices on a wide range of input signal variances.},\n  keywords = {filtering theory;identification;multivariable control systems;nonlinear control systems;nonlinear filters;nonlinear systems;signal processing;statistical analysis;Volterra series;Wiener basis functions;perfect sequences;multiple variances;Volterra kernels;nonlinear systems;estimated model;input signal variance;Wiener multivariance kernels;nonlinear filter estimation;perfect periodic sequences;Wiener kernel identification;nonlinear devices;multivariance nonlinear system identification methods;Kernel;Nonlinear systems;Estimation;Europe;Stochastic processes;Gaussian noise},\n  doi = {10.23919/EUSIPCO.2017.8081697},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346639.pdf},\n}\n\n
\n
\n\n\n
\n Multivariance identification methods exploit input signals with multiple variances for estimating the Volterra kernels of nonlinear systems. They overcome the problem of the locality of the solution, i.e., the fact that the estimated model well approximates the system only at the same input signal variance of the measurement. The estimation of a kernel for a certain input signal variance requires recomputing all lower order kernels. In this paper, a novel multivariance identification method based on Wiener basis functions is proposed to avoid recomputing the lower order kernels with computational saving. Formulas are provided for evaluating the Volterra kernels from the Wiener multivariance kernels. In order to further improve the nonlinear filter estimation, perfect periodic sequences that guarantee the orthogonality of the Wiener basis functions are used for Wiener kernel identification. Simulations and real measurements show that the proposed approach can accurately model nonlinear devices on a wide range of input signal variances.\n
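To fix what "estimating the Volterra kernels" means concretely, a batch least-squares fit of a short 2nd-order Volterra filter on synthetic data; the Wiener basis functions, multiple input variances, and perfect periodic excitations of the paper are beyond this sketch.

```python
# Least-squares identification of a 2nd-order Volterra filter (synthetic).
import numpy as np

rng = np.random.default_rng(11)
L = 4                                   # memory length (hypothetical)

def regressors(x, L):
    """Per sample: delayed inputs plus all products x_i * x_j (i <= j)."""
    xp = np.concatenate([np.zeros(L - 1), x])
    rows = []
    for n in range(len(x)):
        u = xp[n:n + L][::-1]
        rows.append(np.concatenate([u, np.outer(u, u)[np.triu_indices(L)]]))
    return np.array(rows)

x = rng.standard_normal(400)
Phi = regressors(x, L)
theta_true = rng.standard_normal(Phi.shape[1])     # stacked h1/h2 entries
y = Phi @ theta_true + 0.01 * rng.standard_normal(len(x))
theta_hat, *_ = np.linalg.lstsq(Phi, y, rcond=None)
print("kernel estimation error:", np.linalg.norm(theta_hat - theta_true))
```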
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modification of second-order nonlinear IIR filter for compensating linear and nonlinear distortions of electrodynamic loudspeaker.\n \n \n \n \n\n\n \n Iwai, K.; and Kajikawa, Y.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2684-2688, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ModificationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081698,\n  author = {K. Iwai and Y. Kajikawa},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Modification of second-order nonlinear IIR filter for compensating linear and nonlinear distortions of electrodynamic loudspeaker},\n  year = {2017},\n  pages = {2684-2688},\n  abstract = {In this paper, we propose a modified 2nd-order nonlinear IIR filter for compensation of both linear and nonlinear distortions of electrodynamic loudspeakers. A nonlinear IIR filter is a nonlinear compensator and is based on a physical nonlinear model of electrodynamic loudspeakers. However, it is difficult to compensate nonlinear distortions when a loudspeaker has large sharpness at the lowest resonance frequency, at which the displacement of the diaphragm becomes large. Although it is necessary to compensate the sharpness of the loudspeaker, the nonlinear IIR filter cannot compensate the sharpness because it does not have a linear filtering feature. In this paper, we propose a modified 2nd-order nonlinear IIR filter that can compensate not only the nonlinear distortions but also the sharpness by employing the linear characteristics of the loudspeaker with the desired sharpness. Experimental results show that the proposed filter can compensate the linear and nonlinear distortions more effectively than a conventional filter.},\n  keywords = {electrodynamics;IIR filters;loudspeakers;nonlinear distortion;nonlinear filters;nonlinear distortions;electrodynamic loudspeaker;nonlinear compensator;linear filtering feature;2nd-order nonlinear IIR filter;second-order nonlinear IIR filter;Loudspeakers;Resonant frequency;Nonlinear distortion;Frequency measurement;Electrodynamics;Distortion measurement;Europe},\n  doi = {10.23919/EUSIPCO.2017.8081698},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346368.pdf},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a modified 2nd-order nonlinear IIR filter for compensation of both linear and nonlinear distortions of electrodynamic loudspeakers. A nonlinear IIR filter is a nonlinear compensator and is based on a physical nonlinear model of electrodynamic loudspeakers. However, it is difficult to compensate nonlinear distortions when a loudspeaker has large sharpness at the lowest resonance frequency, at which the displacement of the diaphragm becomes large. Although it is necessary to compensate the sharpness of the loudspeaker, the nonlinear IIR filter cannot compensate the sharpness because it does not have a linear filtering feature. In this paper, we propose a modified 2nd-order nonlinear IIR filter that can compensate not only the nonlinear distortions but also the sharpness by employing the linear characteristics of the loudspeaker with the desired sharpness. Experimental results show that the proposed filter can compensate the linear and nonlinear distortions more effectively than a conventional filter.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Steady-state analysis of the maximum correntropy Volterra filter with application to nonlinear channel equalization.\n \n \n \n \n\n\n \n Lu, L.; Zhao, H.; and Champagne, B.\n\n\n \n\n\n\n In 2017 25th European Signal Processing Conference (EUSIPCO), pages 2689-2693, Aug 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Steady-statePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@InProceedings{8081699,\n  author = {L. Lu and H. Zhao and B. Champagne},\n  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},\n  title = {Steady-state analysis of the maximum correntropy Volterra filter with application to nonlinear channel equalization},\n  year = {2017},\n  pages = {2689-2693},\n  abstract = {As a well-established adaptation criterion, the maximum correntropy criterion (MCC) has received increased attention due to its robustness against outliers. In this paper, a new complex maximum correntropy criterion Volterra filter (Volterra-CMCC) that does not need any a priori information about the noise statistical characteristics, is proposed based on the recursive scheme. We study the steady-state excess mean-square-error (EMSE) behavior of the Volterra-CMCC algorithm by using the energy conservation relation and Taylor series expansion approach. Then, the proposed algorithm is applied to the nonlinear channel equalization problem, where the channel is contaminated by impulsive noise. The results obtained from the simulation study establish the effectiveness of this new Volterra-CMCC equalizer.},\n  keywords = {adaptive filters;impulse noise;least mean squares methods;mean square error methods;nonlinear filters;series (mathematics);maximum correntropy Volterra filter;noise statistical characteristics;steady-state excess mean-square-error behavior;Volterra-CMCC algorithm;nonlinear channel equalization problem;steady-state analysis;maximum correntropy criterion Volterra filter;energy conservation relation;Taylor series expansion approach;impulsive noise;Volterra-CMCC equalizer;Europe;Signal processing},\n  doi = {10.23919/EUSIPCO.2017.8081699},\n  issn = {2076-1465},\n  month = {Aug},\n  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347258.pdf},\n}\n\n
@InProceedings{8081700,
  author = {V. Patel and S. Pradhan and N. V. George},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Collaborative adaptive exponential linear-in-the-parameters nonlinear filters},
  year = {2017},
  pages = {2694-2698},
  abstract = {An adaptive exponential functional link artificial neural network (AEFLANN) based active noise control (ANC) system trained using a collaborative learning scheme is designed in this paper. In the proposed approach, a separate learning mechanism is used for updating the weights of the linear portion of the AEFLANN and of its non-linear section. The outputs of the linear and non-linear sections are suitably combined, and the update mechanism involves the weights of the linear and non-linear portions, the combination parameter and the adaptive exponential factor. A simulation study shows enhanced noise cancellation in comparison with other non-linear ANC schemes.},
  keywords = {active noise control;filtering theory;hearing aids;learning (artificial intelligence);least mean squares methods;neural nets;nonlinear filters;collaborative adaptive exponential linear-in-the-parameters nonlinear filters;adaptive exponential functional link artificial neural network;AEFLANN;active noise control system;collaborative learning scheme;separate learning mechanism;linear portion;nonlinear section;update mechanism;combination parameter;adaptive exponential factor;nonlinear ANC schemes;Microphones;Collaboration;Collaborative work;Adaptation models;Europe;Noise cancellation;Active noise control;functional link artificial neural network;noise cancellation;non-linear filter},
  doi = {10.23919/EUSIPCO.2017.8081700},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346428.pdf},
}
@InProceedings{8081701,
  author = {J. {Vilà-Valls} and P. Closas and J. T. Curran},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Performance analysis of multi-frequency GNSS carrier tracking for strong ionospheric scintillation mitigation},
  year = {2017},
  pages = {2699-2703},
  abstract = {In Global Navigation Satellite Systems (GNSS), ionospheric scintillation is one of the more challenging propagation scenarios, particularly affecting high-precision receivers based on carrier phase measurements. In this contribution, we propose a new digital carrier synchronization state-space formulation for the mitigation of strong scintillation. It takes into account multi-frequency GNSS observations, allowing tracking of both line-of-sight phase variations and complex gain scintillation components, that is, scintillation amplitude and phase. The joint carrier tracking and scintillation mitigation problem is solved using a multi-frequency nonlinear Kalman filter-based solution. The performance improvement of the new approach is shown using realistic synthetic data, and compared to state-of-the-art PLL and KF-based architectures.},
  keywords = {interference suppression;ionospheric electromagnetic wave propagation;Kalman filters;nonlinear filters;radiowave propagation;satellite navigation;synchronisation;multifrequency GNSS carrier tracking;strong ionospheric scintillation mitigation;challenging propagation scenarios;high-precision receivers;carrier phase measurements;digital carrier synchronization state-space formulation;account multifrequency GNSS observations;line-of-sight phase variations;complex gain scintillation components;scintillation amplitude;joint carrier tracking;scintillation mitigation problem;multifrequency nonlinear Kalman filter;Mathematical model;Global navigation satellite system;Receivers;Synchronization;Europe;Signal processing;Satellites},
  doi = {10.23919/EUSIPCO.2017.8081701},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570339391.pdf},
}
@InProceedings{8081702,
  author = {K. Wen and K. Yu and Y. Li},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {NLOS identification and compensation for UWB ranging based on obstruction classification},
  year = {2017},
  pages = {2704-2708},
  abstract = {Non-line-of-sight (NLOS) propagation is one of the major barriers to accurate ranging and positioning based on time of arrival (TOA) in ultra wideband (UWB) systems. This paper proposes a new method for NLOS identification and mitigation based on signal characteristic analysis and fuzzy theory. The method requires neither building a statistical model nor creating and updating a training database, so it can be used conveniently in different application scenarios. Extensive experiments were conducted, and the results show that the probability of a ranging error below 0.5 m is over 90% when using the proposed mitigation method, while without it this probability is below 70%. Also, by using the proposed method, the root mean square error (RMSE) of the range measurements is reduced from 0.77 m to 0.33 m. The results demonstrate that this method can effectively identify NLOS conditions and mitigate the NLOS-induced ranging error.},
  keywords = {fuzzy set theory;mean square error methods;radiowave propagation;statistical analysis;time-of-arrival estimation;ultra wideband communication;fuzzy theory;statistical model;cumulative distribution function;mitigation method;obstruction classification;nonline-of-sight propagation;ultra wideband system;signal characteristic analysis;NLOS propagation;UWB system;TOA;time of arrival;root mean square error;RMSE;NLOS-induced ranging error;Distance measurement;Iron;Concrete;Delays;Signal to noise ratio;Measurement uncertainty;Europe;UWB;NLOS identification;fuzzy theory;error mitigation},
  doi = {10.23919/EUSIPCO.2017.8081702},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570345025.pdf},
}
@InProceedings{8081703,
  author = {S. Büyükçorak and G. K. Kurt and A. Yongaçoğlu},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {An empirical study on gamma shadow fading based localization},
  year = {2017},
  pages = {2709-2713},
  abstract = {In this paper, we propose a maximum likelihood estimator for received signal strength (RSS) based indoor localization systems by exploiting the gamma shadow fading model. In order to investigate the validity of the proposed method in a realistic environment, we develop a testbed based on Wi-Fi technology. Through experimental analyses, we first demonstrate that the gamma distribution is a good fit to the lognormal distribution, and that both can characterize the empirical RSS observations with sufficient accuracy. We then observe that the gamma distribution is worth investigating for indoor localization, since it provides superior accuracy compared to the lognormal model. We further analyze, via simulations, the impact of uncertainties in the considered distributions' parameters on the localization performance.},
  keywords = {fading channels;gamma distribution;indoor navigation;log normal distribution;maximum likelihood estimation;RSSI;wireless LAN;received signal strength;indoor localization systems;Wi-Fi technology;gamma distribution;lognormal distribution;lognormal model;maximum likelihood estimator;gamma shadow fading based localization;Fading channels;Wireless fidelity;Maximum likelihood estimation;Analytical models;Microscopy;Europe;Positioning;received power level;gamma shadow fading;maximum likelihood estimator;Wi-Fi},
  doi = {10.23919/EUSIPCO.2017.8081703},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346861.pdf},
}
@InProceedings{8081704,
  author = {A. Minetto and C. Cristodaro and F. Dovis},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A collaborative method for positioning based on GNSS inter agent range estimation},
  year = {2017},
  pages = {2714-2718},
  abstract = {The limited availability and the lack of continuity in the service of Global Navigation Satellite Systems (GNSS) in harsh environments is a critical issue for Intelligent Transport Systems (ITS) applications relying on position information. This work is developed within the framework of vehicle-to-everything (V2X) communication, with the aim of guaranteeing continuous position availability to all the agents belonging to the network when GNSS is not available for a subset of them. The simultaneous observation of shared satellites is exploited to estimate the non-line-of-sight inter-agent range (IAR) within a real-time-connected network of receivers. The effectiveness of a hybrid localization algorithm based on the integration of standard GNSS measurements and linearised IAR estimates is demonstrated. The hybrid position estimation is solved through a self-adaptive iterative algorithm to find the position of receivers experiencing GNSS outages.},
  keywords = {Global Positioning System;iterative methods;position control;vehicle-to-everything communication;V2X communication;Non-Line-Of-Sight Inter-Agent Range;self-adaptive iterative algorithm;linearised IAR estimates;GNSS outages;hybrid position estimation;standard GNSS measurements;hybrid localization algorithm;real-time-connected network;shared satellites;continuous position availability;Intelligent Transport Systems applications;Global Navigation Satellite Systems;GNSS inter agent range estimation;collaborative method;Receivers;Global navigation satellite system;Satellites;Signal processing algorithms;Collaboration;Estimation;Signal processing;GNSS;ITS;Aided Positioning;Collaborative Localization;IAR},
  doi = {10.23919/EUSIPCO.2017.8081704},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347508.pdf},
}
@InProceedings{8081705,
  author = {K. Shamaei and J. Khalife and Z. M. Kassas},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Ranging precision analysis of LTE signals},
  year = {2017},
  pages = {2719-2723},
  abstract = {The ranging precision of the secondary synchronization signal in cellular long-term evolution (LTE) systems is evaluated. First, the pseudorange error for a delay-locked loop with a coherent baseband discriminator is analyzed, and a closed-form expression for the standard deviation of the pseudorange error is derived. Second, the effect of multipath on the ranging error is evaluated analytically. Experimental results closely matching the analytical expression of the pseudorange error standard deviation are presented. Key remarks to take into consideration when designing a receiver for positioning using LTE signals are provided throughout the paper.},
  keywords = {cellular radio;delay lock loops;Long Term Evolution;signal processing;synchronisation;secondary synchronization signal;pseudorange error standard deviation;LTE signals;cellular long-term evolution systems;delay-locked loop;Distance measurement;Baseband;Long Term Evolution;Bandwidth;Global Positioning System;Correlators},
  doi = {10.23919/EUSIPCO.2017.8081705},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347696.pdf},
}
@InProceedings{8081706,
  author = {O. Bialer and D. Raphaeli and A. J. Weiss},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Robust time-of-arrival estimation in multipath channels with OFDM signals},
  year = {2017},
  pages = {2724-2728},
  abstract = {Time-of-arrival (TOA) estimation of OFDM signals in multipath channels is challenging, especially when the signal bandwidth is limited, as is the case for Wi-Fi. In this paper, a novel low-complexity TOA estimation algorithm is developed for OFDM signals that is robust to various multipath environments. We approximate the received signal as Gaussian with a proposed autocorrelation model, and then derive the maximum likelihood estimator with an efficient implementation algorithm. The estimator performance was tested with off-the-shelf Wi-Fi 802.11g routers. The results show that the proposed TOA estimator outperforms other known reference methods over various indoor environments.},
  keywords = {maximum likelihood estimation;multipath channels;OFDM modulation;time-of-arrival estimation;wireless channels;wireless LAN;off-the-shelf Wi-Fi 802.11g routers;autocorrelation model;low complexity TOA estimation algorithm;maximum likelihood estimator;estimator performance;received signal;multipath environments;signal bandwidth;multipath channels;time-of-arrival estimation;Complexity theory;Time of arrival estimation;OFDM;Maximum likelihood estimation;Delays;Multipath channels;Indoor environments},
  doi = {10.23919/EUSIPCO.2017.8081706},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347546.pdf},
}
@InProceedings{8081707,
  author = {G. Piñero and C. Botella and M. {de Diego} and M. Ferrer and A. González},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {On the feasibility of personal audio systems over a network of distributed loudspeakers},
  year = {2017},
  pages = {2729-2733},
  abstract = {Personal audio reproduction systems deal with the creation of personal sound zones within a room without the need for headphones. These systems use an array of loudspeakers and design the required filters at each loudspeaker in order to render the desired audio signal to each person in the room as free of interference as possible. There are interesting proposals in the literature that make use of circular or linear arrays, but in this paper we study the problem considering a network of distributed loudspeakers controlled by a set of acoustic nodes, which can exchange information through a network. We state the model of such a distributed system by considering the electro-acoustic paths between the loudspeakers and each microphone, and aim to provide a minimum signal-to-interference-and-noise ratio (SINR) to each zone while constraining the emitted power of the loudspeakers to a maximum value (avoiding annoying feedback effects). We make use of optimization techniques to decide whether, given a distribution of the loudspeakers and a location of the personal sound zones within the room, the system will be feasible. Simulations are presented to support the use of the proposed optimization techniques.},
  keywords = {audio signal processing;audio systems;loudspeakers;optimisation;sound reproduction;distributed loudspeakers;distributed system;electro-acoustic paths;personal sound zones;circular arrays;linear arrays;audio signal;personal audio reproduction systems;minimum signal-to-interference-and-noise ratio;Loudspeakers;Interference;Microphones;Signal to noise ratio;Acoustics;Optimization;Wireless communication},
  doi = {10.23919/EUSIPCO.2017.8081707},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570341655.pdf},
}
@InProceedings{8081708,
  author = {P. M. Baggenstoss and K. Wilkinghoff and F. Kurth},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Glottal mixture model (GLOMM) for speaker identification on telephone channels},
  year = {2017},
  pages = {2734-2738},
  abstract = {The Glottal Mixture Model (GLOMM) extracts speaker-dependent voice source information from speech data. It has previously been shown to provide speaker identification performance on clean speech comparable to the universal background model (UBM), a state-of-the-art method based on MFCCs. When combined with the UBM, the error rate was reduced by a factor of three, showing that the voice source information is largely independent of the information contained in the MFCCs, yet holds as much speaker-related information. We now describe how GLOMM can be adapted for telephone-quality audio and provide a significant error reduction when combined with UBM and i-vector approaches. We demonstrate a factor-of-two error reduction on the NTIMIT data set with respect to the best published results.},
  keywords = {feature extraction;mixture models;speaker recognition;GLOMM;telephone channels;speaker-dependent voice source information;speech data;speaker identification performance;MFCC;error rate;telephone quality audio;error reduction;glottal mixture model;speaker-related information;I-vector approach;UBM approach;NTIMIT data set;Mel frequency cepstral coefficient;Speech;Feature extraction;Signal processing algorithms;Telephone sets;Europe},
  doi = {10.23919/EUSIPCO.2017.8081708},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570343401.pdf},
}
@InProceedings{8081709,
  author = {H. Phan and P. Koch and F. Katzberg and M. Maass and R. Mazur and I. McLoughlin and A. Mertins},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {What makes audio event detection harder than classification?},
  year = {2017},
  pages = {2739-2743},
  abstract = {There is a common observation that audio event classification is easier to deal with than detection. So far, this observation has been accepted as a fact, without a careful analysis. In this paper, we examine the rationale behind this fact and, more importantly, leverage it to benefit the audio event detection task. We present an improved detection pipeline in which a verification step is appended to augment a detection system. This step employs a high-quality event classifier to postprocess the benign event hypotheses output by the detection system and reject false alarms. To demonstrate the effectiveness of the proposed pipeline, we implement and pair up different event detectors based on the most common detection schemes and various event classifiers, ranging from the standard bag-of-words model to the state-of-the-art bank-of-regressors one. Experimental results on the ITC-Irst dataset show significant improvements in detection performance. More importantly, these improvements are consistent for all detector-classifier combinations.},
  keywords = {audio signal processing;feature extraction;object detection;pattern classification;signal classification;signal detection;event classifiers;audio event classification;audio event detection task;verification step;high-quality event classifier;audio event detection system;ITC-Irst dataset;detector-classifier combinations;Pipelines;Event detection;Hidden Markov models;Detectors;Feature extraction;Training},
  doi = {10.23919/EUSIPCO.2017.8081709},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570346959.pdf},
}
@InProceedings{8081710,
  author = {J. Pons and O. Slizovskaia and R. Gong and E. Gómez and X. Serra},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {Timbre analysis of music audio signals with convolutional neural networks},
  year = {2017},
  pages = {2744-2748},
  abstract = {The focus of this work is to study how to efficiently tailor Convolutional Neural Networks (CNNs) towards learning timbre representations from log-mel magnitude spectrograms. We first review the trends in designing CNN architectures. Through this literature overview we discuss the crucial points to consider for efficiently learning timbre representations using CNNs. From this discussion we propose a design strategy meant to capture the relevant time-frequency contexts for learning timbre, which permits using domain knowledge for designing architectures. In addition, one of our main goals is to design efficient CNN architectures, which reduces the risk of over-fitting since the number of parameters is minimized. Several architectures based on the proposed design principles are successfully assessed for different research tasks related to timbre: singing voice phoneme classification, musical instrument recognition and music auto-tagging.},
  keywords = {acoustic signal processing;audio signal processing;feature extraction;learning (artificial intelligence);music;musical instruments;neural nets;speech processing;speech recognition;design strategy;relevant time-frequency contexts;design principles;musical instrument recognition;music auto-tagging;timbre analysis;music audio signals;convolutional neural networks;timbre representations;log-mel magnitude spectrograms;CNN architectures;CNN number;Timbre;Spectrogram;Shape;Time-frequency analysis;Context modeling;Machine learning},
  doi = {10.23919/EUSIPCO.2017.8081710},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347061.pdf},
}
@InProceedings{8081711,
  author = {H. Eghbal-zadeh and B. Lehner and M. Dorfer and G. Widmer},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A hybrid approach with multi-channel i-vectors and convolutional neural networks for acoustic scene classification},
  year = {2017},
  pages = {2749-2753},
  abstract = {In Acoustic Scene Classification (ASC) two major approaches have been followed. While one utilizes engineered features such as mel-frequency cepstral coefficients (MFCCs), the other uses learned features that are the outcome of an optimization algorithm. I-vectors are the result of a modeling technique that usually takes engineered features as input. It has been shown that standard MFCCs extracted from monaural audio signals lead to i-vectors that exhibit poor performance, especially on indoor acoustic scenes. At the same time, Convolutional Neural Networks (CNNs) are well known for their ability to learn features by optimizing their filters. They have been applied to ASC and have shown promising results. In this paper, we first propose a novel multi-channel i-vector extraction and scoring scheme for ASC, improving performance on indoor and outdoor scenes. Second, we propose a CNN architecture that achieves promising ASC results. Further, we show that i-vectors and CNNs capture complementary information from acoustic scenes. Finally, we propose a hybrid ASC system using multi-channel i-vectors and CNNs by means of a score fusion technique. Using our method, we participated in the ASC task of the DCASE-2016 challenge. Our hybrid approach achieved 1st rank among 49 submissions, substantially improving the previous state of the art.},
  keywords = {acoustic signal processing;cepstral analysis;feature extraction;learning (artificial intelligence);neural nets;signal classification;hybrid approach;multichannel i-vectors;convolutional neural networks;optimization algorithm;modeling technique;monaural audio signals;indoor acoustic scenes;multichannel i-vector extraction;scoring scheme;hybrid system;score fusion technique;ASC task;acoustic scene classification;MFCC;complementary information;Mel-frequency-cepstral-coefficients;CNN architecture;Feature extraction;Mel frequency cepstral coefficient;Adaptation models;Training;Computational modeling;Neural networks},
  doi = {10.23919/EUSIPCO.2017.8081711},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347275.pdf},
}
@InProceedings{8081712,
  author = {M. Valenti and D. Tonelli and F. Vesperini and E. Principi and S. Squartini},
  booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)},
  title = {A neural network approach for sound event detection in real life audio},
  year = {2017},
  pages = {2754-2758},
  abstract = {This paper presents and compares two algorithms based on artificial neural networks (ANNs) for sound event detection in real life audio. Both systems have been developed and evaluated with the material provided for the third task of the Detection and Classification of Acoustic Scenes and Events (DCASE) 2016 challenge. For the first algorithm, we make use of an ANN trained on different features extracted from the down-mixed mono channel audio. Secondly, we analyse a binaural algorithm where the same feature extraction is performed on four different channels: the two binaural channels, the averaged monaural signal and the difference between the binaural channels. The proposed feature set comprises mel-frequency cepstral coefficients and log-mel energies, along with activity information extracted with two different voice activity detection (VAD) algorithms. Moreover, we present results obtained with two different neural architectures, namely multi-layer perceptrons (MLPs) and recurrent neural networks. The highest scores on the DCASE 2016 evaluation dataset are achieved by an MLP trained on binaural features and adaptive-energy VAD: an averaged error rate of 0.79 and an averaged F1 score of 48.1%, marking an improvement over the best score registered in the DCASE 2016 challenge.},
  keywords = {acoustic signal processing;audio signal processing;cepstral analysis;feature extraction;multilayer perceptrons;recurrent neural nets;signal classification;speech recognition;MLP;multilayer perceptrons;VAD algorithms;down-mixed monochannel audio;detection and classification of acoustic scenes and events;real life audio;voice activity detection algorithms;monaural signal;binaural features;DCASE 2016 evaluation dataset;recurrent neural networks;log-mel energies;mel-frequency cepstral coefficients;binaural channels;feature extraction;binaural algorithm;sound event detection;ANN;artificial neural networks;Feature extraction;Signal processing algorithms;Acoustics;Event detection;Training},
  doi = {10.23919/EUSIPCO.2017.8081712},
  issn = {2076-1465},
  month = {Aug},
  url = {https://www.eurasip.org/proceedings/eusipco/eusipco2017/papers/1570347664.pdf},
}